hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3f3a04716997d73eaef4e151bd98036259ad059e | 1,183 | py | Python | src/unicon/plugins/nxos/n5k/service_statements.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 18 | 2019-11-23T23:14:53.000Z | 2022-01-10T01:17:08.000Z | src/unicon/plugins/nxos/n5k/service_statements.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 12 | 2020-11-09T20:39:25.000Z | 2022-03-22T12:46:59.000Z | src/unicon/plugins/nxos/n5k/service_statements.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 32 | 2020-02-12T15:42:22.000Z | 2022-03-15T16:42:10.000Z | from unicon.eal.dialogs import Statement
from .service_patterns import NxosN5kReloadPatterns
from unicon.plugins.nxos.service_statements import (login_stmt, password_stmt,
enable_vdc, admin_password)
from unicon.plugins.generic.service_statements import (save_env,
auto_provision, auto_install_dialog,
setup_dialog, confirm_reset,
press_enter, confirm_config, module_reload, save_module_cfg,
secure_passwd_std, )
# for nxos n5k single rp reload
pat = NxosN5kReloadPatterns()
reload_confirm_nxos = Statement(pattern=pat.reload_confirm_nxos,
action='sendline(y)',
loop_continue=True,
continue_timer=False)
# reload statement list for nxos n5k single-rp
nxos_reload_statement_list = [save_env, confirm_reset, reload_confirm_nxos,
press_enter, login_stmt, password_stmt,
confirm_config, setup_dialog,
auto_install_dialog, module_reload,
save_module_cfg, secure_passwd_std,
admin_password, auto_provision, enable_vdc]
| 43.814815 | 78 | 0.658495 | 128 | 1,183 | 5.710938 | 0.40625 | 0.04104 | 0.069767 | 0.057456 | 0.158687 | 0.109439 | 0.109439 | 0.109439 | 0 | 0 | 0 | 0.004745 | 0.287405 | 1,183 | 26 | 79 | 45.5 | 0.862396 | 0.062553 | 0 | 0 | 0 | 0 | 0.009946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.3 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
3f3edf95fac5cc6b31cb7effd1e2b59006a53ab6 | 4,675 | py | Python | backend/app.py | CMU-IDS-2020/fp-profiler | 45edb7c5f5dfcf34854057476558793bc877f031 | [
"BSD-3-Clause"
] | null | null | null | backend/app.py | CMU-IDS-2020/fp-profiler | 45edb7c5f5dfcf34854057476558793bc877f031 | [
"BSD-3-Clause"
] | null | null | null | backend/app.py | CMU-IDS-2020/fp-profiler | 45edb7c5f5dfcf34854057476558793bc877f031 | [
"BSD-3-Clause"
] | 1 | 2020-11-20T02:56:20.000Z | 2020-11-20T02:56:20.000Z | from flask import Flask, request
import os
from subprocess import Popen, PIPE
import json
from prof_file_util import load_source, load_line_profile, load_graph_profile
from linewise_barchart import linewise_barchart
from valgrind import extract_valgrind_result
from mem_issue_visualize import mem_issue_visualize
app = Flask(__name__)
@app.route('/upload-file', methods = ['POST'])
def hello():
'''
    shall return a JSON dict
    if it succeeds,
{
'error': 0,
'vega_json': ...
'node_json': ...
'edge_json': ...
...
}
    if it fails,
{
'error': 1,
'source': formatted source code,
'error_message': the compile failure message
}
'''
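    # Minimal client sketch (assumptions: the server listens on localhost:5000;
    # the `requests` library is illustrative and not part of this module):
    #   import requests
    #   resp = requests.post('http://localhost:5000/upload-file',
    #                        json={'code': 'int main() { return 0; }'})
    #   result = resp.json()  # inspect result['error'] before using the charts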
code = request.get_json()['code']
# print(code)
local_path = 'temp.c' # TODO: hash file names to handle concurrency issues
with open(local_path, 'w') as f:
f.write(code)
process = Popen(['wc', '-l', local_path], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
# print(output)
# with open('test.json') as f:
# s = json.load(f)
ret_dict = {}
'''
    Invoke the compiler (if needed) and profiler to generate the results.
'''
os.system('clang-format -i {}'.format(local_path))
compile_retvalue = os.system('gcc -g -pg {} -o prog 1> gcc_output 2>&1'.format(local_path))
# handle compiling error
if compile_retvalue != 0:
ret_dict['error'] = 1
ret_dict['source'] = ''.join(list(open(local_path, 'r').readlines()))
ret_dict['error_message'] = ''.join(list(open('gcc_output', 'r').readlines()))
return ret_dict
os.system('./prog')
os.system('ctags --fields=+ne -o - --sort=no {} 1> ctags_output 2>&1'.format(local_path))
os.system('gprof --graph prog gmon.out 1> graph_file 2>&1')
os.system('gprof -l prog gmon.out 1> linewise_file 2>&1')
'''
    Now we have the outputs. Visualize them and pass the results back to the frontend.
'''
    # For debugging purposes. Only Linux can host gprof so far.
ret_dict['error'] = 0
if os.path.isfile('linewise_file') and os.path.getsize('linewise_file') > 0\
and os.path.isfile('graph_file') and os.path.getsize('graph_file') > 0:
df = load_line_profile(local_path, 'linewise_file')
chart = linewise_barchart(df)
# chart.save('new.json')
'''
TODO: Maybe the temporary files should be cleared or
stored somewhere serving as history data.
'''
ret_dict['vega_json'] = json.loads(chart.to_json())
graph_dct = load_graph_profile('graph_file')
if graph_dct:
for k, v in graph_dct.items():
ret_dict[k] = v
else:
ret_dict['vega_json'] = json.load(open('test.json', 'r'))
# print(uninitialised_buffer, invalid_write_buffer, mem_leak_dic)
return ret_dict
@app.route('/mem-profile', methods = ['POST'])
def mem_profile():
'''
    shall return a JSON dict
    if it succeeds,
{
'error': 0,
'vega_json': ...
...
}
    if it fails,
{
'error': 1,
'source': formatted source code,
'error_message': the compile failure message
}
'''
code = request.get_json()['code']
# print(code)
local_path = 'temp.c' # TODO: hash file names to handle concurrency issues
with open(local_path, 'w') as f:
f.write(code)
process = Popen(['wc', '-l', local_path], stdout=PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
# print(output)
# with open('test.json') as f:
# s = json.load(f)
ret_dict = {}
'''
    Invoke the compiler (if needed) and profiler to generate the results.
'''
os.system('clang-format -i {}'.format(local_path))
compile_retvalue = os.system('gcc -pedantic -g {} -o exec 1> gcc_output 2>&1'.format(local_path))
if compile_retvalue != 0:
ret_dict['error'] = 1
ret_dict['source'] = ''.join(list(open(local_path, 'r').readlines()))
ret_dict['error_message'] = ''.join(list(open('gcc_output', 'r').readlines()))
return ret_dict
os.system('valgrind ./exec > valgrind.txt 2>&1')
uninitialised_buffer, invalid_write_buffer = extract_valgrind_result('other', 'valgrind.txt')
os.system('valgrind --leak-check=full ./exec > valgrind_leak.txt 2>&1')
mem_leak_dic = extract_valgrind_result('memory_leak', 'valgrind_leak.txt')
ret_dict['error'] = 0
vega_chart = mem_issue_visualize(local_path, uninitialised_buffer, invalid_write_buffer, mem_leak_dic)
ret_dict['vega_json'] = json.loads(vega_chart.to_json())
return ret_dict
| 31.802721 | 106 | 0.620535 | 630 | 4,675 | 4.412698 | 0.268254 | 0.045324 | 0.025899 | 0.015108 | 0.561511 | 0.526978 | 0.501439 | 0.501439 | 0.448201 | 0.448201 | 0 | 0.008676 | 0.235722 | 4,675 | 146 | 107 | 32.020548 | 0.769381 | 0.185241 | 0 | 0.470588 | 0 | 0 | 0.201336 | 0 | 0 | 0 | 0 | 0.020548 | 0 | 1 | 0.029412 | false | 0 | 0.117647 | 0 | 0.205882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f3fcc2c16b2bfd7c2cf31951c3290a8d5c5992d | 355 | py | Python | Level1/Lessons76501/minari-76501.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons76501/minari-76501.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | null | null | null | Level1/Lessons76501/minari-76501.py | StudyForCoding/ProgrammersLevel | dc957b1c02cc4383a93b8cbf3d739e6c4d88aa25 | [
"MIT"
] | 1 | 2021-04-05T07:35:59.000Z | 2021-04-05T07:35:59.000Z | def solution(absolutes, signs):
answer = 0
for i in range(len(absolutes)):
if signs[i] is True:
answer += int(absolutes[i])
else:
answer -= int(absolutes[i])
return answer
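# Usage sketch (example values): solution([4, 7, 12], [True, False, True]) -> 9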
#1. for loop (len(absolutes)), if signs[i] is true: answer += absolutes[i], else: answer -= absolutes[i]
#2. sum(absolutes) | 29.583333 | 100 | 0.571831 | 47 | 355 | 4.319149 | 0.446809 | 0.197044 | 0.137931 | 0.187192 | 0.315271 | 0.315271 | 0.315271 | 0.315271 | 0 | 0 | 0 | 0.011858 | 0.287324 | 355 | 12 | 101 | 29.583333 | 0.790514 | 0.326761 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f40172291607ab0c848f7f1917399766b9b515c | 1,082 | py | Python | pyexcel/__init__.py | quis/pyexcel | e02f5ff871ba69184d3fb85fa8960da4e883ebdc | [
"BSD-3-Clause"
] | null | null | null | pyexcel/__init__.py | quis/pyexcel | e02f5ff871ba69184d3fb85fa8960da4e883ebdc | [
"BSD-3-Clause"
] | null | null | null | pyexcel/__init__.py | quis/pyexcel | e02f5ff871ba69184d3fb85fa8960da4e883ebdc | [
"BSD-3-Clause"
] | null | null | null | """
pyexcel
~~~~~~~~~~~~~~~~~~~
**pyexcel** is a wrapper library to read, manipulate and
write data in different excel formats: csv, ods, xls, xlsx
and xlsm. It does not support formulas, styles and charts.
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
# flake8: noqa
from .cookbook import (
merge_csv_to_a_book,
merge_all_to_a_book,
split_a_book,
extract_a_sheet_from_a_book,
)
from .core import (
get_array,
iget_array,
get_dict,
get_records,
iget_records,
get_book_dict,
get_sheet,
get_book,
iget_book,
save_as,
isave_as,
save_book_as,
isave_book_as,
)
from .book import Book
from .sheet import Sheet
from .internal.garbagecollector import free_resources
from .deprecated import (
load_book,
load_book_from_memory,
load,
load_from_memory,
load_from_dict,
load_from_records,
Reader,
SeriesReader,
ColumnSeriesReader,
BookReader,
)
from .__version__ import __version__, __author__
| 21.64 | 62 | 0.686691 | 145 | 1,082 | 4.751724 | 0.517241 | 0.029028 | 0.020319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010804 | 0.230129 | 1,082 | 49 | 63 | 22.081633 | 0.816327 | 0.297597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.189189 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f42306d062bc9168cc3334b385fbe62bb7498d6 | 14,054 | py | Python | bitten/queue.py | SpamExperts/bitten | 924ae157c876eeff7957074b0c51ed4685d4f304 | [
"BSD-3-Clause"
] | null | null | null | bitten/queue.py | SpamExperts/bitten | 924ae157c876eeff7957074b0c51ed4685d4f304 | [
"BSD-3-Clause"
] | 1 | 2020-09-24T05:28:44.000Z | 2020-09-28T05:34:19.000Z | bitten/queue.py | SpamExperts/bitten | 924ae157c876eeff7957074b0c51ed4685d4f304 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2010 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Implements the scheduling of builds for a project.
This module provides the functionality for scheduling builds for a specific
Trac environment. It is used by both the build master and the web interface to
get the list of required builds (revisions not built yet).
Furthermore, the `BuildQueue` class is used by the build master to determine
the next pending build, and to match build slaves against configured target
platforms.
"""
from itertools import ifilter
import re
import time
from trac.util.datefmt import to_timestamp
from trac.util import pretty_timedelta, format_datetime
from trac.attachment import Attachment
from bitten.model import BuildConfig, TargetPlatform, Build, BuildStep
from bitten.util.repository import get_repos
__docformat__ = 'restructuredtext en'
def collect_changes(config, authname=None):
"""Collect all changes for a build configuration that either have already
been built, or still need to be built.
This function is a generator that yields ``(platform, rev, build)`` tuples,
where ``platform`` is a `TargetPlatform` object, ``rev`` is the identifier
of the changeset, and ``build`` is a `Build` object or `None`.
:param config: the build configuration
:param authname: the logged in user
"""
env = config.env
repos_name, repos, repos_path = get_repos(env, config.path, authname)
with env.db_query as db:
try:
node = repos.get_node(repos_path)
except Exception, e:
env.log.warn('Error accessing path %r for configuration %r',
repos_path, config.name, exc_info=True)
return
for path, rev, chg in node.get_history():
# Don't follow moves/copies
if path != repos.normalize_path(repos_path):
break
# Stay within the limits of the build config
if config.min_rev and repos.rev_older_than(rev, config.min_rev):
break
if config.max_rev and repos.rev_older_than(config.max_rev, rev):
continue
# Make sure the repository directory isn't empty at this
# revision
old_node = repos.get_node(path, rev)
is_empty = True
for entry in old_node.get_entries():
is_empty = False
break
if is_empty:
continue
# For every target platform, check whether there's a build
# of this revision
for platform in TargetPlatform.select(env, config.name):
builds = list(Build.select(env, config.name, rev, platform.id))
if builds:
build = builds[0]
else:
build = None
yield platform, rev, build
class BuildQueue(object):
"""Enapsulates the build queue of an environment.
A build queue manages the the registration of build slaves and detection of
repository revisions that need to be built.
"""
def __init__(self, env, build_all=False, stabilize_wait=0, timeout=0):
"""Create the build queue.
:param env: the Trac environment
:param build_all: whether older revisions should be built
:param stabilize_wait: The time in seconds to wait before considering
the repository stable to create a build in the queue.
:param timeout: the time in seconds after which an in-progress build
should be considered orphaned, and reset to pending
state
"""
self.env = env
self.log = env.log
self.build_all = build_all
self.stabilize_wait = stabilize_wait
self.timeout = timeout
# Build scheduling
def get_build_for_slave(self, name, properties):
"""Check whether one of the pending builds can be built by the build
slave.
:param name: the name of the slave
:type name: `basestring`
:param properties: the slave configuration
:type properties: `dict`
:return: the allocated build, or `None` if no build was found
:rtype: `Build`
"""
self.log.debug('Checking for pending builds...')
self.reset_orphaned_builds()
# Iterate through pending builds by descending revision timestamp, to
# avoid the first configuration/platform getting all the builds
platforms = [p.id for p in self.match_slave(name, properties)]
builds_to_delete = []
build_found = False
for build in Build.select(self.env, status=Build.PENDING):
config_path = BuildConfig.fetch(self.env, name=build.config).path
_name, repos, _path = get_repos(self.env, config_path, None)
if self.should_delete_build(build, repos):
self.log.info('Scheduling build %d for deletion', build.id)
builds_to_delete.append(build)
elif build.platform in platforms:
build_found = True
break
if not build_found:
self.log.debug('No pending builds.')
build = None
# delete any obsolete builds
for build_to_delete in builds_to_delete:
build_to_delete.delete()
if build:
build.slave = name
build.slave_info.update(properties)
build.status = Build.IN_PROGRESS
build.update()
return build
def match_slave(self, name, properties):
"""Match a build slave against available target platforms.
:param name: the name of the slave
:type name: `basestring`
:param properties: the slave configuration
:type properties: `dict`
:return: the list of platforms the slave matched
"""
platforms = []
for config in BuildConfig.select(self.env):
for platform in TargetPlatform.select(self.env, config=config.name):
match = True
for propname, pattern in ifilter(None, platform.rules):
try:
propvalue = properties.get(propname)
if not propvalue or not re.match(pattern,
propvalue, re.I):
match = False
break
except re.error:
self.log.error('Invalid platform matching pattern "%s"',
pattern, exc_info=True)
match = False
break
if match:
self.log.debug('Slave %r matched target platform %r of '
'build configuration %r', name,
platform.name, config.name)
platforms.append(platform)
if not platforms:
self.log.warning('Slave %r matched none of the target platforms',
name)
return platforms
def populate(self):
"""Add a build for the next change on each build configuration to the
queue.
The next change is the latest repository check-in for which there isn't
a corresponding build on each target platform. Repeatedly calling this
method will eventually result in the entire change history of the build
configuration being in the build queue.
"""
builds = []
for config in BuildConfig.select(self.env):
platforms = []
for platform, rev, build in collect_changes(config):
if not self.build_all and platform.id in platforms:
# We've seen this platform already, so these are older
                    # builds that should only be built if build_all=True
self.log.debug('Ignoring older revisions for configuration '
'%r on %r', config.name, platform.name)
break
platforms.append(platform.id)
if build is None:
self.log.info('Enqueuing build of configuration "%s" at '
'revision [%s] on %s', config.name, rev,
platform.name)
_repos_name, repos, _repos_path = get_repos(
self.env, config.path, None)
rev_time = to_timestamp(repos.get_changeset(rev).date)
age = int(time.time()) - rev_time
if self.stabilize_wait and age < self.stabilize_wait:
self.log.info('Delaying build of revision %s until %s '
'seconds pass. Current age is: %s '
'seconds' % (rev, self.stabilize_wait,
age))
continue
build = Build(self.env, config=config.name,
platform=platform.id, rev=str(rev),
rev_time=rev_time)
builds.append(build)
for build in builds:
try:
build.insert()
except Exception, e:
# really only want to catch IntegrityErrors raised when
# a second slave attempts to add builds with the same
# (config, platform, rev) as an existing build.
self.log.info('Failed to insert build of configuration "%s" '
'at revision [%s] on platform [%s]: %s',
build.config, build.rev, build.platform, e)
raise
def reset_orphaned_builds(self):
"""Reset all in-progress builds to ``PENDING`` state if they've been
running so long that the configured timeout has been reached.
        This is used to clean up after slaves that have unexpectedly cancelled
a build without notifying the master, or are for some other reason not
reporting back status updates.
"""
if not self.timeout:
# If no timeout is set, none of the in-progress builds can be
# considered orphaned
return
with self.env.db_transaction as db:
now = int(time.time())
for build in Build.select(self.env, status=Build.IN_PROGRESS):
if now - build.last_activity < self.timeout:
# This build has not reached the timeout yet, assume it's still
# being executed
continue
self.log.info('Orphaning build %d. Last activity was %s (%s)' % \
(build.id, format_datetime(build.last_activity),
pretty_timedelta(build.last_activity)))
build.status = Build.PENDING
build.slave = None
build.slave_info = {}
build.started = 0
build.stopped = 0
build.last_activity = 0
for step in list(BuildStep.select(self.env, build=build.id)):
step.delete()
build.update()
Attachment.delete_all(self.env, 'build', build.resource.id)
            # the transaction commits when the with-block above exits
def should_delete_build(self, build, repos):
config = BuildConfig.fetch(self.env, build.config)
config_name = config and config.name \
or 'unknown config "%s"' % build.config
platform = TargetPlatform.fetch(self.env, build.platform)
# Platform may or may not exist anymore - get safe name for logging
platform_name = platform and platform.name \
or 'unknown platform "%s"' % build.platform
# Drop build if platform no longer exists
if not platform:
self.log.info('Dropping build of configuration "%s" at '
'revision [%s] on %s because the platform no longer '
'exists', config.name, build.rev, platform_name)
return True
        # Ignore pending builds for deactivated build configs
if not (config and config.active):
self.log.info('Dropping build of configuration "%s" at '
'revision [%s] on %s because the configuration is '
'deactivated', config_name, build.rev, platform_name)
return True
# Stay within the revision limits of the build config
if (config.min_rev and repos.rev_older_than(build.rev,
config.min_rev)) \
or (config.max_rev and repos.rev_older_than(config.max_rev,
build.rev)):
self.log.info('Dropping build of configuration "%s" at revision [%s] on '
'"%s" because it is outside of the revision range of the '
'configuration', config.name, build.rev, platform_name)
return True
# If not 'build_all', drop if a more recent revision is available
if not self.build_all and \
len(list(Build.select(self.env, config=build.config,
min_rev_time=build.rev_time, platform=build.platform))) > 1:
self.log.info('Dropping build of configuration "%s" at revision [%s] '
'on "%s" because a more recent build exists',
config.name, build.rev, platform_name)
return True
return False
| 40.973761 | 85 | 0.570656 | 1,644 | 14,054 | 4.800487 | 0.215937 | 0.015079 | 0.012544 | 0.015966 | 0.168018 | 0.153827 | 0.148758 | 0.134187 | 0.119742 | 0.084136 | 0 | 0.002661 | 0.35819 | 14,054 | 342 | 86 | 41.093567 | 0.872284 | 0.100114 | 0 | 0.179894 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.005291 | 0.042328 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f450a61b8e2b1852d0f1a4d826ca4c04fcbb6db | 10,638 | py | Python | aiida/orm/implementation/querybuilder.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | 1 | 2019-07-31T04:08:13.000Z | 2019-07-31T04:08:13.000Z | aiida/orm/implementation/querybuilder.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | aiida/orm/implementation/querybuilder.py | PercivalN/aiida-core | b215ed5a7ce9342bb7f671b67e95c1f474cc5940 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Backend query implementation classes"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
from aiida.common import exceptions
from aiida.common.lang import abstractclassmethod, type_check
from aiida.common.exceptions import InputValidationError
__all__ = ('BackendQueryBuilder',)
@six.add_metaclass(abc.ABCMeta)
class BackendQueryBuilder(object):
"""Backend query builder interface"""
# pylint: disable=invalid-name,too-many-public-methods,useless-object-inheritance
outer_to_inner_schema = None
inner_to_outer_schema = None
def __init__(self, backend):
"""
:param backend: the backend
"""
from . import backends
type_check(backend, backends.Backend)
self._backend = backend
self.inner_to_outer_schema = dict()
self.outer_to_inner_schema = dict()
@abc.abstractmethod
def Node(self):
"""
Decorated as a property, returns the implementation for DbNode.
        It needs to return a subclass of sqlalchemy.Base, which means that for different ORMs
a corresponding dummy-model must be written.
"""
@abc.abstractmethod
def Link(self):
"""
A property, decorated with @property. Returns the implementation for the DbLink
"""
@abc.abstractmethod
def Computer(self):
"""
A property, decorated with @property. Returns the implementation for the Computer
"""
@abc.abstractmethod
def User(self):
"""
A property, decorated with @property. Returns the implementation for the User
"""
@abc.abstractmethod
def Group(self):
"""
A property, decorated with @property. Returns the implementation for the Group
"""
@abc.abstractmethod
def AuthInfo(self):
"""
A property, decorated with @property. Returns the implementation for the AuthInfo
"""
@abc.abstractmethod
def Comment(self):
"""
A property, decorated with @property. Returns the implementation for the Comment
"""
@abc.abstractmethod
def Log(self):
"""
A property, decorated with @property. Returns the implementation for the Log
"""
@abc.abstractmethod
def table_groups_nodes(self):
"""
A property, decorated with @property. Returns the implementation for the many-to-many
relationship between group and nodes.
"""
@property
def AiidaNode(self):
"""
A property, decorated with @property. Returns the implementation for the AiiDA-class for Node
"""
from aiida.orm import Node
return Node
@abc.abstractmethod
def get_session(self):
"""
:returns: a valid session, an instance of sqlalchemy.orm.session.Session
"""
@abc.abstractmethod
def modify_expansions(self, alias, expansions):
"""
Modify names of projections if ** was specified.
This is important for the schema having attributes in a different table.
"""
@abstractclassmethod
def get_filter_expr_from_attributes(cls, operator, value, attr_key, column=None, column_name=None, alias=None): # pylint: disable=too-many-arguments
"""
        Returns a valid SQLAlchemy expression.
:param operator: The operator provided by the user ('==', '>', ...)
:param value: The value to compare with, e.g. (5.0, 'foo', ['a','b'])
:param str attr_key:
The path to that attribute as a tuple of values.
            E.g. if the attribute to filter by is the 2nd element in a list stored under the
key 'mylist', this is ('mylist', '2').
        :param column: Optional, an instance of sqlalchemy.orm.attributes.InstrumentedAttribute (alternatively, pass column_name).
:param str column_name: The name of the column, and the backend should get the InstrumentedAttribute.
:param alias: The aliased class.
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression
"""
@classmethod
def get_corresponding_properties(cls, entity_table, given_properties, mapper):
"""
This method returns a list of updated properties for a given list of properties.
If there is no update for the property, the given property is returned in the list.
"""
if entity_table in mapper.keys():
res = list()
for given_property in given_properties:
res.append(cls.get_corresponding_property(entity_table, given_property, mapper))
return res
return given_properties
@classmethod
def get_corresponding_property(cls, entity_table, given_property, mapper):
"""
        This method returns an updated property for a given property.
If there is no update for the property, the given property is returned.
"""
try:
# Get the mapping for the specific entity_table
property_mapping = mapper[entity_table]
try:
# Get the mapping for the specific property
return property_mapping[given_property]
except KeyError:
# If there is no mapping, the property remains unchanged
return given_property
except KeyError:
            # If it doesn't exist, it means that the given_property remains unchanged
return given_property
@classmethod
def get_filter_expr_from_column(cls, operator, value, column):
"""
        A method that returns a valid SQLAlchemy expression.
:param operator: The operator provided by the user ('==', '>', ...)
:param value: The value to compare with, e.g. (5.0, 'foo', ['a','b'])
        :param column: an instance of sqlalchemy.orm.attributes.InstrumentedAttribute or a similar column-like object (Cast, Label, ColumnClause)
:returns: An instance of sqlalchemy.sql.elements.BinaryExpression
"""
# Label is used because it is what is returned for the
# 'state' column by the hybrid_column construct
# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed
# pylint: disable=no-name-in-module,import-error
from sqlalchemy.sql.elements import Cast, Label
from sqlalchemy.orm.attributes import InstrumentedAttribute, QueryableAttribute
from sqlalchemy.sql.expression import ColumnClause
from sqlalchemy.types import String
if not isinstance(column, (Cast, InstrumentedAttribute, QueryableAttribute, Label, ColumnClause)):
raise TypeError('column ({}) {} is not a valid column'.format(type(column), column))
database_entity = column
if operator == '==':
expr = database_entity == value
elif operator == '>':
expr = database_entity > value
elif operator == '<':
expr = database_entity < value
elif operator == '>=':
expr = database_entity >= value
elif operator == '<=':
expr = database_entity <= value
elif operator == 'like':
# the like operator expects a string, so we cast to avoid problems
# with fields like UUID, which don't support the like operator
expr = database_entity.cast(String).like(value)
elif operator == 'ilike':
expr = database_entity.ilike(value)
elif operator == 'in':
expr = database_entity.in_(value)
else:
raise InputValidationError('Unknown operator {} for filters on columns'.format(operator))
return expr
@abc.abstractmethod
def get_projectable_attribute(self, alias, column_name, attrpath, cast=None, **kwargs):
pass
@abc.abstractmethod
def get_aiida_res(self, key, res):
"""
        Some instances returned by the ORM (Django or SA) need to be converted
        to AiiDA instances (e.g. nodes)
:param key: the key that this entry would be returned with
:param res: the result returned by the query
:returns: an aiida-compatible instance
"""
@abc.abstractmethod
def yield_per(self, query, batch_size):
"""
:param int batch_size: Number of rows to yield per step
        Yields *batch_size* rows at a time
:returns: a generator
"""
@abc.abstractmethod
def count(self, query):
"""
:returns: the number of results
"""
@abc.abstractmethod
def first(self, query):
"""
Executes query in the backend asking for one instance.
:returns: One row of aiida results
"""
@abc.abstractmethod
def iterall(self, query, batch_size, tag_to_index_dict):
"""
:return: An iterator over all the results of a list of lists.
"""
@abc.abstractmethod
def iterdict(self, query, batch_size, tag_to_projected_properties_dict, tag_to_alias_map):
"""
:returns: An iterator over all the results of a list of dictionaries.
"""
@abc.abstractmethod
def get_column_names(self, alias):
"""
Return the column names of the given table (alias).
"""
def get_column(self, colname, alias): # pylint: disable=no-self-use
"""
Return the column for a given projection.
"""
try:
return getattr(alias, colname)
except AttributeError:
raise exceptions.InputValidationError("{} is not a column of {}\n"
"Valid columns are:\n"
"{}".format(
colname,
alias,
'\n'.join(alias._sa_class_manager.mapper.c.keys()) # pylint: disable=protected-access
))
| 36.682759 | 153 | 0.601805 | 1,185 | 10,638 | 5.305485 | 0.258228 | 0.051376 | 0.060442 | 0.050899 | 0.276284 | 0.24161 | 0.234293 | 0.224749 | 0.188484 | 0.188484 | 0 | 0.001485 | 0.303629 | 10,638 | 289 | 154 | 36.809689 | 0.847192 | 0.42038 | 0 | 0.239669 | 0 | 0 | 0.031997 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214876 | false | 0.008264 | 0.115702 | 0 | 0.421488 | 0.008264 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f4b0ed4eea9580bec7a5e2d579164110301a866 | 4,095 | py | Python | DTL_tests/unittests/test_api.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 1 | 2015-03-23T18:52:12.000Z | 2015-03-23T18:52:12.000Z | DTL_tests/unittests/test_api.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | null | null | null | DTL_tests/unittests/test_api.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 2 | 2017-05-21T12:50:41.000Z | 2021-10-17T03:32:45.000Z | import os
import time
import unittest
from DTL.api import *
class TestCaseApiUtils(unittest.TestCase):
def setUp(self):
apiUtils.synthesize(self, 'mySynthesizeVar', None)
self.bit = apiUtils.BitTracker.getBit(self)
def test_wildcardToRe(self):
self.assertEquals(apiUtils.wildcardToRe('c:\CIG\main\*.*'),
'(?i)c\\:\\\\CIG\\\\main\\\\[^\\\\]*\\.[^\\\\]*$')
self.assertEquals(apiUtils.wildcardToRe('c:\CIG\main\*.*'),
apiUtils.wildcardToRe('c:/CIG/main/*.*'))
def test_synthesize(self):
self.assertIn('_mySynthesizeVar', self.__dict__)
self.assertTrue(hasattr(self, 'mySynthesizeVar'))
self.assertTrue(hasattr(self, 'getMySynthesizeVar'))
self.assertTrue(hasattr(self, 'setMySynthesizeVar'))
self.assertEqual(self.getMySynthesizeVar(), self.mySynthesizeVar)
def test_getClassName(self):
self.assertEqual(apiUtils.getClassName(self), 'TestCaseApiUtils')
def test_bittracker(self):
self.assertEqual(apiUtils.BitTracker.getBit(self), self.bit)
class TestCaseDotifyDict(unittest.TestCase):
def setUp(self):
self.dotifydict = DotifyDict({'one':{'two':{'three':'value'}}})
def test_dotifydict(self):
self.assertEquals(self.dotifydict.one.two, {'three':'value'})
self.dotifydict.one.two.update({'three':3,'four':4})
self.assertEquals(self.dotifydict.one.two.four, 4)
self.assertEquals(self.dotifydict.one, self.dotifydict.one)
self.assertIn('two.three', (self.dotifydict.one))
self.assertEquals(str(self.dotifydict), "DotifyDict(datadict={'one': DotifyDict(datadict={'two': DotifyDict(datadict={'four': 4, 'three': 3})})})")
self.assertEquals(self.dotifydict.one.two, eval(str(self.dotifydict.one.two)))
class TestCasePath(unittest.TestCase):
def setUp(self):
self.filepath = Path.getTempPath()
def test_path(self):
temp_path = Path.getTempPath()
self.assertEquals(self.filepath, temp_path)
self.assertEquals(self.filepath.name, temp_path.name)
self.assertEquals(self.filepath.parent, temp_path.parent)
self.assertIn(self.filepath.parent.parent.name, self.filepath)
myPathSepTest = Path('c:\\Users/krockman/documents').join('mytest')
self.assertEquals(myPathSepTest, os.path.join('c:','Users','krockman','documents','mytest'))
self.assertEquals({'TestKey', myPathSepTest},{'TestKey',os.path.join('c:','Users','krockman','documents','mytest')})
class TestCaseDocument(unittest.TestCase):
def setUp(self):
self.doc = Document({'Testing':'min'})
self.doc.filepath = Path.getTempPath().join('document.dat')
def test_document(self):
self.assertEquals(self.doc.filepath, Path.getTempPath().join('document.dat'))
self.assertEquals(self.doc, eval(str(self.doc)))
self.doc.save()
self.assertTrue(self.doc.filepath.exists())
def tearDown(self):
self.doc.filepath.remove()
class TestCaseVersion(unittest.TestCase):
def setUp(self):
self.version = Version('2.0.5.Beta')
def test_version(self):
self.assertEquals(self.version,(2,0,5,'Beta'))
self.assertEquals(self.version,'2.0.5.Beta')
self.assertEquals(self.version,eval(str(self.version)))
self.version.update({'status':VersionStatus.Gold})
self.assertNotEquals(self.version,(2,0,5,'Beta'))
class TestCaseDecorators(unittest.TestCase):
@Safe
def test_safe(self):
        1/0  # deliberately raises ZeroDivisionError; @Safe is expected to swallow it
@Timer
def test_timer(self, timer):
for i in range(5):
time.sleep(2)
timer.newLap(i)
@Profile
def test_profile(self):
for i in range(5):
            (1 / 20 * 5 - 10 + 15) == 1  # arbitrary arithmetic to give the profiler work
def main():
unittest.main(verbosity=2)
if __name__ == '__main__':
main() | 35.301724 | 155 | 0.616361 | 444 | 4,095 | 5.621622 | 0.218468 | 0.108974 | 0.096154 | 0.048077 | 0.311699 | 0.259615 | 0.171074 | 0.105369 | 0.038061 | 0.038061 | 0 | 0.010133 | 0.228816 | 4,095 | 116 | 156 | 35.301724 | 0.780241 | 0 | 0 | 0.108434 | 0 | 0.012048 | 0.127441 | 0.03833 | 0 | 0 | 0 | 0 | 0.337349 | 1 | 0.216867 | false | 0 | 0.048193 | 0 | 0.337349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f4f261effbec9ffc0f629f4f48d599f4fe3ee02 | 752 | py | Python | be/model/db_conn.py | CharlesDDDD/bookstore | 4052a06f5162100f14c4b762f058204792ceb3c3 | [
"Apache-2.0"
] | null | null | null | be/model/db_conn.py | CharlesDDDD/bookstore | 4052a06f5162100f14c4b762f058204792ceb3c3 | [
"Apache-2.0"
] | null | null | null | be/model/db_conn.py | CharlesDDDD/bookstore | 4052a06f5162100f14c4b762f058204792ceb3c3 | [
"Apache-2.0"
] | null | null | null | from be.table.user import User
from be.table.user_store import User_Store
from be.table.store import Store
class DBConn:
def user_id_exist(self, user_id):
row = User.query.filter(User.user_id == user_id).first()
if row is None:
return False
else:
return True
def book_id_exist(self, store_id, book_id):
row = Store.query.filter(Store.store_id == store_id, Store.book_id == book_id).first()
if row is None:
return False
else:
return True
def store_id_exist(self, store_id):
row = User_Store.query.filter(User_Store.store_id == store_id).first()
if row is None:
return False
else:
return True
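# Usage sketch (DBConn as defined above; the user id is illustrative):
#   conn = DBConn()
#   if not conn.user_id_exist('some_user'):
#       ...  # e.g. reject the request for a non-existent user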
| 26.857143 | 94 | 0.610372 | 109 | 752 | 4.009174 | 0.220183 | 0.112128 | 0.075515 | 0.08238 | 0.473684 | 0.308924 | 0.308924 | 0.308924 | 0.308924 | 0.308924 | 0 | 0 | 0.305851 | 752 | 27 | 95 | 27.851852 | 0.837165 | 0 | 0 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.136364 | 0 | 0.590909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
3f52ca8d87119aa7ada69b18dd59026206c97a21 | 2,861 | py | Python | tardis/tardis_portal/auth/localdb_auth.py | nrmay/mytardis | 34a460cde6a847c66a6ec3725182c09dc9167bd5 | [
"Apache-2.0"
] | null | null | null | tardis/tardis_portal/auth/localdb_auth.py | nrmay/mytardis | 34a460cde6a847c66a6ec3725182c09dc9167bd5 | [
"Apache-2.0"
] | null | null | null | tardis/tardis_portal/auth/localdb_auth.py | nrmay/mytardis | 34a460cde6a847c66a6ec3725182c09dc9167bd5 | [
"Apache-2.0"
] | null | null | null | '''
Local DB Authentication module.
.. moduleauthor:: Gerson Galang <gerson.galang@versi.edu.au>
'''
import logging
from django.contrib.auth.models import User, Group
from django.contrib.auth.backends import ModelBackend
from tardis.tardis_portal.auth.interfaces import AuthProvider, GroupProvider, UserProvider
logger = logging.getLogger(__name__)
auth_key = u'localdb'
auth_display_name = u'Local DB'
_modelBackend = ModelBackend()
class DjangoAuthBackend(AuthProvider):
"""Authenticate against Django's Model Backend.
"""
def authenticate(self, request):
"""authenticate a user, this expect the user will be using
form based auth and the *username* and *password* will be
passed in as **POST** variables.
:param request: a HTTP Request instance
:type request: :class:`django.http.HttpRequest`
"""
username = request.POST['username']
password = request.POST['password']
if not username or not password:
return None
return _modelBackend.authenticate(username, password)
def get_user(self, user_id):
try:
user = User.objects.get(username=user_id)
except User.DoesNotExist:
user = None
return user
class DjangoGroupProvider(GroupProvider):
name = u'django_group'
def getGroups(self, user):
"""return an iteration of the available groups.
"""
groups = user.groups.all()
return [g.id for g in groups]
def getGroupById(self, id):
"""return the group associated with the id::
{"id": 123,
"display": "Group Name",}
"""
groupObj = Group.objects.get(id=id)
if groupObj:
return {'id': id, 'display': groupObj.name}
return None
def searchGroups(self, **filter):
result = []
groups = Group.objects.filter(**filter)
for g in groups:
users = [u.username for u in User.objects.filter(groups=g)]
result += [{'id': g.id,
'display': g.name,
'members': users}]
return result
class DjangoUserProvider(UserProvider):
name = u'django_user'
def getUserById(self, id):
"""
return the user dictionary in the format of::
{"id": 123,
"first_name": "John",
"last_name": "Smith",
"email": "john@example.com"}
"""
try:
userObj = User.objects.get(username=id)
return {'id': id,
'first_name': userObj.first_name,
'last_name': userObj.last_name,
'email': userObj.email}
except User.DoesNotExist:
return None
django_user = DjangoUserProvider.name
django_group = DjangoGroupProvider.name
| 26.009091 | 90 | 0.595246 | 311 | 2,861 | 5.401929 | 0.350482 | 0.009524 | 0.020238 | 0.025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002994 | 0.299546 | 2,861 | 109 | 91 | 26.247706 | 0.835329 | 0.231737 | 0 | 0.132075 | 0 | 0 | 0.051496 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0.056604 | 0.075472 | 0 | 0.45283 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
3f6a18b0d4c80fcdd062def647e4e3d88b2df3b9 | 55,602 | py | Python | usdzconvert/usdStageWithFbx.py | summertriangle-dev/usdzconvert-docker | 9953845f3a83f8cc3d5380a4ccae8bc39753d550 | [
"MIT"
] | 3 | 2021-03-10T00:34:18.000Z | 2021-10-14T02:52:41.000Z | usdzconvert/usdStageWithFbx.py | summertriangle-dev/usdzconvert-docker | 9953845f3a83f8cc3d5380a4ccae8bc39753d550 | [
"MIT"
] | null | null | null | usdzconvert/usdStageWithFbx.py | summertriangle-dev/usdzconvert-docker | 9953845f3a83f8cc3d5380a4ccae8bc39753d550 | [
"MIT"
] | null | null | null | from pxr import *
import os, os.path
import numpy
import re
import usdUtils
import math
import imp
usdStageWithFbxLoaded = True
try:
imp.find_module('fbx')
import fbx
except ImportError:
usdUtils.printError("Failed to import fbx module. Please install FBX Python bindings from http://www.autodesk.com/fbx and add path to FBX Python SDK to your PYTHONPATH")
usdStageWithFbxLoaded = False
class ConvertError(Exception):
pass
def printErrorAndExit(message):
usdUtils.printError(message)
raise ConvertError()
def GfMatrix4dWithFbxMatrix(m):
return Gf.Matrix4d(
m[0][0], m[0][1], m[0][2], m[0][3],
m[1][0], m[1][1], m[1][2], m[1][3],
m[2][0], m[2][1], m[2][2], m[2][3],
m[3][0], m[3][1], m[3][2], m[3][3])
def getFbxNodeTransforms(fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform())
def getFbxNodeGeometricTransform(fbxNode):
# geometry transform is an additional transform for geometry
# it is relative to the node transform
    # this transform is not propagated to the child nodes in the scene graph
translation = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot)
rotation = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot)
scale = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)
return fbx.FbxAMatrix(translation, rotation, scale)
def convertUVTransformFromFBX(translation, scale, rotation):
# from FBX to Blender
scale[0] = 1.0 / scale[0]
scale[1] = 1.0 / scale[1]
rotation = -rotation
# Blender: Tuv = T * R * S
# USD: Tuv = S * R * T
scaleMatrix = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], 1, 1))
inverseScaleMatrix = Gf.Matrix4d(Gf.Vec4d(1.0 / scale[0], 1.0 / scale[1], 1, 1))
rotationMatrix = Gf.Matrix4d(
math.cos(rotation), math.sin(rotation), 0, 0,
-math.sin(rotation), math.cos(rotation), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1)
inverseRotationMatrix = rotationMatrix.GetTranspose()
translateMatrix = Gf.Matrix4d(1)
translateMatrix.SetTranslate(Gf.Vec3d(translation[0], translation[1], 0))
# translate matrix from Blender to USD
transform = scaleMatrix * rotationMatrix * translateMatrix * inverseRotationMatrix * inverseScaleMatrix
translation3d = transform.ExtractTranslation()
translation[0] = translation3d[0]
translation[1] = translation3d[1]
return translation, scale, math.degrees(rotation)
class FbxNodeManager(usdUtils.NodeManager):
def __init__(self, value=None):
usdUtils.NodeManager.__init__(self)
def overrideGetName(self, fbxNode):
return usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
def overrideGetChildren(self, fbxNode):
children = []
for childIdx in xrange(fbxNode.GetChildCount()):
children.append(fbxNode.GetChild(childIdx))
return children
def overrideGetLocalTransformGfMatrix4d(self, fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateLocalTransform())
def overrideGetWorldTransformGfMatrix4d(self, fbxNode):
return GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform())
def overrideGetParent(self, fbxNode):
return fbxNode.GetParent()
class AnimProperty:
def __init__(self, fbxAnimLayer, fbxProperty, timeSpans):
self.fbxAnimLayer = fbxAnimLayer
self.fbxProperty = fbxProperty
self.timeSpans = timeSpans
class FbxConverter:
def __init__(self, fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose):
self.verbose = verbose
self.legacyModifier = legacyModifier
self.copyTextures = copyTextures
self.searchPaths = searchPaths
self.asset = usdUtils.Asset(usdPath)
self.usdStage = None
self.usdMaterials = {}
self.nodeId = 0
self.nodePaths = {}
self.fbxSkinToSkin = {}
self.startAnimationTime = 0
self.stopAnimationTime = 0
self.skeletonByNode = {} # collect skinned mesh to construct later
self.blendShapeByNode = {} # collect blend shapes to construct later
        self.copiedTextures = {} # avoid copying textures more than once
self.extent = [[], []]
self.fbxScene = None
filenameFull = fbxPath.split('/')[-1]
self.srcFolder = fbxPath[:len(fbxPath)-len(filenameFull)]
filenameFull = usdPath.split('/')[-1]
self.dstFolder = usdPath[:len(usdPath)-len(filenameFull)]
self.loadFbxScene(fbxPath)
self.fps = fbx.FbxTime.GetFrameRate(fbx.FbxTime.GetGlobalTimeMode())
self.asset.setFPS(self.fps)
self.nodeManager = FbxNodeManager()
self.skinning = usdUtils.Skinning(self.nodeManager)
self.shapeBlending = usdUtils.ShapeBlending()
def loadFbxScene(self, fbxPath):
fbxManager = fbx.FbxManager.Create()
if not fbxManager:
printErrorAndExit("failed to create FBX manager object")
self.fbxManager = fbxManager
fbxIOSettings = fbx.FbxIOSettings.Create(fbxManager, fbx.IOSROOT)
fbxManager.SetIOSettings(fbxIOSettings)
fbxImporter = fbx.FbxImporter.Create(fbxManager, "")
result = fbxImporter.Initialize(fbxPath, -1, fbxManager.GetIOSettings())
if not result:
printErrorAndExit("failed to initialize FbxImporter object")
if fbxImporter.IsFBX():
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_MATERIAL, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_TEXTURE, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_EMBEDDED, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_SHAPE, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GOBO, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_ANIMATION, True)
fbxManager.GetIOSettings().SetBoolProp(fbx.EXP_FBX_GLOBAL_SETTINGS, True)
self.fbxScene = fbx.FbxScene.Create(fbxManager, "")
result = fbxImporter.Import(self.fbxScene)
fbxImporter.Destroy()
if not result:
printErrorAndExit("failed to load FBX scene")
def getTextureProperties(self, materialProperty):
if materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId)) > 0:
fbxFileTexture = materialProperty.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxFileTexture.ClassId), 0)
texCoordSet = 'st'
if fbxFileTexture.UVSet is not None:
texCoordSet = str(fbxFileTexture.UVSet.Get())
if texCoordSet == '' or texCoordSet == 'default':
texCoordSet = 'st'
else:
texCoordSet = usdUtils.makeValidIdentifier(texCoordSet)
wrapS = usdUtils.WrapMode.repeat
wrapT = usdUtils.WrapMode.repeat
if fbxFileTexture.GetWrapModeU() == fbx.FbxTexture.eClamp:
wrapS = usdUtils.WrapMode.clamp
if fbxFileTexture.GetWrapModeV() == fbx.FbxTexture.eClamp:
wrapT = usdUtils.WrapMode.clamp
# texture transform
mapTransform = None
translation = [fbxFileTexture.GetTranslationU(), fbxFileTexture.GetTranslationV()]
scale = [fbxFileTexture.GetScaleU(), fbxFileTexture.GetScaleV()]
rotation = fbxFileTexture.GetRotationW()
if (translation[0] != 0 or translation[1] != 0 or
scale[0] != 1 or scale[1] != 1 or
rotation != 0):
(translation, scale, rotation) = convertUVTransformFromFBX(translation, scale, rotation)
mapTransform = usdUtils.MapTransform(translation, scale, rotation)
return fbxFileTexture.GetFileName(), texCoordSet, wrapS, wrapT, mapTransform
elif materialProperty.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxLayeredTexture.ClassId)) > 0:
pass
return '', 'st', usdUtils.WrapMode.repeat, usdUtils.WrapMode.repeat, None
def processMaterialProperty(self, input, propertyName, property, factorProperty, channels, material, fbxMaterial):
value = None
factor = float(factorProperty.Get()) if factorProperty is not None else None
if property is not None:
if channels == 'rgb':
value = [property.Get()[0], property.Get()[1], property.Get()[2]]
else:
if input == usdUtils.InputName.opacity:
transparency = property.Get()[0]
if factor is not None:
transparency = transparency * factor
factor = None
value = 1.0 - transparency
else:
value = float(property.Get()[0])
srcTextureFilename = '' # source texture filename on drive
textureFilename = '' # valid for USD
materialProperty = fbxMaterial.FindProperty(propertyName)
if materialProperty.IsValid():
srcTextureFilename, texCoordSet, wrapS, wrapT, mapTransform = self.getTextureProperties(materialProperty)
srcTextureFilename = usdUtils.resolvePath(srcTextureFilename, self.srcFolder, self.searchPaths)
textureFilename = usdUtils.makeValidPath(srcTextureFilename)
if textureFilename != '' and (self.copyTextures or srcTextureFilename != textureFilename):
if srcTextureFilename in self.copiedTextures:
textureFilename = self.copiedTextures[srcTextureFilename]
else:
newTextureFilename = 'textures/' + os.path.basename(textureFilename)
                # do not overwrite a different texture that has the same basename
subfolderIdx = 0
while newTextureFilename in self.copiedTextures.values():
newTextureFilename = 'textures/' + str(subfolderIdx) + '/' + os.path.basename(textureFilename)
subfolderIdx += 1
usdUtils.copy(srcTextureFilename, self.dstFolder + newTextureFilename, self.verbose)
self.copiedTextures[srcTextureFilename] = newTextureFilename
textureFilename = newTextureFilename
if textureFilename != '':
scale = None
if factor is not None:
if channels == 'rgb':
scale = [factor, factor, factor]
else:
scale = factor
material.inputs[input] = usdUtils.Map(channels, textureFilename, value, texCoordSet, wrapS, wrapT, scale, mapTransform)
else:
if value is not None:
if factor is not None:
if channels == 'rgb':
material.inputs[input] = [value[0] * factor, value[1] * factor, value[2] * factor]
else:
material.inputs[input] = value * factor
else:
material.inputs[input] = value
def processMaterials(self):
for i in range(self.fbxScene.GetMaterialCount()):
fbxMaterial = self.fbxScene.GetMaterial(i)
material = usdUtils.Material(fbxMaterial.GetName().split(":")[-1])
normalMap = fbxMaterial.NormalMap if hasattr(fbxMaterial, 'NormalMap') else None
self.processMaterialProperty(usdUtils.InputName.normal, fbx.FbxSurfaceMaterial.sNormalMap, normalMap, None, 'rgb', material, fbxMaterial)
diffuse = fbxMaterial.Diffuse if hasattr(fbxMaterial, 'Diffuse') else None
diffuseFactor = fbxMaterial.DiffuseFactor if hasattr(fbxMaterial, 'DiffuseFactor') else None
self.processMaterialProperty(usdUtils.InputName.diffuseColor, fbx.FbxSurfaceMaterial.sDiffuse, diffuse, diffuseFactor, 'rgb', material, fbxMaterial)
transparentColor = fbxMaterial.TransparentColor if hasattr(fbxMaterial, 'TransparentColor') else None
transparencyFactor = fbxMaterial.TransparencyFactor if hasattr(fbxMaterial, 'TransparencyFactor') else None
self.processMaterialProperty(usdUtils.InputName.opacity, fbx.FbxSurfaceMaterial.sTransparentColor, transparentColor, transparencyFactor, 'a', material, fbxMaterial)
emissive = fbxMaterial.Emissive if hasattr(fbxMaterial, 'Emissive') else None
emissiveFactor = fbxMaterial.EmissiveFactor if hasattr(fbxMaterial, 'EmissiveFactor') else None
self.processMaterialProperty(usdUtils.InputName.emissiveColor, fbx.FbxSurfaceMaterial.sEmissive, emissive, emissiveFactor, 'rgb', material, fbxMaterial)
ambient = fbxMaterial.Ambient if hasattr(fbxMaterial, 'Ambient') else None
ambientFactor = fbxMaterial.AmbientFactor if hasattr(fbxMaterial, 'AmbientFactor') else None
self.processMaterialProperty(usdUtils.InputName.occlusion, fbx.FbxSurfaceMaterial.sAmbient, ambient, ambientFactor, 'r', material, fbxMaterial)
# 'metallic', 'roughness' ?
usdMaterial = material.makeUsdMaterial(self.asset)
if self.legacyModifier is not None:
self.legacyModifier.opacityAndDiffuseOneTexture(material)
self.usdMaterials[fbxMaterial.GetName()] = usdMaterial
def prepareAnimations(self):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
if self.verbose:
print 'No animation found'
return
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), 0)
timeSpan = fbxAnimStack.GetLocalTimeSpan()
self.startAnimationTime = timeSpan.GetStart().GetSecondDouble()
self.stopAnimationTime = timeSpan.GetStop().GetSecondDouble()
self.asset.extentTime(self.startAnimationTime)
self.asset.extentTime(self.stopAnimationTime)
def processControlPoints(self, fbxMesh, usdMesh):
points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()]
extent = Gf.Range3f()
for point in points:
extent.UnionWith(point)
usdMesh.CreatePointsAttr(points)
usdMesh.CreateExtentAttr([Gf.Vec3f(extent.GetMin()), Gf.Vec3f(extent.GetMax())])
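        # merge this mesh's bounds into the running whole-scene extent;
        # self.extent keeps the [min, max] corners accumulated over all meshes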
if not any(self.extent):
self.extent[0] = extent.GetMin()
self.extent[1] = extent.GetMax()
else:
for i in range(3):
self.extent[0][i] = min(self.extent[0][i], extent.GetMin()[i])
self.extent[1][i] = max(self.extent[1][i], extent.GetMax()[i])
def getVec3fArrayWithLayerElements(self, elements, fbxLayerElements):
elementsArray = fbxLayerElements.GetDirectArray()
for i in xrange(elementsArray.GetCount()):
element = elementsArray.GetAt(i)
elements.append(Gf.Vec3f(element[0], element[1], element[2]))
def getIndicesWithLayerElements(self, fbxMesh, fbxLayerElements):
mappingMode = fbxLayerElements.GetMappingMode()
referenceMode = fbxLayerElements.GetReferenceMode()
indexToDirect = (
referenceMode == fbx.FbxLayerElement.eIndexToDirect or
referenceMode == fbx.FbxLayerElement.eIndex)
indices = []
if mappingMode == fbx.FbxLayerElement.eByControlPoint:
if indexToDirect:
                for controlPointIdx in xrange(fbxMesh.GetControlPointsCount()):
                    indices.append(fbxLayerElements.GetIndexArray().GetAt(controlPointIdx))
elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex:
pointIdx = 0
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
for vertexIdx in xrange(fbxMesh.GetPolygonSize(polygonIdx)):
if indexToDirect:
indices.append(fbxLayerElements.GetIndexArray().GetAt(pointIdx))
else:
indices.append(pointIdx)
pointIdx += 1
elif mappingMode == fbx.FbxLayerElement.eByPolygon:
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
if indexToDirect:
indices.append(fbxLayerElements.GetIndexArray().GetAt(polygonIdx))
else:
indices.append(polygonIdx)
return indices
def getInterpolationWithLayerElements(self, fbxLayerElements):
mappingMode = fbxLayerElements.GetMappingMode()
if mappingMode == fbx.FbxLayerElement.eByControlPoint:
return UsdGeom.Tokens.vertex
elif mappingMode == fbx.FbxLayerElement.eByPolygonVertex:
return UsdGeom.Tokens.faceVarying
elif mappingMode == fbx.FbxLayerElement.eByPolygon:
return UsdGeom.Tokens.uniform
elif mappingMode == fbx.FbxLayerElement.eAllSame:
return UsdGeom.Tokens.constant
elif mappingMode == fbx.FbxLayerElement.eByEdge:
usdUtils.printWarning("Mapping mode eByEdge for layer elements is not supported.")
return ''
def processNormals(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerNormals = fbxMesh.GetLayer(layerIdx).GetNormals()
if fbxLayerNormals is None:
continue
normals = []
self.getVec3fArrayWithLayerElements(normals, fbxLayerNormals)
if not any(normals):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerNormals)
interpolation = self.getInterpolationWithLayerElements(fbxLayerNormals)
normalPrimvar = usdMesh.CreatePrimvar('normals', Sdf.ValueTypeNames.Normal3fArray, interpolation)
normalPrimvar.Set(normals)
if len(indices) != 0:
normalPrimvar.SetIndices(Vt.IntArray(indices))
break # normals can be in one layer only
def processUVs(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerUVs = fbxMesh.GetLayer(layerIdx).GetUVs() # get diffuse texture uv-s
if fbxLayerUVs is None:
continue
uvs = []
uvArray = fbxLayerUVs.GetDirectArray()
for i in xrange(uvArray.GetCount()):
uv = uvArray.GetAt(i)
uvs.append(Gf.Vec2f(uv[0], uv[1]))
if not any(uvs):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerUVs)
interpolation = self.getInterpolationWithLayerElements(fbxLayerUVs)
texCoordSet = 'st'
uvSets = fbxMesh.GetLayer(layerIdx).GetUVSets()
if len(uvSets) > 0:
fbxLayerElementUV = fbxMesh.GetLayer(layerIdx).GetUVSets()[0]
texCoordSet = str(fbxLayerElementUV.GetName())
if layerIdx == 0 or texCoordSet == '' or texCoordSet == 'default':
texCoordSet = 'st'
else:
texCoordSet = usdUtils.makeValidIdentifier(texCoordSet)
uvPrimvar = usdMesh.CreatePrimvar(texCoordSet, Sdf.ValueTypeNames.Float2Array, interpolation)
uvPrimvar.Set(uvs)
if len(indices) != 0:
uvPrimvar.SetIndices(Vt.IntArray(indices))
def processVertexColors(self, fbxMesh, usdMesh, vertexIndices):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerColors = fbxMesh.GetLayer(layerIdx).GetVertexColors()
if fbxLayerColors is None:
continue
colors = []
colorArray = fbxLayerColors.GetDirectArray()
for i in xrange(colorArray.GetCount()):
fbxColor = colorArray.GetAt(i)
colors.append(Gf.Vec3f(fbxColor.mRed, fbxColor.mGreen, fbxColor.mBlue))
if not any(colors):
continue
indices = self.getIndicesWithLayerElements(fbxMesh, fbxLayerColors)
interpolation = self.getInterpolationWithLayerElements(fbxLayerColors)
displayColorPrimvar = usdMesh.CreateDisplayColorPrimvar(interpolation)
displayColorPrimvar.Set(colors)
if len(indices) != 0:
displayColorPrimvar.SetIndices(Vt.IntArray(indices))
break # vertex colors can be in one layer only
def applySkinning(self, fbxNode, fbxSkin, usdMesh, indices):
skin = self.fbxSkinToSkin[fbxSkin]
skeleton = skin.skeleton
maxPointIndex = 0
for clusterIdx in range(fbxSkin.GetClusterCount()):
fbxCluster = fbxSkin.GetCluster(clusterIdx)
for i in range(fbxCluster.GetControlPointIndicesCount()):
pointIndex = fbxCluster.GetControlPointIndices()[i]
if maxPointIndex < pointIndex:
maxPointIndex = pointIndex
vertexCount = maxPointIndex + 1 # should be equal to number of vertices: max(indices) + 1
jointIndicesPacked = [[] for i in range(vertexCount)]
weightsPacked = [[] for i in range(vertexCount)]
for clusterIdx in range(fbxSkin.GetClusterCount()):
fbxCluster = fbxSkin.GetCluster(clusterIdx)
for i in range(fbxCluster.GetControlPointIndicesCount()):
pointIndex = fbxCluster.GetControlPointIndices()[i]
jointIndicesPacked[pointIndex].append(skin.remapIndex(clusterIdx))
weightsPacked[pointIndex].append(float(fbxCluster.GetControlPointWeights()[i]))
components = 0
for indicesPerVertex in jointIndicesPacked:
if components < len(indicesPerVertex):
components = len(indicesPerVertex)
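        # flatten to fixed-width per-vertex arrays: vertex i owns the slots
        # [i * components, (i + 1) * components); unfilled slots keep joint 0
        # with weight 0, which the normalization below leaves harmless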
jointIndices = [0] * vertexCount * components
weights = [float(0)] * vertexCount * components
for i in range(vertexCount):
indicesPerVertex = jointIndicesPacked[i]
for j in range(len(indicesPerVertex)):
jointIndices[i * components + j] = indicesPerVertex[j]
weights[i * components + j] = weightsPacked[i][j]
weights = Vt.FloatArray(weights)
UsdSkel.NormalizeWeights(weights, components)
usdSkelBinding = UsdSkel.BindingAPI(usdMesh)
usdSkelBinding.CreateJointIndicesPrimvar(False, components).Set(jointIndices)
usdSkelBinding.CreateJointWeightsPrimvar(False, components).Set(weights)
bindTransformWasNotFound = True
bindTransform = Gf.Matrix4d(1)
for i in range(self.fbxScene.GetPoseCount()):
fbxPose = self.fbxScene.GetPose(i)
if fbxPose is None:
continue
nodeIndex = fbxPose.Find(fbxNode)
if nodeIndex > -1 and (fbxPose.IsBindPose() or not fbxPose.IsLocalMatrix(nodeIndex)):
bindTransform = GfMatrix4dWithFbxMatrix(fbxPose.GetMatrix(nodeIndex))
bindTransformWasNotFound = False
break
if bindTransformWasNotFound and fbxSkin.GetClusterCount() > 0:
if self.verbose:
usdUtils.printWarning("can't find a bind pose for mesh " + fbxNode.GetName() + ". Trying to calculate.")
# FBX stores bind transform matrix for the skin in each cluster
# get it from the first one
fbxCluster = fbxSkin.GetCluster(0)
fbxBindTransform = fbx.FbxAMatrix()
fbxBindTransform = fbxCluster.GetTransformMatrix(fbxBindTransform)
bindTransform = GfMatrix4dWithFbxMatrix(fbxBindTransform)
bindTransform = GfMatrix4dWithFbxMatrix(getFbxNodeGeometricTransform(fbxNode)) * bindTransform
usdSkelBinding.CreateGeomBindTransformAttr(bindTransform)
usdSkelBinding.CreateSkeletonRel().AddTarget(skeleton.usdSkeleton.GetPath())
if self.legacyModifier is not None:
self.legacyModifier.addSkelAnimToMesh(usdMesh, skeleton)
def bindRigidDeformation(self, fbxNode, usdMesh, skeleton):
bindTransform = GfMatrix4dWithFbxMatrix(fbxNode.EvaluateGlobalTransform())
bindTransform = GfMatrix4dWithFbxMatrix(getFbxNodeGeometricTransform(fbxNode)) * bindTransform
skeleton.bindRigidDeformation(fbxNode, usdMesh, GfMatrix4dWithFbxMatrix(bindTransform))
if self.legacyModifier is not None:
self.legacyModifier.addSkelAnimToMesh(usdMesh, skeleton)
def bindMaterials(self, fbxMesh, usdMesh):
for layerIdx in xrange(fbxMesh.GetLayerCount()):
fbxLayerMaterials = fbxMesh.GetLayer(layerIdx).GetMaterials()
if not fbxLayerMaterials:
continue
# looks like there is a bug in FBX SDK:
# GetDirectArray() does not work if .GetCount() has not been called
materialsCount = fbxLayerMaterials.GetDirectArray().GetCount()
if fbxLayerMaterials.GetIndexArray().GetCount() > 1 and fbxLayerMaterials.GetMappingMode() == fbx.FbxLayerElement.eByPolygon:
                # build per-material subsets: group polygon indices by material index
subsets = [[] for i in range(materialsCount)]
for polygonIdx in range(fbxLayerMaterials.GetIndexArray().GetCount()):
materialIndex = fbxLayerMaterials.GetIndexArray().GetAt(polygonIdx)
subsets[materialIndex].append(polygonIdx)
bindingAPI = UsdShade.MaterialBindingAPI(usdMesh)
for materialIndex in range(materialsCount):
facesCount = len(subsets[materialIndex])
if facesCount > 0:
fbxMaterial = fbxLayerMaterials.GetDirectArray().GetAt(materialIndex)
materialName = usdUtils.makeValidIdentifier(fbxMaterial.GetName())
subsetName = materialName + '_subset'
if self.verbose:
print ' subset:', subsetName, 'faces:', facesCount
usdSubset = UsdShade.MaterialBindingAPI.CreateMaterialBindSubset(bindingAPI, subsetName, Vt.IntArray(subsets[materialIndex]))
usdMaterial = self.usdMaterials[fbxMaterial.GetName()]
UsdShade.MaterialBindingAPI(usdSubset).Bind(usdMaterial)
elif fbxLayerMaterials.GetIndexArray().GetCount() > 0:
# one material for whole mesh
fbxMaterial = fbxLayerMaterials.GetDirectArray().GetAt(0)
if fbxMaterial is not None and fbxMaterial.GetName() in self.usdMaterials:
usdMaterial = self.usdMaterials[fbxMaterial.GetName()]
UsdShade.Material.Bind(usdMaterial, usdMesh.GetPrim())
def getFbxMesh(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or
fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType):
return fbxNodeAttribute
return None
def getFbxSkin(self, fbxNode):
fbxMesh = self.getFbxMesh(fbxNode)
if fbxMesh is not None and fbxMesh.GetDeformerCount(fbx.FbxDeformer.eSkin) > 0:
return fbxMesh.GetDeformer(0, fbx.FbxDeformer.eSkin)
return None
def getFbxBlenShape(self, fbxNode):
fbxMesh = self.getFbxMesh(fbxNode)
if fbxMesh is not None and fbxMesh.GetDeformerCount(fbx.FbxDeformer.eBlendShape) > 0:
return fbxMesh.GetDeformer(0, fbx.FbxDeformer.eBlendShape)
return None
def processMesh(self, fbxNode, newPath, underSkeleton, indent):
usdMesh = UsdGeom.Mesh.Define(self.usdStage, newPath)
fbxMesh = fbxNode.GetNodeAttribute()
if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType():
fbxMesh = fbxMesh.GetBaseMesh()
else:
usdMesh.CreateSubdivisionSchemeAttr(UsdGeom.Tokens.none)
indices = []
faceVertexCounts = []
for polygonIdx in xrange(fbxMesh.GetPolygonCount()):
polygonSize = fbxMesh.GetPolygonSize(polygonIdx)
faceVertexCounts.append(polygonSize)
for polygonVertexIdx in xrange(polygonSize):
index = fbxMesh.GetPolygonVertex(polygonIdx, polygonVertexIdx)
indices.append(index)
usdMesh.CreateFaceVertexCountsAttr(faceVertexCounts)
usdMesh.CreateFaceVertexIndicesAttr(indices)
# positions, normals, texture coordinates
self.processControlPoints(fbxMesh, usdMesh)
self.processNormals(fbxMesh, usdMesh, indices)
self.processUVs(fbxMesh, usdMesh, indices)
self.processVertexColors(fbxMesh, usdMesh, indices)
fbxSkin = self.getFbxSkin(fbxNode)
if fbxSkin is not None:
self.applySkinning(fbxNode, fbxSkin, usdMesh, indices)
elif underSkeleton is not None:
self.bindRigidDeformation(fbxNode, usdMesh, underSkeleton)
if self.verbose:
            meshType = 'Mesh'
            if fbxSkin is not None:
                meshType = 'Skinned mesh'
            elif underSkeleton is not None:
                meshType = 'Rigid skinned mesh'
            print indent + meshType + ': ' + fbxNode.GetName()
self.bindMaterials(fbxMesh, usdMesh)
return usdMesh
def addTranslateOpIfNotEmpty(self, prim, op, name = ''):
if op != fbx.FbxVector4(0, 0, 0, 1):
prim.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2]))
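    # note: callers pass the negated pivot vector, so the identity sentinel
    # below is (0, 0, 0, -1) -- the negation of FBX's (0, 0, 0, 1)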
def addInvertTranslateOpIfNotEmpty(self, prim, op, name = ''):
if op != fbx.FbxVector4(0, 0, 0, -1):
prim.AddTranslateOp(UsdGeom.XformOp.PrecisionFloat, name, True)
def addRotationOpIfNotEmpty(self, prim, op, name = '', idRotation = None):
if idRotation is None:
idRotation = fbx.FbxVector4(0, 0, 0, 1)
if op != idRotation:
prim.AddRotateXYZOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2]))
def addScalingOpIfNotEmpty(self, prim, op, name = '', idScaling = None):
if idScaling is None:
idScaling = fbx.FbxVector4(1, 1, 1, 1)
if op != idScaling:
prim.AddScaleOp(UsdGeom.XformOp.PrecisionFloat, name).Set((op[0], op[1], op[2]))
    def getXformOp(self, usdGeom, opType):
        ops = usdGeom.GetOrderedXformOps()
        for op in ops:
            # find operation without suffix
            if op.GetOpType() == opType and len(op.GetName().split(':')) == 2:
                return op
        op = None
        if opType == UsdGeom.XformOp.TypeTranslate:
            op = usdGeom.AddTranslateOp()
        elif opType == UsdGeom.XformOp.TypeRotateXYZ:
            op = usdGeom.AddRotateXYZOp()
        elif opType == UsdGeom.XformOp.TypeOrient:
            op = usdGeom.AddOrientOp()
        elif opType == UsdGeom.XformOp.TypeScale:
            op = usdGeom.AddScaleOp()
if op is not None:
opNames = [
"xformOp:translate",
"xformOp:translate:rotationOffset",
"xformOp:translate:rotationPivot",
"xformOp:rotateXYZ:preRotation",
"xformOp:rotateXYZ",
"xformOp:rotateXYZ:postRotation",
"!invert!xformOp:translate:rotationPivot",
"xformOp:translate:scalingOffset",
"xformOp:translate:scalingPivot",
"xformOp:scale",
"!invert!xformOp:translate:scalingPivot",
]
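            # this order mirrors the FBX local transform stack,
            # T * Roff * Rp * Rpre * R * Rpost * Rp^-1 * Soff * Sp * S * Sp^-1,
            # so the reordered xformOps evaluate consistently with FBX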
ops = usdGeom.GetOrderedXformOps()
newOps = []
for opName in opNames:
checkInverse = False
if opName[0:8] == '!invert!':
opName = opName[8:]
checkInverse = True
for operation in ops:
if operation.GetName() == opName and operation.IsInverseOp() == checkInverse:
newOps.append(operation)
break
usdGeom.SetXformOpOrder(newOps)
return op
def setNodeTransforms(self, node, prim):
t = fbx.FbxVector4(node.LclTranslation.Get())
ro = node.GetRotationOffset(fbx.FbxNode.eSourcePivot)
rp = node.GetRotationPivot(fbx.FbxNode.eSourcePivot)
preRotation = node.GetPreRotation(fbx.FbxNode.eSourcePivot)
r = fbx.FbxVector4(node.LclRotation.Get())
postRotation = node.GetPostRotation(fbx.FbxNode.eSourcePivot)
so = node.GetScalingOffset(fbx.FbxNode.eSourcePivot)
sp = node.GetScalingPivot(fbx.FbxNode.eSourcePivot)
s = fbx.FbxVector4(node.LclScaling.Get())
# set translation
self.addTranslateOpIfNotEmpty(prim, t)
# set rotation offset, pivot and pre-post rotation ops
self.addTranslateOpIfNotEmpty(prim, ro, "rotationOffset")
self.addTranslateOpIfNotEmpty(prim, rp, "rotationPivot")
self.addRotationOpIfNotEmpty(prim, preRotation, "preRotation")
self.addRotationOpIfNotEmpty(prim, r)
self.addRotationOpIfNotEmpty(prim, postRotation, "postRotation")
self.addInvertTranslateOpIfNotEmpty(prim, -rp, "rotationPivot")
# set scaling offset & pivot
self.addTranslateOpIfNotEmpty(prim, so, "scalingOffset")
self.addTranslateOpIfNotEmpty(prim, sp, "scalingPivot")
self.addScalingOpIfNotEmpty(prim, s)
self.addInvertTranslateOpIfNotEmpty(prim, -rp, "scalingPivot")
def hasGeometricTransform(self, fbxNode):
if (fbx.FbxVector4(0, 0, 0, 1) != fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot) or
fbx.FbxVector4(0, 0, 0, 1) != fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot) or
fbx.FbxVector4(1, 1, 1, 1) != fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)):
return True
return False
def setGeometricTransform(self, fbxNode, prim):
gt = fbxNode.GetGeometricTranslation(fbx.FbxNode.eSourcePivot)
gr = fbxNode.GetGeometricRotation(fbx.FbxNode.eSourcePivot)
gs = fbxNode.GetGeometricScaling(fbx.FbxNode.eSourcePivot)
self.addTranslateOpIfNotEmpty(prim, gt, "geometricTranslation")
self.addRotationOpIfNotEmpty(prim, gr, "geometricRotation")
self.addScalingOpIfNotEmpty(prim, gs, "geometricScaling")
def processSkeletalAnimation(self, skeletonIdx):
skeleton = self.skinning.skeletons[skeletonIdx]
framesCount = int((self.stopAnimationTime - self.startAnimationTime) * self.fps + 0.5) + 1
startFrame = int(self.startAnimationTime * self.fps + 0.5)
if framesCount == 1:
if self.verbose:
print ' no skeletal animation'
return
animationName = self.asset.getAnimationsPath() + '/' + 'SkelAnimation'
if skeletonIdx > 0:
animationName += '_' + str(skeletonIdx)
if self.verbose:
print 'Animation:', animationName
usdSkelAnim = UsdSkel.Animation.Define(self.usdStage, animationName)
translateAttr = usdSkelAnim.CreateTranslationsAttr()
rotateAttr = usdSkelAnim.CreateRotationsAttr()
scaleAttr = usdSkelAnim.CreateScalesAttr()
jointPaths = []
for fbxNode in skeleton.joints:
jointPaths.append(skeleton.jointPaths[fbxNode])
fbxAnimEvaluator = self.fbxScene.GetAnimationEvaluator()
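        # bake the animation: sample the evaluator once per frame, where
        # frame f corresponds to time self.startAnimationTime + f / self.fps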
for frame in range(framesCount):
time = frame / self.fps + self.startAnimationTime
translations = []
rotations = []
scales = []
for fbxNode in skeleton.joints:
fbxTime = fbx.FbxTime()
fbxTime.SetSecondDouble(time)
fbxMatrix = fbxAnimEvaluator.GetNodeLocalTransform(fbxNode, fbxTime)
translation = fbxMatrix.GetT()
q = fbxMatrix.GetQ()
rotation = Gf.Quatf(float(q[3]), Gf.Vec3f(float(q[0]), float(q[1]), float(q[2])))
scale = fbxMatrix.GetS()
translations.append([translation[0], translation[1], translation[2]])
rotations.append(rotation)
scales.append([scale[0], scale[1], scale[2]])
translateAttr.Set(translations, Usd.TimeCode(frame + startFrame))
rotateAttr.Set(rotations, Usd.TimeCode(frame + startFrame))
scaleAttr.Set(scales, Usd.TimeCode(frame + startFrame))
usdSkelAnim.CreateJointsAttr(jointPaths)
skeleton.setSkeletalAnimation(usdSkelAnim)
def processNodeTransformAnimation(self, fbxNode, fbxProperty, fbxAnimCurveNode, usdGeom):
fbxTimeSpan = fbx.FbxTimeSpan()
fbxAnimCurveNode.GetAnimationInterval(fbxTimeSpan)
startTime = fbxTimeSpan.GetStart().GetSecondDouble()
stopTime = fbxTimeSpan.GetStop().GetSecondDouble()
framesCount = int((stopTime - startTime) * self.fps + 0.5) + 1
if framesCount < 1:
return
startFrame = int(startTime * self.fps + 0.5)
isTranslation = False
isRotation = False
isScale = False
channelName = str(fbxProperty.GetName()).strip()
if channelName == 'Lcl Translation':
isTranslation = True
elif channelName == 'Lcl Rotation':
isRotation = True
elif channelName == 'Lcl Scaling':
isScale = True
else:
if self.verbose:
                print 'Warning: animation channel "%s" is not supported.' % channelName
fbxAnimEvaluator = self.fbxScene.GetAnimationEvaluator()
# TODO: for linear curves use key frames only
for frame in range(startFrame, startFrame + framesCount):
time = frame / self.fps + startTime
timeCode = self.asset.toTimeCode(time, True)
fbxTime = fbx.FbxTime()
fbxTime.SetSecondDouble(time)
if isTranslation:
op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeTranslate)
v = fbxNode.EvaluateLocalTranslation(fbxTime)
op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2])))
elif isRotation:
op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeRotateXYZ)
v = fbxNode.EvaluateLocalRotation(fbxTime)
op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2])))
elif isScale:
op = self.getXformOp(usdGeom, UsdGeom.XformOp.TypeScale)
v = fbxNode.EvaluateLocalScaling(fbxTime)
op.Set(time = timeCode, value = Gf.Vec3f(float(v[0]), float(v[1]), float(v[2])))
def findAnimationProperties(self, fbxObject):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
return []
animProperties = []
for animStackIdx in range(animStacksCount):
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), animStackIdx)
for layerIdx in range(fbxAnimStack.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId))):
fbxAnimLayer = fbxAnimStack.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), layerIdx)
for curveNodeIdx in range(fbxAnimLayer.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId))):
fbxAnimCurveNode = fbxAnimLayer.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId), curveNodeIdx)
fbxTimeSpan = fbx.FbxTimeSpan()
fbxAnimCurveNode.GetAnimationInterval(fbxTimeSpan)
for propertyIdx in range(fbxAnimCurveNode.GetDstPropertyCount()):
fbxProperty = fbxAnimCurveNode.GetDstProperty(propertyIdx)
if fbxProperty.GetFbxObject() == fbxObject:
animProperty = AnimProperty(fbxAnimLayer, fbxProperty, fbxTimeSpan)
animProperties.append(animProperty)
return animProperties
def processNodeAnimations(self, fbxNode, usdGeom):
animStacksCount = self.fbxScene.GetSrcObjectCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId))
if animStacksCount < 1:
return
for animStackIdx in range(animStacksCount):
fbxAnimStack = self.fbxScene.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimStack.ClassId), animStackIdx)
for layerIdx in range(fbxAnimStack.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId))):
fbxAnimLayer = fbxAnimStack.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), layerIdx)
for curveNodeIdx in range(fbxAnimLayer.GetMemberCount(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId))):
fbxAnimCurveNode = fbxAnimLayer.GetMember(fbx.FbxCriteria.ObjectType(fbx.FbxAnimCurveNode.ClassId), curveNodeIdx)
for propertyIdx in range(fbxAnimCurveNode.GetDstPropertyCount()):
fbxProperty = fbxAnimCurveNode.GetDstProperty(propertyIdx)
fbxObject = fbxProperty.GetFbxObject()
if fbxObject == fbxNode:
self.processNodeTransformAnimation(fbxNode, fbxProperty, fbxAnimCurveNode, usdGeom)
def processNode(self, fbxNode, path, underSkeleton, indent):
nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
newPath = path + '/' + nodeName
if newPath in self.nodePaths:
newPath = newPath + str(self.nodeId)
self.nodeId = self.nodeId + 1
fbxAttributeType = fbx.FbxNodeAttribute.eNone
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType:
if fbxNodeAttribute.IsSkeletonRoot():
skeleton = self.skinning.findSkeletonByRoot(fbxNode)
if skeleton is None:
skeleton = self.skinning.findSkeletonByJoint(fbxNode)
if skeleton is not None:
skeleton.makeUsdSkeleton(self.usdStage, newPath, self.nodeManager)
if self.verbose:
print indent + "SkelRoot:", nodeName
underSkeleton = skeleton
if underSkeleton and self.getFbxMesh(fbxNode) is not None:
self.skeletonByNode[fbxNode] = underSkeleton
elif self.getFbxSkin(fbxNode) is not None:
self.skeletonByNode[fbxNode] = None
elif self.getFbxBlenShape(fbxNode) is not None:
usdNode = self.prepareBlendShape(fbxNode, newPath)
self.setNodeTransforms(fbxNode, usdNode)
self.processNodeAnimations(fbxNode, usdNode)
else:
# if we have a geometric transformation we shouldn't propagate it to node's children
usdNode = None
hasGeometricTransform = self.hasGeometricTransform(fbxNode)
            if underSkeleton is None and hasGeometricTransform:
usdNode = UsdGeom.Xform.Define(self.usdStage, newPath)
geometryPath = newPath + '/' + nodeName + '_geometry'
else:
geometryPath = newPath
usdGeometry = None
if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or
fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType):
usdGeometry = self.processMesh(fbxNode, geometryPath, underSkeleton, indent)
if underSkeleton is None:
if usdGeometry is None:
usdGeometry = UsdGeom.Xform.Define(self.usdStage, geometryPath)
self.nodePaths[newPath] = newPath
if hasGeometricTransform:
self.setNodeTransforms(fbxNode, usdNode)
self.setGeometricTransform(fbxNode, usdGeometry)
self.processNodeAnimations(fbxNode, usdNode)
else:
self.setNodeTransforms(fbxNode, usdGeometry)
self.processNodeAnimations(fbxNode, usdGeometry)
# process child nodes recursively
if underSkeleton is not None:
newPath = path # keep meshes directly under SkelRoot scope
for childIdx in xrange(fbxNode.GetChildCount()):
self.processNode(fbxNode.GetChild(childIdx), newPath, underSkeleton, indent + ' ')
def populateSkeletons(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType:
if fbxNodeAttribute.IsSkeletonRoot():
self.skinning.createSkeleton(fbxNode)
for childIdx in xrange(fbxNode.GetChildCount()):
self.populateSkeletons(fbxNode.GetChild(childIdx))
def findSkelRoot(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if fbx.FbxNodeAttribute.eSkeleton == fbxAttributeType:
if fbxNodeAttribute.IsSkeletonRoot():
return fbxNode
fbxParentNode = fbxNode.GetParent()
if fbxParentNode is not None:
return self.findSkelRoot(fbxParentNode)
return None
def populateSkins(self, fbxNode):
fbxNodeAttribute = fbxNode.GetNodeAttribute()
if fbxNodeAttribute:
fbxAttributeType = fbxNodeAttribute.GetAttributeType()
if (fbx.FbxNodeAttribute.eMesh == fbxAttributeType or
fbx.FbxNodeAttribute.eSubDiv == fbxAttributeType):
fbxMesh = fbxNode.GetNodeAttribute()
for i in range(fbxMesh.GetDeformerCount(fbx.FbxDeformer.eSkin)):
fbxSkin = fbxMesh.GetDeformer(i, fbx.FbxDeformer.eSkin)
# try to find skeleton root (.eSkeleton) in parent nodes
root = self.findSkelRoot(fbxSkin.GetCluster(0).GetLink()) if fbxSkin.GetClusterCount() > 0 else None
skin = usdUtils.Skin(root)
for clusterIdx in range(fbxSkin.GetClusterCount()):
fbxCluster = fbxSkin.GetCluster(clusterIdx)
fbxJointNode = fbxCluster.GetLink()
skin.joints.append(fbxJointNode)
linkWorldTransform = fbx.FbxAMatrix()
linkWorldTransform = fbxCluster.GetTransformLinkMatrix(linkWorldTransform)
skin.bindMatrices[fbxJointNode] = GfMatrix4dWithFbxMatrix(linkWorldTransform)
self.skinning.skins.append(skin)
self.fbxSkinToSkin[fbxSkin] = skin
for childIdx in xrange(fbxNode.GetChildCount()):
self.populateSkins(fbxNode.GetChild(childIdx))
def processSkinning(self):
self.populateSkeletons(self.fbxScene.GetRootNode())
self.populateSkins(self.fbxScene.GetRootNode())
self.skinning.createSkeletonsFromSkins()
if self.verbose:
if len(self.skinning.skeletons) > 0:
print " Found skeletons:", len(self.skinning.skeletons), "with", len(self.skinning.skins), "skin(s)"
def processSkinnedMeshes(self):
for fbxNode, skeleton in self.skeletonByNode.iteritems():
fbxSkin = self.getFbxSkin(fbxNode)
if skeleton is None:
if fbxSkin is None:
continue
skin = self.fbxSkinToSkin[fbxSkin]
skeleton = skin.skeleton
nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
newPath = skeleton.sdfPath + '/' + nodeName
if newPath in self.nodePaths:
newPath = newPath + str(self.nodeId)
self.nodeId = self.nodeId + 1
self.nodePaths[newPath] = newPath
self.processMesh(fbxNode, newPath, skeleton, '')
def processSkeletalAnimations(self):
for skeletonIdx in range(len(self.skinning.skeletons)):
self.processSkeletalAnimation(skeletonIdx)
def prepareBlendShape(self, fbxNode, path):
fbxBlendShape = self.getFbxBlenShape(fbxNode)
blendShape = self.shapeBlending.createBlendShape(0)
self.blendShapeByNode[fbxNode] = blendShape
return blendShape.makeUsdSkeleton(self.usdStage, path)
def processBlendShapes(self):
for fbxNode, blendShape in self.blendShapeByNode.iteritems():
nodeName = usdUtils.makeValidIdentifier(fbxNode.GetName().split(":")[-1])
newPath = blendShape.sdfPath + '/' + nodeName
if newPath in self.nodePaths:
newPath = newPath + str(self.nodeId)
self.nodeId = self.nodeId + 1
self.nodePaths[newPath] = newPath
usdMesh = self.processMesh(fbxNode, newPath, None, '')
fbxMesh = fbxNode.GetNodeAttribute()
if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType():
fbxMesh = fbxMesh.GetBaseMesh()
points = [Gf.Vec3f(p[0], p[1], p[2]) for p in fbxMesh.GetControlPoints()]
blendShapes = []
blendShapeTargets = []
            index = 0
fbxBlendShape = self.getFbxBlenShape(fbxNode)
for i in range(fbxBlendShape.GetBlendShapeChannelCount()):
fbxBlendShapeChannel = fbxBlendShape.GetBlendShapeChannel(i)
for j in range(fbxBlendShapeChannel.GetTargetShapeCount()):
fbxShape = fbxBlendShapeChannel.GetTargetShape(j)
blendShapeName = "blendShape" + str(index)
index += 1
blendShapeTarget = newPath + "/" + blendShapeName
blendShapeName = self.asset.makeUniqueBlendShapeName(blendShapeName, newPath)
blendShapes.append(blendShapeName)
blendShapeTargets.append(blendShapeTarget)
usdBlendShape = UsdSkel.BlendShape.Define(self.usdStage, blendShapeTarget)
if fbxShape.GetControlPointsCount():
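                        # store the shape sparsely: only points that differ
                        # from the base mesh get an offset (target - base)
                        # together with their point index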
offsets = []
pointIndices = []
for k in range(fbxShape.GetControlPointsCount()):
point = fbxShape.GetControlPointAt(k)
                            if points[k][0] != point[0] or points[k][1] != point[1] or points[k][2] != point[2]:
offsets.append(Gf.Vec3f(point[0] - points[k][0], point[1] - points[k][1], point[2] - points[k][2]))
pointIndices.append(k)
usdBlendShape.CreateOffsetsAttr(offsets)
usdBlendShape.CreatePointIndicesAttr(pointIndices)
usdSkelBlendShapeBinding = UsdSkel.BindingAPI(usdMesh)
usdSkelBlendShapeBinding.CreateBlendShapesAttr(blendShapes)
usdSkelBlendShapeBinding.CreateBlendShapeTargetsRel().SetTargets(blendShapeTargets)
            UsdSkel.BindingAPI.Apply(usdMesh.GetPrim())
blendShape.addBlendShapeList(blendShapes)
def processBlendShapeAnimations(self):
framesCount = int((self.stopAnimationTime - self.startAnimationTime) * self.fps + 0.5) + 1
startFrame = int(self.startAnimationTime * self.fps + 0.5)
if framesCount == 1:
return
blendShapeIdx = 0
for fbxNode, blendShape in self.blendShapeByNode.iteritems():
fbxBlendShape = self.getFbxBlenShape(fbxNode)
            animationName = self.asset.getAnimationsPath() + '/' + 'BlendShapeAnim'
if blendShapeIdx > 0:
animationName += '_' + str(blendShapeIdx)
if self.verbose:
print 'Animation:', animationName
blendShapeIdx += 1
usdSkelAnim = UsdSkel.Animation.Define(self.usdStage, animationName)
attr = usdSkelAnim.CreateBlendShapeWeightsAttr()
for frame in range(framesCount):
time = frame / self.fps + self.startAnimationTime
values = []
for i in range(fbxBlendShape.GetBlendShapeChannelCount()):
fbxBlendShapeChannel = fbxBlendShape.GetBlendShapeChannel(i)
animProperties = self.findAnimationProperties(fbxBlendShapeChannel)
for animProperty in animProperties:
#channelName = str(fbxProperty.GetName()).strip()
fbxMesh = fbxNode.GetNodeAttribute()
if fbx.FbxNodeAttribute.eSubDiv == fbxMesh.GetAttributeType():
fbxMesh = fbxMesh.GetBaseMesh()
fbxTime = fbx.FbxTime()
fbxTime.SetSecondDouble(time)
fbxAnimCurve = animProperty.fbxProperty.GetCurve(animProperty.fbxAnimLayer)
values.append(fbxAnimCurve.Evaluate(fbxTime)[0] / 100.0) # in percent
attr.Set(time = Usd.TimeCode(frame + startFrame), value = values)
blendShape.setSkeletalAnimation(usdSkelAnim)
self.shapeBlending.flush()
def makeUsdStage(self):
self.usdStage = self.asset.makeUsdStage()
# axis system for USD should be Y-up, odd-forward, and right-handed
sceneAxisSystem = self.fbxScene.GetGlobalSettings().GetAxisSystem()
axisSystem = fbx.FbxAxisSystem(fbx.FbxAxisSystem.EUpVector(fbx.FbxAxisSystem.eYAxis),
fbx.FbxAxisSystem.EFrontVector(fbx.FbxAxisSystem.eParityOdd),
fbx.FbxAxisSystem.ECoordSystem(fbx.FbxAxisSystem.eRightHanded))
if sceneAxisSystem != axisSystem:
if self.verbose:
print(" converting to Y-up, odd-forward, and right-handed axis system")
axisSystem.ConvertScene(self.fbxScene)
systemUnit = self.fbxScene.GetGlobalSettings().GetSystemUnit()
if systemUnit != fbx.FbxSystemUnit.cm: # cm is default for USD and FBX
fbxMetersPerUnit = 0.01
metersPerUnit = systemUnit.GetScaleFactor() * fbxMetersPerUnit
if self.legacyModifier is not None and self.legacyModifier.getMetersPerUnit() == 0:
self.legacyModifier.setMetersPerUnit(metersPerUnit)
else:
self.usdStage.SetMetadata("metersPerUnit", metersPerUnit)
self.processMaterials()
self.processSkinning()
self.prepareAnimations()
self.processNode(self.fbxScene.GetRootNode(), self.asset.getGeomPath(), None, '')
self.processSkeletalAnimations()
self.processSkinnedMeshes()
self.processBlendShapes()
self.processBlendShapeAnimations()
self.asset.finalize()
return self.usdStage
def usdStageWithFbx(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose):
    if not usdStageWithFbxLoaded:
return None
try:
fbxConverter = FbxConverter(fbxPath, usdPath, legacyModifier, copyTextures, searchPaths, verbose)
return fbxConverter.makeUsdStage()
except ConvertError:
return None
| 45.168156 | 176 | 0.635229 | 4,786 | 55,602 | 7.372336 | 0.168199 | 0.006348 | 0.006377 | 0.013009 | 0.318643 | 0.261308 | 0.204229 | 0.16866 | 0.162056 | 0.160129 | 0 | 0.007939 | 0.275116 | 55,602 | 1,230 | 177 | 45.204878 | 0.867485 | 0.026798 | 0 | 0.27044 | 0 | 0.001048 | 0.025522 | 0.004808 | 0 | 0 | 0 | 0.000813 | 0 | 0 | null | null | 0.002096 | 0.016771 | null | null | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f6b95561ed162423b6adee3e5e40b725abe8dde | 3,291 | py | Python | modules/ghautoknit/EmbeddedConstraint.py | fstwn/ghautokn | 5ca6d07df601d34be5a67fe6c76a942daef50a85 | ["MIT"] | 2 | 2021-02-19T19:55:21.000Z | 2021-10-13T23:55:56.000Z | modules/ghautoknit/EmbeddedConstraint.py | fstwn/ghautoknit | 5ca6d07df601d34be5a67fe6c76a942daef50a85 | ["MIT"] | null | null | null | modules/ghautoknit/EmbeddedConstraint.py | fstwn/ghautoknit | 5ca6d07df601d34be5a67fe6c76a942daef50a85 | ["MIT"] | null | null | null
from __future__ import absolute_import
from __future__ import division
# LOCAL MODULE IMPORTS ---------------------------------------------------------
from ghautoknit.StoredConstraint import StoredConstraint
# ALL LIST ---------------------------------------------------------------------
__all__ = [
"EmbeddedConstraint"
]
# ACTUAL CLASS -----------------------------------------------------------------
class EmbeddedConstraint(object):
"""
Class for representing an autoknit constraint in relation to the model.
The chain is only stored as vertex indices.
"""
def __init__(self, chain, value, radius):
"""Create a new autoknit Constraint."""
self._set_chain(chain)
self._set_value(value)
self._set_radius(radius)
def ToString(self):
name = "Autoknit EmbeddedConstraint"
data = "({}, {}, {})".format(self.Chain, self.Value, self.Radius)
return name + data
# BASE PROPERTIES ----------------------------------------------------------
# CHAIN PROPERTY -----------------------------------------------------------
def _get_chain(self):
return self._chain
def _set_chain(self, chain):
if type(chain) != list:
raise RuntimeError("Expected list of vertex indices as chain!")
try:
for i, item in enumerate(chain):
chain[i] = int(item)
except:
raise RuntimeError("Some of the indices in the given chain " + \
"failed to convert to integers!")
self._chain = chain
Chain = property(_get_chain, _set_chain, None,
"The chain of points of the constraint.")
# TIME VALUE PROPERTY ------------------------------------------------------
def _get_value(self):
return self._value
def _set_value(self, value):
try:
value = float(value)
        except Exception as e:
raise RuntimeError("Failed to set time value for constraint " + \
"{} // {}".format(str(self), e))
self._value = value
Value = property(_get_value, _set_value, None,
"The time value of the constraint.")
# RADIUS PROPERTY ----------------------------------------------------------
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
try:
radius = float(radius)
        except Exception as e:
raise RuntimeError("Failed to set radius for constraint " + \
"{} // {}".format(str(self), e))
self._radius = radius
Radius = property(_get_radius, _set_radius, None,
"The radius of the constraint.")
# CONVERT CONSTRAINT FOR STORAGE -------------------------------------------
def _get_storable(self):
count = len(self.Chain)
storable = (count, self.Value, self.Radius)
return storable
Storable = property(_get_storable, None, None,
"A storable version of this constraint.")
# MAIN -------------------------------------------------------------------------
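# A minimal usage sketch (hypothetical values, not part of this module):
# constraint = EmbeddedConstraint([0, 1, 2], 0.5, 1.0)
# constraint.Storable # -> (3, 0.5, 1.0)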
if __name__ == '__main__':
pass
| 35.387097 | 80 | 0.485567 | 298 | 3,291 | 5.144295 | 0.288591 | 0.035225 | 0.027397 | 0.024788 | 0.130463 | 0.097847 | 0.097847 | 0.057404 | 0 | 0 | 0 | 0 | 0.263446 | 3,291 | 92 | 81 | 35.771739 | 0.632426 | 0.233668 | 0 | 0.118644 | 0 | 0 | 0.173299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.016949 | 0.050847 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3f730b00ede0a815c4c62737f803ff84e093f24f | 3,124 | py | Python | Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py | rtobar/askapsoft | 6bae06071d7d24f41abe3f2b7f9ee06cb0a9445e | ["BSL-1.0", "Apache-2.0", "OpenSSL"] | 1 | 2020-06-18T08:37:43.000Z | 2020-06-18T08:37:43.000Z | Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py | ATNF/askapsoft | d839c052d5c62ad8a511e58cd4b6548491a6006f | ["BSL-1.0", "Apache-2.0", "OpenSSL"] | null | null | null | Code/Components/Synthesis/testdata/current/simulation/synthregression/wtermtest.py | ATNF/askapsoft | d839c052d5c62ad8a511e58cd4b6548491a6006f | ["BSL-1.0", "Apache-2.0", "OpenSSL"] | null | null | null
# some fixed parameters are given in wtermtest_template.in
from synthprogrunner import *
import math
def analyseResult(spr, checkWeights=True):
'''
spr - synthesis program runner (to run imageStats)
throws exceptions if something is wrong, otherwise just
returns
'''
src_offset = 0.006/math.pi*180.
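    # source offset of 0.006 rad converted to degrees (~0.34 deg)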
psf_peak=[-172.5,-45]
true_peak=sinProjection(psf_peak,src_offset,src_offset)
stats = spr.imageStats('image.field1.restored')
print "Statistics for restored image: ",stats
disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
if abs(stats['peak']-1.)>0.1:
raise RuntimeError, "Peak flux in the image is notably different from 1 Jy, F=%f" % stats['peak']
stats = spr.imageStats('image.field1')
    print "Statistics for model image: ", stats
disterr = getDistance(stats,true_peak[0],true_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
stats = spr.imageStats('psf.field1')
print "Statistics for psf image: ",stats
disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
stats = spr.imageStats('psf.image.field1')
print "Statistics for preconditioned psf image: ",stats
disterr = getDistance(stats,psf_peak[0],psf_peak[1])*3600.
if disterr > 8:
raise RuntimeError, "Offset between true and expected position exceeds 1 cell size (8 arcsec), d=%f, true_peak=%s" % (disterr,true_peak)
if abs(stats['peak']-1.)>0.01:
raise RuntimeError, "Peak flux in the preconditioned psf image is notably different from 1.0, F=%f" % stats['peak']
if checkWeights:
stats = spr.imageStats('weights.field1')
print "Statistics for weight image: ",stats
if abs(stats['rms']-stats['peak'])>0.1 or abs(stats['rms']-stats['median'])>0.1 or abs(stats['peak']-stats['median'])>0.1:
raise RuntimeError, "Weight image is expected to be constant for WProject and WStack gridders"
stats = spr.imageStats('residual.field1')
print "Statistics for residual image: ",stats
if stats['rms']>0.01 or abs(stats['median'])>0.0001:
raise RuntimeError, "Residual image has too high rms or median. Please verify"
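# run the same simulated dataset through three gridder configurations:
# WProject, WStack, and WProject with snapshot imaging (where the weight image
# is no longer expected to be flat, hence checkWeights=False)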
spr = SynthesisProgramRunner(template_parset = 'wtermtest_template.in')
spr.runSimulator()
spr.addToParset("Cimager.gridder = WProject")
spr.runImager()
analyseResult(spr)
spr.initParset()
spr.addToParset("Cimager.gridder = WStack")
spr.runImager()
analyseResult(spr)
spr.initParset()
spr.addToParset("Cimager.gridder = WProject")
spr.addToParset("Cimager.gridder.snapshotimaging = true")
spr.addToParset("Cimager.gridder.snapshotimaging.wtolerance = 500")
spr.runImager()
analyseResult(spr,False)
| 42.216216 | 142 | 0.717029 | 438 | 3,124 | 5.057078 | 0.262557 | 0.046953 | 0.048758 | 0.054176 | 0.565237 | 0.467269 | 0.393679 | 0.393679 | 0.393679 | 0.393679 | 0 | 0.031108 | 0.15621 | 3,124 | 73 | 143 | 42.794521 | 0.809181 | 0.036492 | 0 | 0.396226 | 0 | 0.075472 | 0.397834 | 0.040168 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.018868 | null | null | 0.113208 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58b28e8645b762a35a626046be48d346a6bd215a | 15,595 | py | Python | test/test_views.py | Nemoden/Simblin | 1f97a985125023e64dfc6f4db6292cf3a2b904c9 | ["BSD-3-Clause"] | 53 | 2015-02-01T14:06:48.000Z | 2022-01-02T15:46:00.000Z | test/test_views.py | Aaron1992/Simblin | 1f97a985125023e64dfc6f4db6292cf3a2b904c9 | ["BSD-3-Clause"] | null | null | null | test/test_views.py | Aaron1992/Simblin | 1f97a985125023e64dfc6f4db6292cf3a2b904c9 | ["BSD-3-Clause"] | 23 | 2015-01-04T08:11:27.000Z | 2019-11-24T13:18:25.000Z
"""
Simblin Test Views
~~~~~~~~~~~~~~~~~~
Test the different views of the blogging application.
:copyright: (c) 2010 by Eugen Kiss.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import datetime
import flask
from simblin.extensions import db
from simblin.models import Post, Tag, Category, post_tags, post_categories, Admin
from nose.tools import assert_equal, assert_true, assert_false
from test import TestCase
class ViewTestCase(TestCase):
"""Base TestClass for views"""
def register(self, username, password, password2='', email=''):
"""Helper function to register a user"""
return self.client.post('/register', data=dict(
username=username,
password=password,
password2=password2,
email=email,
), follow_redirects=True)
def login(self, username, password):
"""Helper function to login"""
return self.client.post('/login', data=dict(
username=username,
password=password
), follow_redirects=True)
def register_and_login(self, username, password):
"""Register and login in one go"""
self.register(username, password, password)
self.login(username, password)
def logout(self):
"""Helper function to logout"""
return self.client.get('/logout', follow_redirects=True)
def add_post(self, title, markup='', comments_allowed=None, visible=None,
tags='', categories=[]):
"""Helper functions to create a blog post"""
data=dict(
title=title,
markup=markup,
tags=tags,
action='Publish',
)
if comments_allowed is not None:
data['comments_allowed'] = True
if visible is not None:
data['visible'] = True
# Mimic select form fields
for i, category_id in enumerate(categories):
data['category-%d' % i] = category_id
return self.client.post('/compose', data=data, follow_redirects=True)
def update_post(self, slug, title, markup='', comments_allowed=None,
visible=None, tags=None, categories=[]):
"""Helper functions to create a blog post"""
data=dict(
title=title,
markup=markup,
tags=tags,
action='Update',
)
if comments_allowed is not None:
data['comments_allowed'] = True
if visible is not None:
data['visible'] = True
# Mimic select form fields
for i, category_id in enumerate(categories):
data['category-%d' % i] = category_id
return self.client.post('/update/%s' % slug, data=data,
follow_redirects=True)
def delete_post(self, slug):
"""Helper function to delete a blog post"""
return self.client.post('/_delete/%s' % slug, data=dict(next=''),
follow_redirects=True)
def add_category(self, name):
"""Register category in the database and return its id"""
return flask.json.loads(
self.client.post('/_add_category', data=dict(name=name)).data)['id']
def delete_category(self, id):
return self.client.post('/_delete_category', data=dict(id=id))
class TestRegistration(ViewTestCase):
def test_validation(self):
"""Test form validation"""
self.clear_db()
rv = self.register('', 'password')
assert 'You have to enter a username' in rv.data
rv = self.register('britney spears', '')
assert 'You have to enter a password' in rv.data
rv = self.register('barney', 'abv', 'abc')
assert 'Passwords must match' in rv.data
def test_registration(self):
"""Test successful registration and automatic login"""
self.clear_db()
with self.client:
rv = self.register('barney', 'abc', 'abc')
assert 'You are the new master of this blog' in rv.data
assert flask.session['logged_in']
def test_reregistration(self):
"""Test that only one admin can exist at a time and reregistration
with new credentials only works when logged in"""
self.clear_db()
rv = self.register('barney', 'abc', 'abc')
self.logout()
rv = self.register('barney', 'abc', 'abc')
assert 'There can only be one admin' in rv.data
self.login('barney', 'abc')
rv = self.register('moe', 'ugly', 'ugly') # clears the admin
rv = self.register('moe', 'ugly', 'ugly')
assert 'You are the new master of this blog' in rv.data
assert_equal(Admin.query.count(), 1)
class TestLogin(ViewTestCase):
def test_validation(self):
"""Test form validation"""
self.clear_db()
self.register('barney', 'abc', 'abc')
rv = self.login('borney', 'abc')
assert 'Invalid username' in rv.data
rv = self.login('barney', 'abd')
assert 'Invalid password' in rv.data
def test_login_logout(self):
"""Test logging in and out"""
self.clear_db()
self.register('barney', 'abc', 'abc')
with self.client:
rv = self.login('barney', 'abc')
assert 'You have been successfully logged in' in rv.data
assert flask.session['logged_in']
rv = self.logout()
assert 'You have been successfully logged out' in rv.data
assert 'logged_in' not in flask.session
class TestPost(ViewTestCase):
"""Tags and categories are tested alongside"""
def test_validation(self):
"""Check if form validation and validation in general works"""
self.clear_db()
self.register_and_login('barney', 'abc')
rv = self.add_post(title='', markup='a', tags='b')
assert 'You must provide a title' in rv.data
rv = self.update_post(title='a', markup='', tags='', slug='999x00')
assert 'Invalid slug' in rv.data
rv = self.add_post(title='a', markup='', tags='')
assert 'New post was successfully posted' in rv.data
    def test_creation(self):
        """Test the blog post's fields' correctness after adding a
        post and test proper category association"""
self.clear_db()
self.register_and_login('barney', 'abc')
title = "My post"
markup = "# Title"
tags = "django, franz und bertha,vil/bil"
category1_id = self.add_category('cool')
category2_id = self.add_category('cooler')
self.add_post(title=title, markup=markup, tags=tags,
categories=[category1_id, category1_id, category2_id])
post = Post.query.get(1)
post_tagnames = [tag.name for tag in post.tags]
category_names = [x.name for x in post.categories]
assert_equal(post.id, 1)
assert_equal(post.title, title)
assert_equal(post.markup, markup)
assert_false(post.comments_allowed)
assert_false(post.visible)
assert_equal(post.slug, 'my-post')
assert '<h1>Title</h1>' in post.html
assert_equal(post.datetime.date(), datetime.date.today())
assert_equal(sorted(post_tagnames),
sorted(['django','franz-und-bertha','vil-bil']))
assert_equal(sorted(category_names), sorted(['cool', 'cooler']))
assert_equal(Tag.query.count(), 3)
assert_equal(Category.query.count(), 2)
assert_equal(db.session.query(post_tags).count(), 3)
# Expect only two mappings although the mapping to category1
# has been added twice
assert_equal(db.session.query(post_categories).count(), 2)
# Add another post
self.add_post(title=post.title, tags=['django'], comments_allowed=True,
visible=True)
post2 = Post.query.get(2)
assert_equal(post2.title, post.title)
assert_true(post2.comments_allowed)
assert_true(post2.visible)
assert_equal(post2.slug, post.slug + '-2')
assert_equal(post2.categories, [])
assert_equal(Tag.query.count(), 3)
return post
def test_updating(self):
"""Test the blog post's fields' correctness after updating a post and
test the proper creation and automatic tidying of tags and tag
mappings and category associations"""
post = self.test_creation()
datetime = post.datetime
self.update_post(title='cool', markup='## Title', slug=post.slug,
tags=['django'], comments_allowed=True, visible=True)
updated_post = Post.query.get(1)
assert_equal(updated_post.title, 'cool')
assert_equal(updated_post.markup, '## Title')
assert_true(updated_post.comments_allowed)
assert_true(updated_post.visible)
assert_equal(updated_post.slug, 'cool')
assert '<h2>Title</h2>' in updated_post.html
assert_equal(updated_post.datetime, datetime)
assert_equal([x.name for x in updated_post.tags], ['django'])
# Expect two rows in the posts table because two posts were
# created and one updated. Expect only one row in the tags table
# because only 'django' is used as a tag.
assert_equal(Post.query.count(), 2)
assert_equal(Tag.query.count(), 1)
# Because there are two post with a tag expect two rows
# in the post_tag association table
assert_equal(db.session.query(post_tags).count(), 2)
# Because there is no post in a category anymore expect not rows
# in the post_categories association table
assert_equal(db.session.query(post_categories).count(), 0)
def test_deletion(self):
"""Test the deletion of a blog post and the accompanying deletion of
tags"""
self.clear_db()
self.register_and_login('barney', 'abc')
self.add_post(title='Title', markup='', tags='cool')
posts = Post.query.all()
tags = Tag.query.all()
assert_equal(len(posts), 1)
assert_equal(len(tags), 1)
rv = self.delete_post(slug='idontexist')
assert 'No such post' in rv.data
rv = self.delete_post(slug='title')
assert 'Post deleted' in rv.data
posts = Post.query.all()
tags = Tag.query.all()
assert_equal(len(posts), 0)
assert_equal(len(tags), 0)
def test_singleview(self):
"""Test the displaying of one blog post"""
self.clear_db()
self.register_and_login('barney', 'abc')
self.add_post(title='Title', markup='', visible=True)
rv = self.client.get('/post/title')
self.assert_200(rv)
assert 'Title' in rv.data
self.add_post(title='Title2', visible=None)
rv = self.client.get('/post/title2')
self.assert_200(rv)
assert 'Title2' in rv.data
self.logout()
rv = self.client.get('/post/title')
self.assert_200(rv)
assert 'Title' in rv.data
rv = self.client.get('/post/title2')
self.assert_404(rv)
def test_multipleview(self):
"""Test the displaying of multiple blog posts on home page"""
self.clear_db()
self.register_and_login('barney', 'abc')
self.add_post(title='Title', markup='', visible=True)
self.add_post(title='Title2', visible=None)
self.logout()
rv = self.client.get('/')
self.assert_200(rv)
assert 'Title' in rv.data
assert 'Title2' not in rv.data
class TestArchives(ViewTestCase):
def test_archives_page(self):
"""Test the displaying of the archives page"""
self.clear_db()
rv = self.client.get('/archives/')
self.assert_200(rv)
def test_month_view(self):
"""Test the displaying of the month view"""
self.clear_db()
self.register_and_login('barney', 'abc')
post = Post('the chronic 2001', visible=False)
post.datetime = datetime.datetime(1999, 11, 16)
db.session.add(post)
db.session.commit()
rv = self.client.get('/1999/11/')
self.assert_200(rv)
assert 'the chronic 2001' in rv.data
rv = self.client.get('/7777/12/')
assert 'No entries here so far' in rv.data
rv = self.client.get('/1999/14/')
self.assert_404(rv)
self.logout()
rv = self.client.get('/1999/11/')
self.assert_200(rv)
assert 'No entries here so far' in rv.data
class TestTag(ViewTestCase):
def test_view(self):
"""Test the displaying of the tag view"""
self.clear_db()
self.register_and_login('barney', 'abc')
tag = Tag('drdre')
db.session.add(tag)
db.session.commit()
post = Post('the chronic 2001', visible=True)
post2 = Post('the chronic 2002', visible=False)
post._tags = [tag]
post2._tags = [tag]
db.session.add(post)
db.session.add(post2)
db.session.commit()
rv = self.client.get('/tag/drdre/')
self.assert_200(rv)
assert 'the chronic 2001' in rv.data
rv = self.client.get('/tag/bobbybrown/')
self.assert_404(rv)
self.logout()
rv = self.client.get('/tag/drdre/')
self.assert_200(rv)
assert 'the chronic 2001' in rv.data
assert 'the chronic 2002' not in rv.data
class TestCategory(ViewTestCase):
def test_view(self):
"""Test the displaying of the category view"""
self.clear_db()
self.register_and_login('barney', 'abc')
category = Category('drdre')
db.session.add(category)
db.session.commit()
post = Post('the chronic', visible=True)
post2 = Post('the chrinoc', visible=False)
post._categories = [category]
post2._categories = [category]
db.session.add(post)
db.session.add(post2)
db.session.commit()
rv = self.client.get('/category/drdre/')
self.assert_200(rv)
assert 'the chronic' in rv.data
rv = self.client.get('/category/sugeknight/')
self.assert_404(rv)
self.logout()
rv = self.client.get('/category/drdre/')
self.assert_200(rv)
assert 'the chronic' in rv.data
assert 'the chrinoc' not in rv.data
rv = self.client.get('/uncategorized/')
self.assert_200(rv)
assert 'Uncategorized posts' in rv.data
post2 = Post('dancing in the moonlight')
db.session.add(post2)
db.session.commit()
rv = self.client.get('/uncategorized/')
self.assert_200(rv)
assert 'dancing in the moonlight' in rv.data
def test_deletion_view(self):
"""Test if deletion works properly"""
self.clear_db()
self.register_and_login('barney', 'abc')
category = Category('drdre')
db.session.add(category)
db.session.commit()
assert_equal(Category.query.count(), 1)
rv = self.delete_category(1)
print rv
assert_equal(Category.query.count(), 0)
| 35.93318 | 81 | 0.58833 | 1,921 | 15,595 | 4.673087 | 0.140552 | 0.026067 | 0.027626 | 0.030077 | 0.53236 | 0.451264 | 0.372619 | 0.334967 | 0.282723 | 0.254205 | 0 | 0.015946 | 0.292273 | 15,595 | 433 | 82 | 36.016166 | 0.797409 | 0.034691 | 0 | 0.456026 | 0 | 0 | 0.116485 | 0.001565 | 0 | 0 | 0 | 0 | 0.296417 | 0 | null | null | 0.039088 | 0.022801 | null | null | 0.003257 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58b4a5438c1537dcf99f56657476da7aa2cae99e | 4,166 | py | Python | hue.py | desheffer/hue-adapter | 724e296c8dd52302c0380a58f4390fc3059705dc | ["MIT"] | null | null | null | hue.py | desheffer/hue-adapter | 724e296c8dd52302c0380a58f4390fc3059705dc | ["MIT"] | null | null | null | hue.py | desheffer/hue-adapter | 724e296c8dd52302c0380a58f4390fc3059705dc | ["MIT"] | null | null | null
import flask
import json
import os
from ssdp import SSDP
from threading import Thread
import urllib3
config = None
config_file_paths = [
os.path.dirname(os.path.realpath(__file__)) + "/config/default.cfg.local",
"/etc/hue-adapter/default.cfg.local",
]
for config_file_path in config_file_paths:
if os.path.isfile(config_file_path):
config = Config(file(config_file_path))
if not config:
print "Cannot find configuration file"
exit(1)
app = flask.Flask(__name__)
@app.route("/setup.xml")
def get_setup_file():
"""Serve the SSDP setup file."""
out = "<?xml version=\"1.0\"?>\n" + \
"<root xmlns=\"urn:schemas-upnp-org:device-1-0\">\n" + \
"<specVersion>\n" + \
"<major>1</major>\n" + \
"<minor>0</minor>\n" + \
"</specVersion>\n" + \
"<URLBase>http://%s:%d/</URLBase>\n" % (config.web.addr, config.web.port) + \
"<device>\n" + \
"<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>\n" + \
"<friendlyName>Philips Hue Emulator</friendlyName>\n" + \
"<manufacturer>Royal Philips Electronics</manufacturer>\n" + \
"<manufacturerURL></manufacturerURL>\n" + \
"<modelDescription>Philips Hue Emulator</modelDescription>\n" + \
"<modelName>Philips hue bridge 2012</modelName>\n" + \
"<modelNumber>929000226503</modelNumber>\n" + \
"<modelURL></modelURL>\n" + \
"<serialNumber>00000000000000000001</serialNumber>\n" + \
"<UDN>uuid:776c1cbc-790a-425f-a890-a761ec57513c</UDN>\n" + \
"</device>\n" + \
"</root>\n"
return flask.Response(out, mimetype="text/xml")
@app.route("/api/<username>/lights", methods=["GET"])
def get_all_lights(username):
"""Get all lights"""
out = {}
for id, light in config.lights.iteritems():
out[id] = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"type": "Extended color light",
"name": light["name"],
"modelid": "LCT001",
"swversion": "6609461",
"pointsymbol": {},
}
return flask.jsonify(out)
@app.route("/api/<username>/lights/<id>", methods=["GET"])
def get_light(username, id):
"""Get light attributes and state"""
if id in config.lights:
light = config.lights[id]
else:
return "", 3
out = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"type": "Extended color light",
"name": light["name"],
"modelid": "LCT001",
"swversion": "6609461",
"pointsymbol": {},
}
return flask.jsonify(out)
@app.route("/api/<username>/lights/<id>/state", methods=["PUT"])
def set_lights_state(username, id):
"""Set light state"""
if id in config.lights:
light = config.lights[id]
else:
return "", 3
data = flask.request.get_json(force=True)
if not data or "on" not in data:
return "", 6
if data["on"]:
url = light["on_url"]
else:
url = light["off_url"]
    try:
        http = urllib3.PoolManager()
        http.request("GET", url)
    except Exception:
        return "", 901
out = [
{
"success": {
"/lights/" + id + "/state/on": data["on"]
}
}
]
return flask.Response(json.dumps(out), mimetype="text/json")
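# Hedged usage example for the route above (IP and username are
# placeholders, not values from this project):
#   curl -X PUT http://192.168.1.2/api/anyuser/lights/1/state -d '{"on": true}'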
if __name__ == "__main__":
ssdp = SSDP(config.web.addr, config.web.port)
ssdp_thread = Thread(target=ssdp.run)
ssdp_thread.setDaemon(True)
ssdp_thread.start()
app.run(host=config.web.addr, port=config.web.port)
| 26.877419 | 87 | 0.520163 | 455 | 4,166 | 4.676923 | 0.325275 | 0.028195 | 0.019737 | 0.026786 | 0.292763 | 0.259399 | 0.234962 | 0.234962 | 0.234962 | 0.234962 | 0 | 0.036383 | 0.307249 | 4,166 | 154 | 88 | 27.051948 | 0.70097 | 0 | 0 | 0.346774 | 0 | 0 | 0.288637 | 0.132364 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.056452 | null | null | 0.008065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58b8667325936944d69237ad194f47d738bc7912 | 831 | py | Python | olha_boca/infratores/admin.py | Perceu/olha-boca | 022ff941d6bd20bb79bd1e66cd293dd2f59bf55b | [
"MIT"
] | null | null | null | olha_boca/infratores/admin.py | Perceu/olha-boca | 022ff941d6bd20bb79bd1e66cd293dd2f59bf55b | [
"MIT"
] | null | null | null | olha_boca/infratores/admin.py | Perceu/olha-boca | 022ff941d6bd20bb79bd1e66cd293dd2f59bf55b | [
"MIT"
] | 1 | 2022-02-20T18:43:45.000Z | 2022-02-20T18:43:45.000Z | from django.contrib import admin
from olha_boca.infratores.models import Infratores
# Register your models here.
class InfratoresAdmin(admin.ModelAdmin):
list_display = ('nome', 'infracoes_a_pagar', 'total_infracoes', 'valor_a_pagar')
@admin.display(empty_value='???')
def total_infracoes(self, obj):
return obj.infracoes.count()
@admin.display(empty_value='???')
def infracoes_a_pagar(self, obj):
return obj.infracoes.filter(paga=False).count()
@admin.display(empty_value='???')
def valor_a_pagar(self, obj):
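        # Total fine: sum the base value (vibs) times its multiplier over
        # every unpaid infraction.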
total = 0
infracoes_a_pagar = obj.infracoes.filter(paga=False).all()
for inf in infracoes_a_pagar:
total += (inf.tipo.vibs * inf.tipo.multiplicador_vibs)
return f'R$ {total:.2f}'
admin.site.register(Infratores, InfratoresAdmin) | 33.24 | 84 | 0.688327 | 106 | 831 | 5.207547 | 0.424528 | 0.065217 | 0.108696 | 0.119565 | 0.320652 | 0.108696 | 0 | 0 | 0 | 0 | 0 | 0.002937 | 0.180505 | 831 | 25 | 85 | 33.24 | 0.807636 | 0.031288 | 0 | 0.166667 | 0 | 0 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.111111 | 0.111111 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
58bb3bdee68016c8f1865176bbbb0531b4055727 | 855 | py | Python | lintcode/1375.2.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:49.000Z | 2021-01-08T06:57:49.000Z | lintcode/1375.2.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | null | null | null | lintcode/1375.2.py | jianershi/algorithm | c3c38723b9c5f1cc745550d89e228f92fd4abfb2 | [
"MIT"
] | 1 | 2021-01-08T06:57:52.000Z | 2021-01-08T06:57:52.000Z | """
1375. Substring With At Least K Distinct Characters
"""
class Solution:
"""
@param s: a string
@param k: an integer
@return: the number of substrings there are that contain at least k distinct characters
"""
def kDistinctCharacters(self, s, k):
# Write your code here
n = len(s)
left = 0
count = [0] * 256
distinct_count = 0
substring_count = 0
for right in range(n):
count[ord(s[right])] += 1
if count[ord(s[right])] == 1:
distinct_count += 1
while left <= right and distinct_count >= k:
substring_count += n - right
count[ord(s[left])] -= 1
if count[ord(s[left])] == 0:
distinct_count -= 1
left += 1
return substring_count
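# Hedged sanity check (not part of the original submission):
#   Solution().kDistinctCharacters("abcabc", 2) == 15
# (all 21 substrings minus the 6 single-character ones)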
| 28.5 | 91 | 0.512281 | 104 | 855 | 4.144231 | 0.451923 | 0.12065 | 0.083527 | 0.074246 | 0.222738 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034286 | 0.385965 | 855 | 29 | 92 | 29.482759 | 0.786667 | 0.235088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58bc76fe979d8a17599711a8021f4425b357315a | 1,159 | py | Python | bootcamp/wiki/core/compat.py | basiltiger/easy_bootcamp | 875b9ed287f1a7824bb38f142dbe2f3b1ce54389 | [
"MIT"
] | null | null | null | bootcamp/wiki/core/compat.py | basiltiger/easy_bootcamp | 875b9ed287f1a7824bb38f142dbe2f3b1ce54389 | [
"MIT"
] | null | null | null | bootcamp/wiki/core/compat.py | basiltiger/easy_bootcamp | 875b9ed287f1a7824bb38f142dbe2f3b1ce54389 | [
"MIT"
] | null | null | null | """Abstraction layer to deal with Django related changes in order to keep
compatibility with several Django versions simultaneously."""
from __future__ import unicode_literals
from django.conf import settings as django_settings
USER_MODEL = getattr(django_settings, 'AUTH_USER_MODEL', 'auth.User')
# Django 1.11 Widget.build_attrs has a different signature, designed for the new
# template based rendering. The previous version was more useful for our needs,
# so we restore that version.
# When support for Django < 1.11 is dropped, we should look at using the
# new template based rendering, at which point this probably won't be needed at all.
class BuildAttrsCompat(object):
def build_attrs_compat(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = self.attrs.copy()
if extra_attrs is not None:
attrs.update(extra_attrs)
if kwargs is not None:
attrs.update(kwargs)
return attrs
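# Hedged illustration (not part of the original module): any widget-like
# class carrying an ``attrs`` dict gains the pre-1.11 calling convention.
class _ExampleAttrsHolder(BuildAttrsCompat):
    attrs = {'class': 'wiki-input'}
# _ExampleAttrsHolder().build_attrs_compat({'id': 'x'}, placeholder='y')
# => {'class': 'wiki-input', 'id': 'x', 'placeholder': 'y'}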
try:
# Python 3
from urllib.parse import urljoin # noqa
except ImportError:
# Python 2
from urlparse import urljoin # noqa @UnusedImport
| 36.21875 | 84 | 0.734254 | 163 | 1,159 | 5.122699 | 0.631902 | 0.035928 | 0.021557 | 0.045509 | 0.11497 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008686 | 0.205349 | 1,159 | 31 | 85 | 37.387097 | 0.897937 | 0.487489 | 0 | 0 | 0 | 0 | 0.121451 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.3125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
58c135e6998a8525b0faabf5c07d8105ddf708e8 | 1,596 | py | Python | Lista 2/Questao_1.py | flaviomelo10/Python-para-PLN | 845da043c2618f3aace655cf065fca3d866342d5 | [
"MIT"
] | null | null | null | Lista 2/Questao_1.py | flaviomelo10/Python-para-PLN | 845da043c2618f3aace655cf065fca3d866342d5 | [
"MIT"
] | null | null | null | Lista 2/Questao_1.py | flaviomelo10/Python-para-PLN | 845da043c2618f3aace655cf065fca3d866342d5 | [
"MIT"
] | null | null | null | # -- encoding:utf-8 -- #
'''
Create a variable with the string " instituto de ciências matemáticas e de computação" and:
a. Concatenate (append) another string: "usp"
b. Concatenate (append) another piece of information: 2021
c. Check the size of the new string (with the information added in items a and b), counting characters and spaces
d. Convert the string entirely to upper case
e. Convert the string entirely to lower case
f. Remove the space at the start of the string and print the string
g. Replace all letters 'a' with 'x'
h. Split the string into individual words
i. Check how many words there are in the string
j. Split the string on the word "de"
k. Now check how many words/phrases were produced when splitting on the word "de"
l. Join the words that were split (you may use the split result from item h or j)
m. Join the words that were split, but now separated by a backslash rather than spaces (you may use the split result from item h or j)
'''
texto = " instituto de ciências matemáticas e de computação"
#a)
texto = texto + " usp"
print(texto)
#b)
texto = texto + " 2021"
print(texto)
#c)
tamanho = len(texto)
print(tamanho)
#d)
print(texto.upper())
#e)
print(texto.lower())
#f)
print(texto[1:])
print(texto.strip())
#g)
print(texto.replace('a', 'x'))
#h
separar = texto.split()
print(separar)
#i)
print(len(separar))
#j)
separar2 = texto.split('de')
print(separar2)
#k)
print(len(separar2))
#l)
juntar = " ".join(separar)
print(juntar)
#m)
juntar2 = "/".join(separar)
print(juntar2)
| 24.181818 | 155 | 0.734962 | 252 | 1,596 | 4.654762 | 0.416667 | 0.059676 | 0.032396 | 0.051151 | 0.250639 | 0.197783 | 0.143223 | 0.069906 | 0.069906 | 0.069906 | 0 | 0.011194 | 0.160401 | 1,596 | 65 | 156 | 24.553846 | 0.864179 | 0.664787 | 0 | 0.181818 | 0 | 0 | 0.126706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.636364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
58c4071d4471ff72fd95738a79b453160bfc2e4b | 252 | py | Python | credsweeper/file_handler/analysis_target.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | null | null | null | credsweeper/file_handler/analysis_target.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | null | null | null | credsweeper/file_handler/analysis_target.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | null | null | null | from typing import List
class AnalysisTarget:
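    """One line under analysis plus its context: line number, all file lines, and the file path."""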
def __init__(self, line: str, line_num: int, lines: List[str], file_path: str):
self.line = line
self.line_num = line_num
self.lines = lines
self.file_path = file_path
| 25.2 | 83 | 0.650794 | 36 | 252 | 4.277778 | 0.444444 | 0.155844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.261905 | 252 | 9 | 84 | 28 | 0.827957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58c6d6c03c23a334c302f4903855ceb65421ce9b | 2,341 | py | Python | CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py | MY-Climate-Observatory/myco-data | 5203fa63c7ce609bbc9bbc4186f55da78befdc50 | [
"CC-BY-4.0"
] | null | null | null | CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py | MY-Climate-Observatory/myco-data | 5203fa63c7ce609bbc9bbc4186f55da78befdc50 | [
"CC-BY-4.0"
] | null | null | null | CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py | MY-Climate-Observatory/myco-data | 5203fa63c7ce609bbc9bbc4186f55da78befdc50 | [
"CC-BY-4.0"
] | 1 | 2021-12-16T04:56:09.000Z | 2021-12-16T04:56:09.000Z | # -*- coding: utf-8 -*-
"""
17 June 2020
Author: Xiandi Ooi
Visualizing the types of pollutants.
"""
import pandas as pd
from plotly.offline import plot
import plotly.graph_objects as go
# Get the file from us
df = pd.read_csv("https://www.dropbox.com/s/u0ymg0ufne0an60/api-20200713.csv?dl=1", sep = ";")
# Make the selection
selected_area = "Sandakan"
df_select = df.loc[(df.Area == selected_area),
["Area", "Dominant", "Datetime"]]
# Data wrangling for this particular visual
df_update = df_select.set_index(pd.DatetimeIndex(df_select["Datetime"]))
df_update.drop(df_update.columns[2], axis = 1, inplace = True)
# Wrangling
df_group_time = df_update.groupby(pd.Grouper(freq = "Q")).size().reset_index(name = "Total")
df_group = df_update.groupby([pd.Grouper(freq = "Q"),
pd.Grouper("Dominant")]).size().reset_index(name = "Count")
df_output = df_group.set_index("Datetime").join(df_group_time.set_index("Datetime"))
df_output["Frequency"] = df_output["Count"] / df_output["Total"]
# Creating df subset for the stacked bars, here we are only dealing with the main dominant pollutants
df_pm2_5 = df_output.loc[(df_output.Dominant == "**")]
df_pm10 = df_output.loc[(df_output.Dominant == "*")]
df_so2 = df_output.loc[(df_output.Dominant == "a")]
df_no2 = df_output.loc[(df_output.Dominant == "b")]
df_o3 = df_output.loc[(df_output.Dominant == "c")]
df_co = df_output.loc[(df_output.Dominant == "d")]
# Now comes the bar chart
fig = go.Figure()
fig.add_trace(go.Bar(x = df_pm2_5.index,
y = df_pm2_5["Frequency"],
name = "PM 2.5"))
fig.add_trace(go.Bar(x = df_pm10.index,
y = df_pm10["Frequency"],
name = "PM 10"))
fig.add_trace(go.Bar(x = df_so2.index,
y = df_so2["Frequency"],
name = "SO2"))
fig.add_trace(go.Bar(x = df_no2.index,
y = df_no2["Frequency"],
name = "NO2"))
fig.add_trace(go.Bar(x = df_o3.index,
y = df_o3["Frequency"],
name = "O3"))
fig.add_trace(go.Bar(x = df_co.index,
y = df_co["Frequency"],
name = "CO"))
fig.update_layout(barmode = "stack", title_text="Frequency of Detected Pollutants")
plot(fig)
| 32.971831 | 101 | 0.612986 | 337 | 2,341 | 4.05638 | 0.35905 | 0.093636 | 0.048281 | 0.057059 | 0.247257 | 0.247257 | 0.168252 | 0 | 0 | 0 | 0 | 0.028412 | 0.233234 | 2,341 | 70 | 102 | 33.442857 | 0.733148 | 0.101666 | 0 | 0 | 0 | 0 | 0.108081 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.073171 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58c8432548a967e56cf908c27cbcc2cdbca067b8 | 1,434 | py | Python | various_modules/interface_segregation_principle.py | Neykah/design_patterns_python | 6f801fc4fc60f2d34002e4fe435feb6111a2cd23 | [
"MIT"
] | null | null | null | various_modules/interface_segregation_principle.py | Neykah/design_patterns_python | 6f801fc4fc60f2d34002e4fe435feb6111a2cd23 | [
"MIT"
] | null | null | null | various_modules/interface_segregation_principle.py | Neykah/design_patterns_python | 6f801fc4fc60f2d34002e4fe435feb6111a2cd23 | [
"MIT"
] | null | null | null | """
Maybe not so relevant in Python due to the possibility to use multiple inheritance...
"""
from abc import ABC, abstractmethod
class CloudHostingProvider(ABC):
    @abstractmethod
    def create_server(self, region):
        ...
    @abstractmethod
    def list_servers(self, region):
        ...
class CDNProvider(ABC):
    @abstractmethod
    def get_cdna_address(self):
        ...
class CloudStorageProvider(ABC):
    @abstractmethod
    def store_file(self, name):
        ...
    @abstractmethod
    def get_file(self, name):
        ...
class Amazon(CloudHostingProvider, CDNProvider, CloudStorageProvider):
def store_file(self, name: str):
print(f"Storing the file {name} in AWS...")
def get_file(self, name: str):
print(f"Getting the file {name} from AWS...")
def create_server(self, region: str):
print(f"Creating a new server in the following region: {region}...")
def list_servers(self, region: str):
print(f"List all servers available in {region}...")
def get_cdna_address(self):
print("AWS CDNA address: ...")
class Dropbox(CloudStorageProvider):
def store_file(self, name: str):
print(f"Storing the file {name} in Dropbox...")
def get_file(self, name: str):
print(f"Getting the file {name} from Dropbox...")
if __name__ == "__main__":
amazon = Amazon()
dropbox = Dropbox()
amazon.get_file("Baba")
dropbox.store_file("Baba")
| 22.40625 | 85 | 0.642259 | 172 | 1,434 | 5.215116 | 0.296512 | 0.053512 | 0.060201 | 0.06689 | 0.305463 | 0.263099 | 0.263099 | 0.263099 | 0.263099 | 0.263099 | 0 | 0 | 0.232915 | 1,434 | 63 | 86 | 22.761905 | 0.815455 | 0.059275 | 0 | 0.35 | 0 | 0 | 0.208799 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0 | 0.025 | 0 | 0.45 | 0.175 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58cc767b16ca728bd586a1ff7e220380c8ce5e1a | 3,529 | py | Python | shp_code/prec_reformat.py | anahm/inferring-population-preferences | 1eec9c6966e65c615f3cf5bd769ab121369b926d | [
"Unlicense"
] | 4 | 2016-10-29T12:10:48.000Z | 2016-11-06T02:25:09.000Z | shp_code/prec_reformat.py | anahm/inferring-population-preferences | 1eec9c6966e65c615f3cf5bd769ab121369b926d | [
"Unlicense"
] | null | null | null | shp_code/prec_reformat.py | anahm/inferring-population-preferences | 1eec9c6966e65c615f3cf5bd769ab121369b926d | [
"Unlicense"
] | null | null | null | """
prec_reformat.py
Taking state data and having each line be a precinct's voting results and candidate
cf-scores (rather than each line be each candidate per precinct.
| prec_id | cf_score_0 | num_votes_0 | cf_score_1 | num_votes_1 |
"""
import math
import numpy as np
import pandas as pd
from prec_cd import prec_cd_main
from check_data import check_main
def convert_by_prec(old_df, state, year, dirname):
precs = []
years = []
cf_score_0 = []
num_votes_0 = []
cf_score_1 = []
num_votes_1 = []
# group by precinct (year assumed)
for key, group in old_df.groupby(['geoid']):
cf_iter = iter(group['cf_score'])
votes_iter = iter(group['num_votes'])
nxt_score = cf_iter.next()
if math.isnan(nxt_score):
nxt_score = 0
cf_0 = nxt_score
nv_0 = votes_iter.next()
try:
nxt_score = cf_iter.next()
if math.isnan(nxt_score):
nxt_score = 0
cf_1 = nxt_score
nv_1 = votes_iter.next()
# enforcing the idea that cfscore0 < cfscore1
precs.append(key)
if cf_1 < cf_0:
cf_score_0.append(cf_1)
num_votes_0.append(nv_1)
cf_score_1.append(cf_0)
num_votes_1.append(nv_0)
else:
cf_score_0.append(cf_0)
num_votes_0.append(nv_0)
cf_score_1.append(cf_1)
num_votes_1.append(nv_1)
except StopIteration:
            # fewer than two candidates in this precinct; drop it
pass
# use arrays to create dataframe
new_df = pd.DataFrame({
'cf_score_0': cf_score_0,
'num_votes_0': num_votes_0,
'cf_score_1': cf_score_1,
'num_votes_1': num_votes_1,
'geoid': precs},
index=None)
new_df['tot_votes'] = new_df['num_votes_0'] + new_df['num_votes_1']
new_df['midpoint'] = (new_df['cf_score_0'] + new_df['cf_score_1']) / 2.0
# write new dataframe out to csv
outfile = '%s/precline_%s_house_%s.csv' % (dirname, state, year)
new_df.to_csv(outfile)
return outfile
"""
data_clean()
Function to parse out certain types of data that are not useful in our
results.
# NOTE: overwrites the old file, since it is unnecessary
"""
def data_clean(precline_file):
df = pd.read_csv(precline_file, index_col = 0)
# remove all precincts with tot_votes == 0
df = df[df['tot_votes'] > 0]
# remove all uncontested candidates (cf_score_1 == 0)
df = df[df['cf_score_1'] != 0]
df.to_csv(precline_file, index=False)
"""
prec_reformat_main()
Function that does the bulk of the original main function and can be called
by the commandline.
@param: state, year
@return: location of new precline file
"""
def prec_reformat_main(state, year):
prec_cd_main(state, year)
csv_dir = '../data/%s_data/%s_%s' % (state, state, year)
infile = '%s/%s_house_%s_final.csv' % (csv_dir, state, year)
outfile = '%s/precline_%s_house_%s.csv' % (csv_dir, state, year)
# read in file
old_df = pd.read_csv(infile)
convert_by_prec(old_df, state, year, csv_dir)
data_clean(outfile)
print 'Precinct data written to: %s' % outfile
rep_col = 't_USH_R_%s' % year
dem_col = 't_USH_D_%s' % year
check_main(outfile, state, year, rep_col, dem_col)
def main():
state = raw_input('State: ')
year = raw_input('Year: ')
prec_reformat_main(state, year)
if __name__ == "__main__":
main()
| 26.140741 | 83 | 0.616322 | 534 | 3,529 | 3.752809 | 0.264045 | 0.059381 | 0.035928 | 0.02495 | 0.29491 | 0.16018 | 0.143713 | 0.081836 | 0.081836 | 0.081836 | 0 | 0.021535 | 0.276282 | 3,529 | 134 | 84 | 26.335821 | 0.763117 | 0.072258 | 0 | 0.081081 | 0 | 0 | 0.118823 | 0.037344 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.013514 | 0.067568 | null | null | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58ce3480a9b43387f9f12525806c69631b6a2afa | 1,668 | py | Python | scripts/make_fasta.py | orionzhou/snk-rnaseq | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | [
"MIT"
] | null | null | null | scripts/make_fasta.py | orionzhou/snk-rnaseq | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | [
"MIT"
] | null | null | null | scripts/make_fasta.py | orionzhou/snk-rnaseq | 5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a | [
"MIT"
] | null | null | null | from snakemake import shell
input, output, params, threads, w, config = snakemake.input, snakemake.output, snakemake.params, snakemake.threads, snakemake.wildcards, snakemake.config
genome = w.genome
params.hybrid = config['x'][genome]['hybrid']
opt = params.opt
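# Pipeline sketch, inferred from the shell blocks below: rename the raw
# contigs (merging short ones except for Mt_R108), record the old<->new
# coordinate mapping as forward/backward liftOver chains (hybrid genomes
# are simply concatenated with empty chains), then index the renamed fasta
# and derive chromosome BED/size and gap tables.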
shell("""
rm -rf {output.fna}* {output.fai}*
rm -rf {output.chrom_bed} {output.chrom_size} {output.gap}
mkdir -p {params.wdir}/{params.odir}
cd {params.wdir}/{params.odir}
rm -rf raw.fna.* renamed* map* raw.sizes
""")
merge_tag = '--merge_short' if w.genome != 'Mt_R108' else ''
if params.hybrid:
shell("""
cat {input} > {params.wdir}/{params.odir}/renamed.fna
cd {params.wdir}/{params.odir}
fasta.py size renamed.fna > renamed.sizes
touch mapf.chain mapb.chain
""")
else:
params.gap = int(config['x'][genome]['gap'])
params.prefix = config['x'][genome]['prefix']
shell("""
cd {params.wdir}/{params.odir}
ln -sf ../download/raw.fna raw.fna
fasta.py size raw.fna > raw.sizes
fasta.py rename raw.fna renamed.fna mapf.bed mapb.bed \
--opt {params.opt} {merge_tag} \
--gap {params.gap} --prefix_chr {params.prefix}
fasta.py size renamed.fna > renamed.sizes
chain.py fromBed mapf.bed raw.sizes renamed.sizes > mapf.chain
chainSwap mapf.chain mapb.chain
""")
shell("""
cd {params.wdir}
ln -sf {params.odir}/renamed.fna 10_genome.fna
cd ..
samtools faidx {output.fna}
fasta.py size --bed {output.fna} > {output.chrom_bed}
cut -f1,3 {output.chrom_bed} > {output.chrom_size}
fasta.py gaps {output.fna} > {output.gap}
""")
| 32.076923 | 153 | 0.631894 | 232 | 1,668 | 4.49569 | 0.267241 | 0.057526 | 0.076702 | 0.095877 | 0.182167 | 0.118888 | 0.063279 | 0 | 0 | 0 | 0 | 0.005295 | 0.207434 | 1,668 | 51 | 154 | 32.705882 | 0.783661 | 0 | 0 | 0.302326 | 0 | 0 | 0.714029 | 0.103118 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.023256 | 0 | 0.023256 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58cfe77be2b1a529ec5b49496f3549cf64c84e22 | 1,107 | py | Python | plugins/python/test/testCustomEntity.py | shotgunsoftware/cplusplus-api | 576aab4ae266e37ba80da23f82fe9ed08b9894e4 | [
"BSD-3-Clause"
] | 3 | 2015-04-04T03:08:52.000Z | 2021-01-09T00:09:25.000Z | plugins/python/test/testCustomEntity.py | shotgunsoftware/cplusplus-api | 576aab4ae266e37ba80da23f82fe9ed08b9894e4 | [
"BSD-3-Clause"
] | null | null | null | plugins/python/test/testCustomEntity.py | shotgunsoftware/cplusplus-api | 576aab4ae266e37ba80da23f82fe9ed08b9894e4 | [
"BSD-3-Clause"
] | 4 | 2015-04-04T03:08:57.000Z | 2021-10-03T14:59:23.000Z | #!/usr/bin/env python
import sys
from shotgun import *
try:
if len(sys.argv) > 1:
sg = Shotgun(sys.argv[1])
else:
sg = Shotgun()
#################################################################
# Find CustomEntity01 entities
#################################################################
print "*" * 40, "findEntities - CustomEntity01", "*" * 40
for entity in sg.findEntities("CustomEntity01", FilterBy(), 5):
#print entity
#print "-" * 40
print "%s : %s" % (entity.sgProjectCode(), entity.getAttrValue("code"))
#################################################################
# Find CustomEntity02 entities
#################################################################
print "*" * 40, "findEntities - CustomEntity02", "*" * 40
for entity in sg.findEntities("CustomEntity02", FilterBy(), 5):
#print entity
#print "-" * 40
print "%s : %s" % (entity.sgProjectCode(), entity.getAttrValue("code"))
except SgError, e:
print "SgError:", e
except Exception, e:
print "Error:", e
| 31.628571 | 79 | 0.443541 | 91 | 1,107 | 5.395604 | 0.395604 | 0.057026 | 0.032587 | 0.10998 | 0.415479 | 0.415479 | 0.305499 | 0.305499 | 0.305499 | 0.305499 | 0 | 0.031854 | 0.205962 | 1,107 | 34 | 80 | 32.558824 | 0.526735 | 0.117435 | 0 | 0.117647 | 0 | 0 | 0.177715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.117647 | null | null | 0.352941 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58df035c2ab9c1b7f4e6cbacccfa792d055318cf | 9,362 | py | Python | Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py | lawrence910426/ProgrammingII_FinalProject | 493183dc2a674310e65bffe3a5e00395e8bebb4b | [
"MIT"
] | null | null | null | Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py | lawrence910426/ProgrammingII_FinalProject | 493183dc2a674310e65bffe3a5e00395e8bebb4b | [
"MIT"
] | null | null | null | Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py | lawrence910426/ProgrammingII_FinalProject | 493183dc2a674310e65bffe3a5e00395e8bebb4b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/graph_debug_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/graph_debug_info.proto',
package='tensorflow',
syntax='proto3',
serialized_options=_b('\n\030org.tensorflow.frameworkB\024GraphDebugInfoProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001'),
serialized_pb=_b('\n/tensorflow/core/protobuf/graph_debug_info.proto\x12\ntensorflow\"\xd5\x02\n\x0eGraphDebugInfo\x12\r\n\x05\x66iles\x18\x01 \x03(\t\x12\x36\n\x06traces\x18\x02 \x03(\x0b\x32&.tensorflow.GraphDebugInfo.TracesEntry\x1aX\n\x0b\x46ileLineCol\x12\x12\n\nfile_index\x18\x01 \x01(\x05\x12\x0c\n\x04line\x18\x02 \x01(\x05\x12\x0b\n\x03\x63ol\x18\x03 \x01(\x05\x12\x0c\n\x04\x66unc\x18\x04 \x01(\t\x12\x0c\n\x04\x63ode\x18\x05 \x01(\t\x1aL\n\nStackTrace\x12>\n\x0e\x66ile_line_cols\x18\x01 \x03(\x0b\x32&.tensorflow.GraphDebugInfo.FileLineCol\x1aT\n\x0bTracesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.tensorflow.GraphDebugInfo.StackTrace:\x02\x38\x01\x42\x8c\x01\n\x18org.tensorflow.frameworkB\x14GraphDebugInfoProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3')
)
_GRAPHDEBUGINFO_FILELINECOL = _descriptor.Descriptor(
name='FileLineCol',
full_name='tensorflow.GraphDebugInfo.FileLineCol',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_index', full_name='tensorflow.GraphDebugInfo.FileLineCol.file_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='line', full_name='tensorflow.GraphDebugInfo.FileLineCol.line', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='col', full_name='tensorflow.GraphDebugInfo.FileLineCol.col', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='func', full_name='tensorflow.GraphDebugInfo.FileLineCol.func', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='tensorflow.GraphDebugInfo.FileLineCol.code', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=153,
serialized_end=241,
)
_GRAPHDEBUGINFO_STACKTRACE = _descriptor.Descriptor(
name='StackTrace',
full_name='tensorflow.GraphDebugInfo.StackTrace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_line_cols', full_name='tensorflow.GraphDebugInfo.StackTrace.file_line_cols', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=243,
serialized_end=319,
)
_GRAPHDEBUGINFO_TRACESENTRY = _descriptor.Descriptor(
name='TracesEntry',
full_name='tensorflow.GraphDebugInfo.TracesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.GraphDebugInfo.TracesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.GraphDebugInfo.TracesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=321,
serialized_end=405,
)
_GRAPHDEBUGINFO = _descriptor.Descriptor(
name='GraphDebugInfo',
full_name='tensorflow.GraphDebugInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='files', full_name='tensorflow.GraphDebugInfo.files', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='traces', full_name='tensorflow.GraphDebugInfo.traces', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GRAPHDEBUGINFO_FILELINECOL, _GRAPHDEBUGINFO_STACKTRACE, _GRAPHDEBUGINFO_TRACESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=405,
)
_GRAPHDEBUGINFO_FILELINECOL.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO_STACKTRACE.fields_by_name['file_line_cols'].message_type = _GRAPHDEBUGINFO_FILELINECOL
_GRAPHDEBUGINFO_STACKTRACE.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO_TRACESENTRY.fields_by_name['value'].message_type = _GRAPHDEBUGINFO_STACKTRACE
_GRAPHDEBUGINFO_TRACESENTRY.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO.fields_by_name['traces'].message_type = _GRAPHDEBUGINFO_TRACESENTRY
DESCRIPTOR.message_types_by_name['GraphDebugInfo'] = _GRAPHDEBUGINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GraphDebugInfo = _reflection.GeneratedProtocolMessageType('GraphDebugInfo', (_message.Message,), {
'FileLineCol' : _reflection.GeneratedProtocolMessageType('FileLineCol', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_FILELINECOL,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.FileLineCol)
})
,
'StackTrace' : _reflection.GeneratedProtocolMessageType('StackTrace', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_STACKTRACE,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.StackTrace)
})
,
'TracesEntry' : _reflection.GeneratedProtocolMessageType('TracesEntry', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_TRACESENTRY,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.TracesEntry)
})
,
'DESCRIPTOR' : _GRAPHDEBUGINFO,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo)
})
_sym_db.RegisterMessage(GraphDebugInfo)
_sym_db.RegisterMessage(GraphDebugInfo.FileLineCol)
_sym_db.RegisterMessage(GraphDebugInfo.StackTrace)
_sym_db.RegisterMessage(GraphDebugInfo.TracesEntry)
DESCRIPTOR._options = None
_GRAPHDEBUGINFO_TRACESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 39.838298 | 888 | 0.760735 | 1,117 | 9,362 | 6.081468 | 0.151298 | 0.040041 | 0.037097 | 0.06595 | 0.592522 | 0.493891 | 0.493891 | 0.475784 | 0.470926 | 0.470926 | 0 | 0.035446 | 0.117069 | 9,362 | 234 | 889 | 40.008547 | 0.786354 | 0.053194 | 0 | 0.595122 | 1 | 0.009756 | 0.244748 | 0.202169 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.02439 | 0 | 0.02439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58e63151e272298d99abe2311270c00ae4f753a6 | 2,109 | py | Python | tests/common/bridgecrew/vulnerability_scanning/conftest.py | vangundy-jason-pfg/checkov | 2fb50908f62390c98dda665f1fa94fe24806b654 | [
"Apache-2.0"
] | 1 | 2021-02-13T15:24:42.000Z | 2021-02-13T15:24:42.000Z | tests/common/bridgecrew/vulnerability_scanning/conftest.py | vangundy-jason-pfg/checkov | 2fb50908f62390c98dda665f1fa94fe24806b654 | [
"Apache-2.0"
] | 7 | 2021-04-12T06:54:07.000Z | 2022-03-21T14:04:14.000Z | tests/common/bridgecrew/vulnerability_scanning/conftest.py | vangundy-jason-pfg/checkov | 2fb50908f62390c98dda665f1fa94fe24806b654 | [
"Apache-2.0"
] | 1 | 2021-12-16T03:09:55.000Z | 2021-12-16T03:09:55.000Z | from typing import Dict, Any
import pytest
from checkov.common.bridgecrew.bc_source import SourceType
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration, bc_integration
@pytest.fixture()
def mock_bc_integration() -> BcPlatformIntegration:
bc_integration.bc_api_key = "abcd1234-abcd-1234-abcd-1234abcd1234"
bc_integration.setup_bridgecrew_credentials(
repo_id="bridgecrewio/checkov",
skip_fixes=True,
skip_suppressions=True,
skip_policy_download=True,
source=SourceType("Github", False),
source_version="1.0",
repo_branch="master",
)
return bc_integration
@pytest.fixture()
def scan_result() -> Dict[str, Any]:
return {
"repository": "/abs_path/to/app/requirements.txt",
"passed": True,
"packages": {"type": "python", "name": "django", "version": "1.2", "path": "/abs_path/to/app/requirements.txt"},
"complianceIssues": None,
"complianceDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
"vulnerabilities": [
{
"id": "CVE-2019-19844",
"status": "fixed in 3.0.1, 2.2.9, 1.11.27",
"cvss": 9.8,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover.",
"severity": "critical",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
"riskFactors": ["Critical severity", "Has fix", "Attack complexity: low", "Attack vector: network"],
"impactedVersions": ["\u003c1.11.27"],
"publishedDate": "2019-12-18T20:15:00+01:00",
"discoveredDate": "2019-12-18T19:15:00Z",
"fixDate": "2019-12-18T20:15:00+01:00",
}
],
"vulnerabilityDistribution": {"critical": 1, "high": 0, "medium": 0, "low": 0, "total": 0},
}
| 40.557692 | 120 | 0.579896 | 248 | 2,109 | 4.842742 | 0.524194 | 0.054122 | 0.02831 | 0.044963 | 0.161532 | 0.113239 | 0.068276 | 0.036636 | 0 | 0 | 0 | 0.086569 | 0.255097 | 2,109 | 51 | 121 | 41.352941 | 0.677912 | 0 | 0 | 0.044444 | 0 | 0.044444 | 0.398767 | 0.11522 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | true | 0.022222 | 0.088889 | 0.022222 | 0.177778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58e6b8cbdb9f5deb8475e765553e3c1da2be8892 | 1,038 | py | Python | image_matting/modules/trimap_generator/trimap_generator_application.py | image-matting/backend | bbf502539cf70822dadb5eded31529d5e66c6276 | [
"Apache-2.0"
] | 1 | 2022-01-22T04:12:48.000Z | 2022-01-22T04:12:48.000Z | image_matting/modules/trimap_generator/trimap_generator_application.py | image-matting/backend | bbf502539cf70822dadb5eded31529d5e66c6276 | [
"Apache-2.0"
] | 4 | 2021-12-23T14:02:17.000Z | 2022-01-26T18:44:06.000Z | image_matting/modules/trimap_generator/trimap_generator_application.py | image-matting/backend | bbf502539cf70822dadb5eded31529d5e66c6276 | [
"Apache-2.0"
] | null | null | null | import argparse
from pathlib import Path
from cv2 import cv2
from trimap import generate_trimap
from trimap_output_utils import save_trimap_output
def main():
args = parse_args()
image_path = args.image
output_directory_path = args.output
image_path = Path(image_path)
if not image_path.is_file():
raise RuntimeError(f'The provided image path "{image_path}" does not exist!')
image_filename = image_path.stem
saliency_image_path = image_path.as_posix()
trimap_image = generate_trimap(saliency_image_path, kernel_size=3, iterations=20)
save_trimap_output(trimap_image, image_filename, output_directory_path)
def parse_args():
parser = argparse.ArgumentParser(description='Trimap Generator Application')
parser.add_argument('-i', '--image', required=True, type=str, help='path to input image')
parser.add_argument('-o', '--output', required=False, default='.', type=str, help='path to output directory')
return parser.parse_args()
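# Hedged CLI example (file names are placeholders):
#   python trimap_generator_application.py -i saliency_map.png -o ./trimaps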
if __name__ == "__main__":
main()
| 29.657143 | 113 | 0.739884 | 141 | 1,038 | 5.141844 | 0.411348 | 0.124138 | 0.053793 | 0.049655 | 0.046897 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005721 | 0.157996 | 1,038 | 34 | 114 | 30.529412 | 0.823799 | 0 | 0 | 0 | 1 | 0 | 0.147399 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.217391 | 0 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58e7d15456033fa62d2766b6d09f022fb1eb2ace | 3,137 | py | Python | spacy/lang/nl/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 12 | 2019-03-20T20:43:47.000Z | 2020-04-13T11:10:52.000Z | spacy/lang/nl/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 13 | 2018-06-05T11:54:40.000Z | 2019-07-02T11:33:14.000Z | spacy/lang/nl/stop_words.py | cedar101/spaCy | 66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95 | [
"MIT"
] | 1 | 2020-05-12T16:00:38.000Z | 2020-05-12T16:00:38.000Z | # coding: utf8
from __future__ import unicode_literals
# The original stop words list (added in f46ffe3) was taken from
# http://www.damienvanholten.com/downloads/dutch-stop-words.txt
# and consisted of about 100 tokens.
# In order to achieve parity with some of the better-supported
# languages, e.g., English, French, and German, this original list has been
# extended with 200 additional tokens. The main source of inspiration was
# https://raw.githubusercontent.com/stopwords-iso/stopwords-nl/master/stopwords-nl.txt.
# However, quite a bit of manual editing has taken place as well.
# Tokens whose status as a stop word is not entirely clear were admitted or
# rejected by deferring to their counterparts in the stop words lists for English
# and French. Similarly, those lists were used to identify and fill in gaps so
# that -- in principle -- each token contained in the English stop words list
# should have a Dutch counterpart here.
STOP_WORDS = set("""
aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaangde aangezien achter achterna
afgelopen aldus alhoewel anderzijds
ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven
bovenal bovendien bovenstaand buiten
daar dan dat de der den deze die dit doch doen door dus daarheen daarin daarna daarnet daarom daarop des dezelfde dezen
dien dikwijls doet doorgaand doorgaans
een eens en er echter enige eerder eerst eerste eersten effe eigen elk elke enkel enkele enz erdoor etc even eveneens
evenwel
ff
ge geen geweest gauw gedurende gegeven gehad geheel gekund geleden gelijk gemogen geven geweest gewoon gewoonweg
geworden gij
haar had heb hebben heeft hem het hier hij hoe hun hadden hare hebt hele hen hierbeneden hierboven hierin hoewel hun
iemand iets ik in is idd ieder ikke ikzelf indien inmiddels inz inzake
ja je jou jouw jullie jezelf jij jijzelf jouwe juist
kan kon kunnen klaar konden krachtens kunnen kunt
lang later liet liever
maar me meer men met mij mijn moet mag mede meer meesten mezelf mijzelf min minder misschien mocht mochten moest moesten
moet moeten mogelijk mogen
na naar niet niets nog nu nabij nadat net nogal nooit nr nu
of om omdat ons ook op over omhoog omlaag omstreeks omtrent omver onder ondertussen ongeveer onszelf onze ooit opdat
opnieuw opzij over overigens
pas pp precies prof publ
reeds rond rondom
sedert sinds sindsdien slechts sommige spoedig steeds
‘t 't te tegen toch toen tot tamelijk ten tenzij ter terwijl thans tijdens toe totdat tussen
u uit uw uitgezonderd uwe uwen
van veel voor vaak vanaf vandaan vanuit vanwege veeleer verder verre vervolgens vgl volgens vooraf vooral vooralsnog
voorbij voordat voordien voorheen voorop voort voorts vooruit vrij vroeg
want waren was wat we wel werd wezen wie wij wil worden waar waarom wanneer want weer weg wegens weinig weinige weldra
welk welke welken werd werden wiens wier wilde wordt
zal ze zei zelf zich zij zijn zo zonder zou zeer zeker zekere zelfde zelfs zichzelf zijnde zijne zo’n zoals zodra zouden
zoveel zowat zulk zulke zulks zullen zult
""".split())
| 42.391892 | 120 | 0.808734 | 507 | 3,137 | 4.99211 | 0.871795 | 0.01778 | 0.010273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003828 | 0.167357 | 3,137 | 73 | 121 | 42.972603 | 0.965161 | 0.27861 | 0 | 0 | 0 | 0.171429 | 0.959964 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.028571 | 0 | 0.028571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58f3c7c8febd7b51f53b623ee90e4c562e1d0bd1 | 659 | py | Python | easy_ArrayAdditionI.py | GabrielGhe/CoderbyteChallenges | 5601dbc24c95a65fed04896de2f534417c2e730d | [
"MIT"
] | 1 | 2020-11-04T15:30:18.000Z | 2020-11-04T15:30:18.000Z | easy_ArrayAdditionI.py | GabrielGhe/CoderbyteChallenges | 5601dbc24c95a65fed04896de2f534417c2e730d | [
"MIT"
] | null | null | null | easy_ArrayAdditionI.py | GabrielGhe/CoderbyteChallenges | 5601dbc24c95a65fed04896de2f534417c2e730d | [
"MIT"
] | null | null | null | import itertools
#################################################
# This function will see if there is any #
# possible combination of the numbers in #
# the array that will give the largest number #
#################################################
def ArrayAdditionI(arr):
#sort, remove last element
result = "false"
arr.sort()
large = arr[-1]
arr = arr[:-1]
#go through every combination and see if sum = large
for x in range(2,len(arr) + 1):
for comb in itertools.combinations(arr,x):
if large == sum(comb):
result = "true"
break
return result
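# e.g. ArrayAdditionI([3, 5, -1, 8, 12]) -> "true"  (-1 + 5 + 8 == 12)
#      ArrayAdditionI([5, 7, 16, 1, 2])  -> "false" (no combination sums to 16)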
print ArrayAdditionI([int(x) for x in raw_input().split(",")])
| 26.36 | 54 | 0.53566 | 78 | 659 | 4.512821 | 0.641026 | 0.034091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007952 | 0.236722 | 659 | 24 | 55 | 27.458333 | 0.691849 | 0.327769 | 0 | 0 | 0 | 0 | 0.026786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.076923 | null | null | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
58f8d01058e75992d07c8d9e6c624ed7a5775471 | 771 | py | Python | script/solr_unauthorized_access.py | 5up3rc/Vxscan | 0d2cae446f6502b51596853be3514c7c4c62809c | [
"Apache-2.0"
] | 2 | 2019-12-05T01:58:22.000Z | 2019-12-14T09:19:28.000Z | script/solr_unauthorized_access.py | 5up3rc/Vxscan | 0d2cae446f6502b51596853be3514c7c4c62809c | [
"Apache-2.0"
] | null | null | null | script/solr_unauthorized_access.py | 5up3rc/Vxscan | 0d2cae446f6502b51596853be3514c7c4c62809c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# project = https://github.com/Xyntax/POC-T
# author = i@cdxy.me
"""
Apache Solr 未授权访问PoC
(iterate_path函数使用场景示例)
Usage
python POC-T.py -s solr-unauth -iF target.txt
python POC-T.py -s solr-unauth -aZ "solr country:cn"
"""
from lib.verify import verify
from lib.random_header import get_ua
import requests
vuln = ['solr']
def check(ip, ports, apps):
if verify(vuln, ports, apps):
try:
url = 'http://' + ip
url = url + '/solr/'
g = requests.get(url, headers=get_ua(), timeout=5)
            if g.status_code == 200 and 'Solr Admin' in g.content and 'Dashboard' in g.content:
                return 'Apache Solr Admin leak'
except Exception:
pass
| 24.09375 | 95 | 0.608301 | 112 | 771 | 4.142857 | 0.616071 | 0.025862 | 0.043103 | 0.051724 | 0.099138 | 0.099138 | 0.099138 | 0 | 0 | 0 | 0 | 0.008772 | 0.2607 | 771 | 31 | 96 | 24.870968 | 0.805263 | 0.335927 | 0 | 0 | 0 | 0 | 0.118 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.071429 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
58f91a9f5c9302c8e95efa47c83b819f09e32089 | 1,248 | py | Python | conanfile.py | midurk/conan-rapidxml | df93616a87ba41edd9def914f765fd8eae0007c5 | [
"MIT"
] | null | null | null | conanfile.py | midurk/conan-rapidxml | df93616a87ba41edd9def914f765fd8eae0007c5 | [
"MIT"
] | null | null | null | conanfile.py | midurk/conan-rapidxml | df93616a87ba41edd9def914f765fd8eae0007c5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
class RapiXMLConan(ConanFile):
name = "rapidxml"
version = "1.13"
description = "RapidXml is an attempt to create the fastest XML parser possible"
url = "https://github.com/bincrafters/conan-rapidxml"
homepage = "http://rapidxml.sourceforge.net"
author = "Bincrafters <bincrafters@gmail.com>"
license = ("BSL-1.0", "MIT")
exports = ["LICENSE.md"]
exports_sources = ["CMakeLists.txt", "name_lookup_changes_fix.patch"]
source_subfolder = "source_subfolder"
no_copy_source = True
def source(self):
source_url = "https://cfhcable.dl.sourceforge.net/project/rapidxml/rapidxml/rapidxml%20"
tools.get("{0}{1}/{2}-{3}.zip".format(source_url, self.version, self.name, self.version))
os.rename(self.name + "-" + self.version, self.source_subfolder)
tools.patch(base_path=self.source_subfolder, patch_file="name_lookup_changes_fix.patch")
def package(self):
self.copy(pattern="license.txt", dst="licenses", src=self.source_subfolder)
self.copy(pattern="*.hpp", dst="include", src=self.source_subfolder)
def package_id(self):
self.info.header_only()
| 37.818182 | 97 | 0.684295 | 162 | 1,248 | 5.141975 | 0.537037 | 0.108043 | 0.091236 | 0.048019 | 0.060024 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011538 | 0.166667 | 1,248 | 32 | 98 | 39 | 0.789423 | 0.033654 | 0 | 0 | 0 | 0 | 0.347176 | 0.067276 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.083333 | 0 | 0.708333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
4506dc61f56a8eae8242703dae9838d15d5a49a2 | 2,327 | py | Python | test/test_session.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | 1 | 2020-07-25T13:53:35.000Z | 2020-07-25T13:53:35.000Z | test/test_session.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | null | null | null | test/test_session.py | Sunmxt/UESTC-EAMS | 760a7387a5d73967e45a0b9d211acb383bb50fe1 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
'''
Test for session module
'''
import unittest
import uestc_eams
from .mock_server import LoginMockServer
from .utils import HookedMethod, MakeResponse
mock_login = LoginMockServer()
class TestSession(unittest.TestCase):
@mock_login.Patch
def test_Session(self):
self.__session = uestc_eams.EAMSSession()
# Try login
print('--> Login test <--')
self.assertTrue(self.__session.Login('2015070804011', '104728'))
self.assertTrue(mock_login.Logined)
self.assertEqual(mock_login.GetIndexCount, 1)
print('passed.', end = '\n\n')
# Test expire session
print('--> test expired cookies <--')
test_url = 'http://eams.uestc.edu.cn/eams'
mock_login.ExpireTestTiggered = False
rep = self.__session.TryRequestGet(test_url)
self.assertTrue(mock_login.ExpireTestTiggered)
self.assertTrue(mock_login.Logined)
self.assertNotEqual(-1, rep.url.find(test_url))
print('passed.', end = '\n\n')
# Test expire session with no redirects following
print('--> test expired cookies (no redirects following) <--')
test_url = 'http://eams.uestc.edu.cn/eams/redirect_test'
mock_login.ExpireTestTiggered = False
rep = self.__session.TryRequestGet(test_url, allow_redirects = False)
self.assertTrue(mock_login.ExpireTestTiggered)
self.assertTrue(mock_login.Logined)
self.assertNotEqual(-1, rep.url.find(test_url))
self.assertEqual(rep.status_code, 302)
print('passed.', end = '\n\n')
# Test expire session with HTTP 200 redirects.
print('--> test expired cookies (200 redirect) <--')
test_url = 'http://eams.uestc.edu.cn/eams/200redirect'
mock_login.ExpireTestTiggered = False
mock_login._200RedirectTiggered = False
rep = self.__session.TryRequestGet(test_url)
self.assertEqual(mock_login.ExpireTestTiggered, True)
self.assertEqual(mock_login._200RedirectTiggered, True)
print('passed.', end = '\n\n')
# Test expire session with redirect inside page.
print('--> test logout <--')
self.assertTrue(self.__session.Logout())
self.assertFalse(mock_login.Logined)
print('passed.', end = '\n\n')
| 34.731343 | 77 | 0.65578 | 261 | 2,327 | 5.678161 | 0.260536 | 0.091093 | 0.109312 | 0.077598 | 0.452092 | 0.441296 | 0.418354 | 0.418354 | 0.308367 | 0.233468 | 0 | 0.022087 | 0.221745 | 2,327 | 66 | 78 | 35.257576 | 0.796245 | 0.090675 | 0 | 0.395349 | 0 | 0 | 0.165951 | 0 | 0 | 0 | 0 | 0 | 0.325581 | 1 | 0.023256 | false | 0.116279 | 0.093023 | 0 | 0.139535 | 0.232558 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
450a6b5edd6e30d83bb61609d61f4702dee03bf9 | 23,457 | py | Python | hybrideb/_bineb.py | beckermr/hybrideb | a72d712020943dbbed35cb244f9e7f13fc6b2d4d | [
"BSD-3-Clause"
] | null | null | null | hybrideb/_bineb.py | beckermr/hybrideb | a72d712020943dbbed35cb244f9e7f13fc6b2d4d | [
"BSD-3-Clause"
] | null | null | null | hybrideb/_bineb.py | beckermr/hybrideb | a72d712020943dbbed35cb244f9e7f13fc6b2d4d | [
"BSD-3-Clause"
] | null | null | null | import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
HAVE_PYGSL = False
try:
import pygsl.integrate
import pygsl.sf
HAVE_PYGSL = True
except ImportError:
pass
class BinEB(object):
def __init__(
self, tmin, tmax, Nb, windows=None, linear=False, useArcmin=True, fname=None
):
if fname is not None:
self.read_data(fname)
else:
# set basic params
if useArcmin:
am2r = np.pi / 180.0 / 60.0
else:
am2r = 1.0
self.Nb = Nb
self.L = tmin * am2r
self.H = tmax * am2r
if linear:
self.Lb = (self.H - self.L) / Nb * np.arange(Nb) + self.L
self.Hb = (self.H - self.L) / Nb * (np.arange(Nb) + 1.0) + self.L
else:
self.Lb = np.exp(np.log(self.H / self.L) / Nb * np.arange(Nb)) * self.L
self.Hb = (
np.exp(np.log(self.H / self.L) / Nb * (np.arange(Nb) + 1.0))
* self.L
)
self.have_ell_win = False
# make the bin window functions
if windows is None:
def _make_geomwin(L, H):
return lambda x: 2.0 * x / (H * H - L * L)
self.windows = []
for i in range(self.Nb):
self.windows.append(_make_geomwin(self.Lb[i], self.Hb[i]))
else:
def _make_normwin(winf, norm):
return lambda x: winf(x / am2r) / norm
self.windows = []
assert (
len(windows) == Nb
), "binEB requires as many windows as angular bins!"
for i in range(self.Nb):
twin = _make_normwin(windows[i], 1.0)
norm, err = scipy.integrate.quad(twin, self.Lb[i], self.Hb[i])
self.windows.append(_make_normwin(windows[i], norm))
# get fa and fb
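            # With the normalization above, fa_i = \int W_i(t) dt = 1;
            # fb_i = \int W_i(t) t^2 dt is the second moment of window i.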
self.fa = np.zeros(self.Nb)
self.fa[:] = 1.0
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def fb_int(x, args):
win = args[0]
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(fb_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.fb[i] = val
else:
def fb_int(x, win):
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
fb_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
self.fb[i] = val
self.fa_on = self.fa / np.sqrt(np.sum(self.fa * self.fa))
self.fb_on = self.fb - self.fa * np.sum(self.fa * self.fb) / np.sum(
self.fa * self.fa
)
self.fb_on = self.fb_on / np.sqrt(np.sum(self.fb_on * self.fb_on))
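            # Gram-Schmidt: fa_on is the unit vector along fa; fb_on is fb
            # with its component along fa projected out, then renormalized.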
# get Mplus matrix
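            # Hedged reading of the construction below: for p > t the
            # kernel (4/p^2 - 12 t^2/p^4) is integrated against both bin
            # windows (closed form for the default geometric windows,
            # numeric dblquad otherwise) and normalized by
            # knorm_k = \int W_k(t)^2 / t dt computed above.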
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def knorm_int(x, args):
win = args[0]
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(knorm_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, args):
win = args[0]
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv2_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv2[i] = val
def inv4_int(x, args):
win = args[0]
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv4_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv4[i] = val
else:
def knorm_int(x, win):
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
knorm_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, win):
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv2_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv2[i] = val
def inv4_int(x, win):
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv4_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv4[i] = val
if HAVE_PYGSL:
def _mp_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if p > t:
val = (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mp_int(p, t, k, i):
if p > t:
return (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mp = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i < k:
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[i] * self.Hb[i]
- self.Lb[i] * self.Lb[i]
)
* np.log(self.Hb[k] / self.Lb[k])
+ 3.0
/ 2.0
* (
np.power(self.Hb[i], 4.0)
- np.power(self.Lb[i], 4.0)
)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
if k == i:
self.mp[k, i] += 1.0
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
-0.5
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
- 2.0
* self.Lb[i]
* self.Lb[i]
* np.log(self.Hb[k] / self.Lb[k])
- 3.0
/ 2.0
* np.power(self.Lb[i], 4.0)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
else:
if k == i:
self.mp[k, i] += 1.0
val = dblquad(
_mp_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mp[k, i] += val / knorm[k]
if i < k:
self.mp[k, i] = (
4.0 * inv2[k] - 12.0 * inv4[k] * self.fb[i]
) / knorm[k]
# sys.stdout.write("\n")
if HAVE_PYGSL:
def _mm_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if t > p:
val = (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mm_int(p, t, k, i):
if t > p:
return (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mm = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i > k:
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
* np.log(self.Hb[i] / self.Lb[i])
+ 3.0
/ 2.0
* (
np.power(self.Hb[k], 4.0)
- np.power(self.Lb[k], 4.0)
)
* (
1.0 / self.Hb[i] / self.Hb[i]
- 1.0 / self.Lb[i] / self.Lb[i]
)
)
)
if k == i:
self.mm[k, i] += 1.0
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
0.5
* (
-1.0 * self.Hb[k] * self.Hb[k]
+ self.Lb[k]
* self.Lb[k]
* (
4.0
- 3.0
* self.Lb[k]
* self.Lb[k]
/ self.Hb[i]
/ self.Hb[i]
- 4.0 * np.log(self.Hb[i] / self.Lb[k])
)
)
)
)
else:
if k == i:
self.mm[k, i] += 1.0
val = dblquad(
_mm_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mm[k, i] += val / knorm[k]
if i > k:
self.mm[k, i] = (
4.0 * inv2[i] - 12.0 * inv4[i] * self.fb[k]
) / knorm[k]
# sys.stdout.write("\n")
# compute the ell windows
self.comp_ell_windows()
def comp_ell_windows(self):
# get the windows in ell
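        # i.e. integrate each bin window against J0 and J4 Bessel kernels
        # over its angular range, evaluated on a log-spaced grid of ell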
self.have_ell_win = True
if HAVE_PYGSL:
def ellwin_int(theta, args):
ell = args[0]
win = args[1]
n = args[2]
return (pygsl.sf.bessel_Jn(n, ell * theta))[0] * win(theta)
else:
def ellwin_int(theta, ell, win, n):
return scipy.special.jn(n, ell * theta) * win(theta)
self.ellv = np.logspace(0.0, 5.5, 1500)
self.ellwindowsJ0 = np.zeros((self.Nb, len(self.ellv)))
self.ellwindowsJ4 = np.zeros((self.Nb, len(self.ellv)))
for i in range(self.Nb):
sys.stdout.write("|")
sys.stdout.flush()
if HAVE_PYGSL:
epsabs = 1e-6
epsrel = 1e-6
limit = 1000
w = pygsl.integrate.workspace(limit)
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 0]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,
# limit,pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ0[i, j] = val
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 4]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,limit,
# pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ4[i, j] = val
else:
win0 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 0),
)
)[0]
for ell in self.ellv
]
)
win4 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 4),
)
)[0]
for ell in self.ellv
]
)
self.ellwindowsJ0[i, :] = win0
self.ellwindowsJ4[i, :] = win4
sys.stdout.write("\n")
def write_data(self, fname):
"""
writes a simple text file with object info
# N L H
100 1.0 400.0
# Lb
1.0 1.2 ... 398.0
# Hb
1.2 1.4 ... 400.0
# fa
1.0 1.0 .... 1.0
# fb
blah blah ... blah
# fa_on
blah blah ... blah
# fb_on
blah blah ... blah
# invnorm
blah blah ... blah
# Mplus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# Mminus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellv
blah blah ... blah
# ellwinJ0
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellwinJ4
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
"""
def write_vec(fp, vec):
for val in vec:
fp.write("%.20lg " % val)
fp.write("\n#\n")
def write_mat(fp, mat):
shape = mat.shape
for i in range(shape[0]):
for val in mat[i, :]:
fp.write("%.20lg " % val)
fp.write("\n")
fp.write("#\n")
fp = open(fname, "w")
fp.write("# N L H\n")
fp.write("%ld %.20lg %.20lg\n" % (self.Nb, self.L, self.H))
fp.write("# Lb\n")
write_vec(fp, self.Lb)
fp.write("# Hb\n")
write_vec(fp, self.Hb)
fp.write("# fa\n")
write_vec(fp, self.fa)
fp.write("# fb\n")
write_vec(fp, self.fb)
fp.write("# fa_on\n")
write_vec(fp, self.fa_on)
fp.write("# fb_on\n")
write_vec(fp, self.fb_on)
fp.write("# invnorm\n")
write_vec(fp, self.invnorm)
fp.write("# Mplus\n")
write_mat(fp, self.mp)
fp.write("# Mminus\n")
write_mat(fp, self.mm)
fp.write("# ellv\n")
write_vec(fp, self.ellv)
fp.write("# ellwinJ0\n")
write_mat(fp, self.ellwindowsJ0)
fp.write("# ellwinJ4\n")
write_mat(fp, self.ellwindowsJ4)
fp.close()
def read_data(self, fname):
def read_vec(fp):
line = fp.readline()
line = line.strip()
val = np.array([float(tag) for tag in line.split()])
line = fp.readline()
return val
def read_mat(fp):
mat = []
line = fp.readline()
while line[0] != "#":
line = line.strip()
mat.append([float(tag) for tag in line.split()])
line = fp.readline()
mat = np.array(mat)
return mat
fp = open(fname, "r")
line = fp.readline()
line = fp.readline()
line = line.strip()
line = line.split()
self.Nb = int(line[0])
self.L = float(line[1])
self.H = float(line[2])
line = fp.readline()
self.Lb = read_vec(fp)
line = fp.readline()
self.Hb = read_vec(fp)
line = fp.readline()
self.fa = read_vec(fp)
line = fp.readline()
self.fb = read_vec(fp)
line = fp.readline()
self.fa_on = read_vec(fp)
line = fp.readline()
self.fb_on = read_vec(fp)
line = fp.readline()
self.invnorm = read_vec(fp)
line = fp.readline()
self.mp = read_mat(fp)
line = fp.readline()
self.mm = read_mat(fp)
line = fp.readline()
self.ellv = read_vec(fp)
line = fp.readline()
self.ellwindowsJ0 = read_mat(fp)
line = fp.readline()
self.ellwindowsJ4 = read_mat(fp)
self.have_ell_win = True
fp.close()
def fplusminus(self, fptest):
fp = fptest - np.sum(fptest * self.fa_on) * self.fa_on
fp = fp - np.sum(fp * self.fb_on) * self.fb_on
fm = np.dot(self.mp, fp)
"""
code to test
fm = np.zeros(len(fp))
for i in range(len(fp)):
for j in range(len(fp)):
fm[i] += self.mp[i,j]*fp[j]
print fm-np.dot(self.mp,fp)
"""
return fp, fm
def wplus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5
def wminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum - msum) * 0.5
def wplusminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5, (psum - msum) * 0.5
| 34.394428 | 87 | 0.336488 | 2,452 | 23,457 | 3.163948 | 0.077896 | 0.044084 | 0.03158 | 0.035447 | 0.689095 | 0.652874 | 0.595257 | 0.544341 | 0.51869 | 0.48595 | 0 | 0.026366 | 0.550497 | 23,457 | 681 | 88 | 34.444934 | 0.709408 | 0.042844 | 0 | 0.523901 | 0 | 0 | 0.009576 | 0 | 0 | 0 | 0 | 0 | 0.001912 | 1 | 0.053537 | false | 0.001912 | 0.015296 | 0.013384 | 0.116635 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
450aba433942ebcf2d5698d6bec5bdbf826e634d | 628 | py | Python | RecamanSequence/recaman_sequence.py | urosjevremovic/Recamans-Sequence | ab6a90c363271dc842f26ccd1b69168a9764de9e | [
"MIT"
] | null | null | null | RecamanSequence/recaman_sequence.py | urosjevremovic/Recamans-Sequence | ab6a90c363271dc842f26ccd1b69168a9764de9e | [
"MIT"
] | null | null | null | RecamanSequence/recaman_sequence.py | urosjevremovic/Recamans-Sequence | ab6a90c363271dc842f26ccd1b69168a9764de9e | [
"MIT"
] | null | null | null | import sys
from itertools import count, islice
def sequence():
"""Generate Recaman's sequence"""
seen = set()
a = 0
for n in count(1):
yield a
seen.add(a)
c = a - n
if c < 0 or c in seen:
c = a + n
a = c
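# First terms for reference: list(islice(sequence(), 8)) == [0, 1, 3, 6, 2, 7, 13, 20]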
def write_sequence(num):
"""Write Recaman's sequence to a text file"""
filename = "recaman.txt"
with open(filename, mode="wt", encoding="utf-8") as f:
f.writelines(f"{r}\n" for r in islice(sequence(), num))
def main():
write_sequence(num=int(sys.argv[1]))
if __name__ == '__main__':
    main()
| 20.258065 | 63 | 0.565287 | 97 | 628 | 3.546392 | 0.474227 | 0.127907 | 0.139535 | 0.116279 | 0.180233 | 0.180233 | 0.180233 | 0.180233 | 0 | 0 | 0 | 0.013453 | 0.289809 | 628 | 30 | 64 | 20.933333 | 0.757848 | 0.106688 | 0 | 0.1 | 1 | 0 | 0.056364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
451354227c3d203ff804c452ae15b439b4e8924c | 1,587 | py | Python | BFS/70.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | BFS/70.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | BFS/70.py | wilbertgeng/LintCode_exercise | e7a343b746e98ca3b4bc7b36655af7291f3150db | [
"MIT"
] | null | null | null | """70 · Binary Tree Level Order Traversal II"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: A tree
@return: buttom-up level order a list of lists of integer
"""
def levelOrderBottom(self, root):
# write your code here
if not root:
return []
res = []
queue = collections.deque([root])
while queue:
temp = []
for _ in range(len(queue)):
node = queue.popleft()
temp.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
res.append(temp)
return res[::-1]
    def levelOrderBottom_v2(self, root):
        # Alternative approach: BFS with a plain list as the queue,
        # collecting values level by level.
if not root:
return []
queue = [[root]]
index = 0
res = [[root.val]]
while index < len(queue):
curr_level = queue[index]
index += 1
next_level = []
next_level_vals = []
for node in curr_level:
if node.left:
next_level.append(node.left)
next_level_vals.append(node.left.val)
if node.right:
next_level.append(node.right)
next_level_vals.append(node.right.val)
if next_level:
queue.append(next_level)
res.append(next_level_vals)
return res[::-1]
| 24.415385 | 61 | 0.477001 | 171 | 1,587 | 4.315789 | 0.321637 | 0.109756 | 0.070461 | 0.04065 | 0.062331 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006543 | 0.42218 | 1,587 | 64 | 62 | 24.796875 | 0.797165 | 0.088847 | 0 | 0.263158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015625 | 0 | 1 | 0.026316 | false | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
45138db5ed51843c9a5afaaec91f905c3ac8de23 | 671 | py | Python | results/views/sports.py | JukkaKarvonen/sal-kiti | 3dcff71552ab323e3c97eccf502c0d72eb683967 | [
"MIT"
] | 1 | 2021-06-12T08:46:32.000Z | 2021-06-12T08:46:32.000Z | results/views/sports.py | JukkaKarvonen/sal-kiti | 3dcff71552ab323e3c97eccf502c0d72eb683967 | [
"MIT"
] | 8 | 2020-07-01T15:06:52.000Z | 2022-02-20T09:11:23.000Z | results/views/sports.py | JukkaKarvonen/sal-kiti | 3dcff71552ab323e3c97eccf502c0d72eb683967 | [
"MIT"
] | 3 | 2020-03-01T17:02:24.000Z | 2020-07-05T14:37:59.000Z | from dry_rest_permissions.generics import DRYPermissions
from rest_framework import viewsets
from results.models.sports import Sport
from results.serializers.sports import SportSerializer
class SportViewSet(viewsets.ModelViewSet):
"""API endpoint for sports.
list:
Returns a list of all the existing sports.
retrieve:
Returns the given sport.
create:
Creates a new sport instance.
update:
Updates a given sport.
partial_update:
Updates a given sport.
destroy:
Removes the given sport.
"""
permission_classes = (DRYPermissions,)
queryset = Sport.objects.all()
serializer_class = SportSerializer
| 20.96875 | 56 | 0.728763 | 78 | 671 | 6.192308 | 0.564103 | 0.082816 | 0.05383 | 0.078675 | 0.099379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214605 | 671 | 31 | 57 | 21.645161 | 0.916509 | 0.38152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
451695e3856e2d5dd4a42abbf9ad2c012826eaed | 792 | py | Python | komapy/decorators.py | bpptkg/komapy | a33fce5f4fbfacf085fd1f8043a57564be192a8d | [
"MIT"
] | null | null | null | komapy/decorators.py | bpptkg/komapy | a33fce5f4fbfacf085fd1f8043a57564be192a8d | [
"MIT"
] | null | null | null | komapy/decorators.py | bpptkg/komapy | a33fce5f4fbfacf085fd1f8043a57564be192a8d | [
"MIT"
] | null | null | null | from functools import partial
class counter:
"""
A counter decorator to track how many times a function is called.
"""
def __init__(self, func):
self.func = func
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
return self.func(*args, **kwargs)
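# Illustrative use of counter:
#
#     @counter
#     def greet():
#         return "hi"
#
#     greet(); greet()
#     greet.count  # -> 2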
def register_as_decorator(func):
"""
Register extensions, transforms, or addons function as decorator.
"""
def wrapper(*args, **kwargs):
        # With fewer than two positional args, the caller supplied only the
        # function name (no resolver): return a partial that waits for the
        # rest. Otherwise call func immediately and return its result.
if len(args) < 2:
return partial(func, *args, **kwargs)
return partial(func, *args, **kwargs)()
return wrapper
| 26.4 | 78 | 0.616162 | 94 | 792 | 5.085106 | 0.521277 | 0.104603 | 0.087866 | 0.087866 | 0.125523 | 0.125523 | 0 | 0 | 0 | 0 | 0 | 0.007018 | 0.280303 | 792 | 29 | 79 | 27.310345 | 0.831579 | 0.348485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0.071429 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4516fa710b28e684423724f2bca16759c34404c0 | 5,883 | py | Python | Applications/Examples/python/market_price_authentication.py | Refinitiv/websocket-api | 15a5957510d2bb246cbbf65ed999ff0089b3a65d | [
"Apache-2.0"
] | 36 | 2019-01-08T17:43:38.000Z | 2022-03-11T21:59:58.000Z | Applications/Examples/python/market_price_authentication.py | thomsonreuters/websocket-api | 52c940a01d40a6c073d35922d8214d927327caa4 | [
"Apache-2.0"
] | 14 | 2019-12-27T15:58:12.000Z | 2021-11-03T21:39:27.000Z | Applications/Examples/python/market_price_authentication.py | thomsonreuters/websocket-api | 52c940a01d40a6c073d35922d8214d927327caa4 | [
"Apache-2.0"
] | 28 | 2019-01-22T21:43:15.000Z | 2022-03-29T11:43:05.000Z | #|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright (C) 2017-2020 Refinitiv. All rights reserved. --
#|-----------------------------------------------------------------------------
#!/usr/bin/env python
""" Simple example of outputting Market Price JSON data using Websockets with authentication """
import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
from threading import Thread, Event
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Global Default Variables
app_id = '555'
auth_hostname = '127.0.0.1'
auth_port = '8443'
hostname = '127.0.0.1'
password = ''
position = socket.gethostbyname(socket.gethostname())
token = ''
user = ''
port = '15000'
# Global Variables
web_socket_app = None
web_socket_open = False
def process_message(ws, message_json):
""" Parse at high level and output JSON of message """
message_type = message_json['Type']
if message_type == "Refresh":
if 'Domain' in message_json:
message_domain = message_json['Domain']
if message_domain == "Login":
process_login_response(ws, message_json)
elif message_type == "Ping":
pong_json = { 'Type':'Pong' }
ws.send(json.dumps(pong_json))
print("SENT:")
print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(ws, message_json):
""" Send item request """
send_market_price_request(ws)
def send_market_price_request(ws):
""" Create and send simple Market Price request """
mp_req_json = {
'ID': 2,
'Key': {
'Name': 'TRI.N',
},
}
ws.send(json.dumps(mp_req_json))
print("SENT:")
print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
""" Called when message received, parse message into JSON for processing """
print("RECEIVED: ")
message_json = json.loads(message)
print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))
for singleMsg in message_json:
process_message(ws, singleMsg)
def on_error(ws, error):
""" Called when websocket error has occurred """
print(error)
def on_close(ws, close_status_code, close_msg):
""" Called when websocket is closed """
global web_socket_open
web_socket_open = False
print("WebSocket Closed")
def on_open(ws):
""" Called when handshake is complete and websocket is open, send login """
print("WebSocket successfully connected!")
global web_socket_open
web_socket_open = True
if __name__ == "__main__":
# Get command line parameters
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "password=", "position=", "auth_hostname=", "auth_port="])
except getopt.GetoptError:
print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
sys.exit(2)
for opt, arg in opts:
if opt in ("--help"):
print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
sys.exit(0)
elif opt in ("--hostname"):
hostname = arg
elif opt in ("--port"):
port = arg
elif opt in ("--app_id"):
app_id = arg
elif opt in ("--user"):
user = arg
elif opt in ("--password"):
password = arg
elif opt in ("--position"):
position = arg
elif opt in ("--auth_hostname"):
auth_hostname = arg
elif opt in ("--auth_port"):
auth_port = arg
# Send login info for authentication token
print("Sending authentication request...")
r = requests.post('https://{}:{}/getToken'.format(auth_hostname, auth_port),
data={'username': user, 'password': password},
verify=True)
auth_json = r.json()
print("RECEIVED:")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
if auth_json['success'] is True:
token = r.cookies['AuthToken']
print('Authentication Succeeded. Received AuthToken: {}'.format(token))
cookie = "AuthToken={};AuthPosition={};applicationId={};".format(token, position, app_id)
# Start websocket handshake
ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
print("Connecting to WebSocket " + ws_address + " ...")
web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
on_error=on_error,
on_close=on_close,
subprotocols=['tr_json2'],
cookie=cookie)
web_socket_app.on_open = on_open
# Event loop
wst = threading.Thread(target=web_socket_app.run_forever)
wst.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
web_socket_app.close()
else:
print('Authentication failed')
| 34.810651 | 235 | 0.590345 | 670 | 5,883 | 5.001493 | 0.292537 | 0.026858 | 0.021486 | 0.025067 | 0.239033 | 0.192778 | 0.156968 | 0.118174 | 0.096688 | 0.096688 | 0 | 0.010546 | 0.258542 | 5,883 | 168 | 236 | 35.017857 | 0.75768 | 0.17882 | 0 | 0.103448 | 0 | 0.017241 | 0.213417 | 0.022222 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060345 | false | 0.060345 | 0.086207 | 0 | 0.146552 | 0.146552 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4517ac136f86ccb5533a40509e2b215d308bd04d | 571 | py | Python | cardDao.py | Blueredemption/Inventory | 8d61671071f89b51b3e34c5eb673200fc8baffc0 | [
"MIT"
] | null | null | null | cardDao.py | Blueredemption/Inventory | 8d61671071f89b51b3e34c5eb673200fc8baffc0 | [
"MIT"
] | null | null | null | cardDao.py | Blueredemption/Inventory | 8d61671071f89b51b3e34c5eb673200fc8baffc0 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
class CardDao():
def __init__(self): # constructor
super().__init__()
self.create()
        self.read()
self.update()
self.delete()
self.populate()
    def create(self):  # stub: create a card record
print('Create')
    def read(self):  # 'return' is a reserved keyword in Python and cannot be a method name
print('Read')
def update(self):
print('Update')
def delete(self):
print('Delete')
def populate(self):
print('Populate')
def main():
run = CardDao()
if __name__ == '__main__':
main() | 16.794118 | 54 | 0.528897 | 60 | 571 | 4.766667 | 0.433333 | 0.125874 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002611 | 0.329247 | 571 | 34 | 55 | 16.794118 | 0.744125 | 0.105079 | 0 | 0 | 0 | 0 | 0.074656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.227273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
451aa8ccde2d865dd652ad209fefdf68afe0ad46 | 2,820 | py | Python | streamlit_app.py | guim4dev/education-cv | ffd880090de28e36849b4d53c424c2009791aaf5 | [
"MIT"
] | null | null | null | streamlit_app.py | guim4dev/education-cv | ffd880090de28e36849b4d53c424c2009791aaf5 | [
"MIT"
] | null | null | null | streamlit_app.py | guim4dev/education-cv | ffd880090de28e36849b4d53c424c2009791aaf5 | [
"MIT"
] | null | null | null | import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
st.title("Relatório de Aula")
df = pd.read_csv('data/emocoes.csv')
agg = pd.read_csv('data/agg.csv')
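# Assumed CSV layout (inferred from usage below):
#   emocoes.csv has columns 'emocao' and 'pessoa'; agg.csv has 'tempo', 'size', 'emocao'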
Engajado = df[df['emocao'] == 'Engajado']
Engajado_agg = Engajado.groupby(['emocao', 'pessoa']).size().reset_index(name='size')
Engajado_agg = Engajado_agg.sort_values(by=['size'], ascending=False)
emotions_count = df.value_counts('emocao').reset_index()
def is_authenticated(password):
return password == "182916f6-756d-40d6-95fc-3283ba5efdf8"
def generate_time_agg_graph():
fig = px.line(agg, x="tempo", y="size", labels= { 'tempo': 'tempo (s)',
'size': 'número de alunos' }, color='emocao', title='Emoções ao longo do tempo')
st.plotly_chart(fig, use_container_width=True)
def generate_top_students():
st.markdown('<br/>', unsafe_allow_html=True)
st.markdown("<center style='font-size:2em'=>Alunos Mais Engajados</center>", unsafe_allow_html=True)
top_three = Engajado_agg.head(3).to_numpy()
for row in top_three:
st.markdown(f"<center><span style='color:#00FF00;font-size:1.5em'>{row[1]}</span></center>", unsafe_allow_html=True)
st.markdown('<br/>', unsafe_allow_html=True)
def generate_bottom_students():
st.markdown("<center style='font-size:2em'>Alunos Menos Engajados</center>", unsafe_allow_html=True)
bottom_three = np.flip(Engajado_agg.tail(3).to_numpy(), 0)
for row in bottom_three:
st.write(f"<center><span style='color:red;font-size:1.5em'>{row[1]}</span></center>", unsafe_allow_html=True)
st.markdown('<br/> <br/>', unsafe_allow_html=True)
def generate_emotions_pizza():
    fig = px.pie(emotions_count, values=emotions_count.iloc[:, -1], names='emocao', title='Predominância de Emoções')  # last column holds the counts after reset_index()
st.plotly_chart(fig, use_container_width=True)
def generate_login_block():
block1 = st.empty()
block2 = st.empty()
return block1, block2
def clean_blocks(blocks):
for block in blocks:
block.empty()
def graph_columns():
generate_time_agg_graph()
generate_top_students()
generate_bottom_students()
generate_emotions_pizza()
def login(blocks):
return blocks[1].text_input('ID da Aula')
login_blocks = generate_login_block()
password = login(login_blocks)
drive_block = st.empty()
google_drive = drive_block.text_input('Link da aula para processamento', '')
id_block = st.empty()
if google_drive != '':
drive_block.empty()
id_block.text("ID da Aula processada: 182916f6-756d-40d6-95fc-3283ba5efdf8")
if is_authenticated(password):
id_block.empty()
drive_block.empty()
clean_blocks(login_blocks)
st.balloons()
graph_columns()
elif password:
st.info("Aula não encontrada. Por favor, insira um ID válido.") | 32.413793 | 134 | 0.699291 | 401 | 2,820 | 4.713217 | 0.336658 | 0.040741 | 0.055556 | 0.07037 | 0.292593 | 0.236508 | 0.200529 | 0.151323 | 0.111111 | 0.111111 | 0 | 0.026018 | 0.154965 | 2,820 | 87 | 135 | 32.413793 | 0.7671 | 0 | 0 | 0.095238 | 0 | 0.031746 | 0.236795 | 0.088267 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0.079365 | 0.063492 | 0.031746 | 0.253968 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
451d32ddace64c14dc2a20c09b0af3249bd93791 | 676 | py | Python | api/db/models/child_datum.py | peuan-testai/opentestdata-api | 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | [
"MIT"
] | 15 | 2019-06-27T02:48:02.000Z | 2020-11-29T09:01:29.000Z | api/db/models/child_datum.py | peuan-testai/opentestdata-api | 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | [
"MIT"
] | 16 | 2019-07-26T19:51:55.000Z | 2022-03-12T00:00:24.000Z | api/db/models/child_datum.py | peuan-testai/opentestdata-api | 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | [
"MIT"
] | 7 | 2019-06-26T11:10:50.000Z | 2020-09-04T08:52:58.000Z | from .. import db
from .base import BaseModel
class ChildDatum(BaseModel):
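    """Association row linking a parent Datum to a child Datum under a given name."""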
__tablename__ = 'child_data'
# fields
parent_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
datum_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
name = db.Column(db.String(length=100), nullable=False)
# relationships
parent = db.relationship('Datum', back_populates='children', foreign_keys=[parent_id])
datum = db.relationship('Datum', back_populates='included_in', foreign_keys=[datum_id])
def __repr__(self):
return (
"<ChildDatum '%s' of %s>" %
(self.name, self.parent)
)
| 30.727273 | 91 | 0.659763 | 83 | 676 | 5.156627 | 0.457831 | 0.056075 | 0.070093 | 0.056075 | 0.383178 | 0.233645 | 0.233645 | 0.233645 | 0.233645 | 0.233645 | 0 | 0.005535 | 0.198225 | 676 | 21 | 92 | 32.190476 | 0.784133 | 0.029586 | 0 | 0 | 0 | 0 | 0.116386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0.071429 | 0.785714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
451de10c0477bdaf31e0d063879d50b5418e6b0b | 490 | py | Python | catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py | min-chuir-Park/ROS_Tutorials | 4c19e7673ec7098019c747833c45f0d32b85dab4 | [
"MIT"
] | 1 | 2019-07-04T04:49:05.000Z | 2019-07-04T04:49:05.000Z | catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py | min-chuir-Park/ROS_Tutorials | 4c19e7673ec7098019c747833c45f0d32b85dab4 | [
"MIT"
] | null | null | null | catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py | min-chuir-Park/ROS_Tutorials | 4c19e7673ec7098019c747833c45f0d32b85dab4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from srv_sub_pub.srv import *
NAME = "add_two_ints_server"
def add_two_ints(req):
print("Returning [%s + %s = %s]" % (req.a, req.b, (req.a + req.b)))
return AddTwoIntsResponse(req.a + req.b)
def add_two_ints_server():
rospy.init_node(NAME)
s = rospy.Service('add_two_ints', AddTwoInts, add_two_ints)
# spin() keeps Python from exiting until node is shutdown
rospy.spin()
if __name__ == "__main__":
add_two_ints_server()
| 22.272727 | 71 | 0.681633 | 78 | 490 | 3.948718 | 0.461538 | 0.116883 | 0.194805 | 0.155844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179592 | 490 | 21 | 72 | 23.333333 | 0.766169 | 0.155102 | 0 | 0 | 0 | 0 | 0.152913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.416667 | 0.083333 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4526f09b63533011d0dbd7fc3b49ed217cae0f86 | 8,171 | py | Python | third-party/webscalesqlclient/mysql-5.6/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/xb_partial_test.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | 2 | 2018-03-07T08:31:29.000Z | 2019-02-01T10:10:48.000Z | third-party/webscalesqlclient/mysql-5.6/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/xb_partial_test.py | hkirsman/hhvm_centos7_builds | 2a1fd6de0d2d289c1575f43f10018f3bec23bb13 | [
"PHP-3.01",
"Zend-2.0"
] | 1 | 2021-02-23T14:52:22.000Z | 2021-02-23T14:52:22.000Z | xtrabackup_main/xb_partial_test.py | isabella232/kewpie | 47d67124fa755719eda3ca5a621a2abf0322d3f9 | [
"Apache-2.0"
] | 1 | 2020-11-13T10:17:28.000Z | 2020-11-13T10:17:28.000Z | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [['--innodb-file-per-table']]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, 'full_backup')
inc_backup_path = os.path.join(master_server.vardir, 'inc_backup')
# remove backup paths
for del_path in [backup_path, inc_backup_path]:
if os.path.exists(del_path):
shutil.rmtree(del_path)
def load_table(self, table_name, row_count, server):
queries = []
for i in range(row_count):
queries.append("INSERT INTO %s VALUES (%d, %d)" %(table_name,i, row_count))
retcode, result = self.execute_queries(queries, server)
self.assertEqual(retcode, 0, msg=result)
def test_xb_partial(self):
self.servers = servers
logging = test_executor.logging
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, 'full_backup')
inc_backup_path = os.path.join(master_server.vardir, 'inc_backup')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
table_name = "`test`"
# populate our server with a test bed
queries = ["DROP TABLE IF EXISTS %s" %(table_name)
,("CREATE TABLE %s "
"(`a` int(11) DEFAULT NULL, "
"`number` int(11) DEFAULT NULL) "
" ENGINE=InnoDB DEFAULT CHARSET=latin1 "
%(table_name)
)
]
retcode, result = self.execute_queries(queries, master_server)
self.assertEqual(retcode, 0, msg = result)
row_count = 100
self.load_table(table_name, row_count, master_server)
# Additional tables via randgen
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
#self.assertEqual(retcode, 0, msg=output)
# take a backup
cmd = [ xtrabackup
, "--defaults-file=%s" %master_server.cnf_file
, "--datadir=%s" %master_server.datadir
, "--backup"
, '--tables="^test[.]test|DD"'
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# Get a checksum for our `test` table
query = "CHECKSUM TABLE %s" %table_name
retcode, orig_checksum1 = self.execute_query(query, master_server)
        self.assertEqual(retcode, 0, msg=orig_checksum1)
logging.test_debug("Original checksum1: %s" %orig_checksum1)
# Get a checksum for our `DD` table
query = "CHECKSUM TABLE DD"
retcode, orig_checksum2 = self.execute_query(query, master_server)
        self.assertEqual(retcode, 0, msg=orig_checksum2)
logging.test_debug("Original checksum2: %s" %orig_checksum2)
# Clear our table so we know the backup restored
for del_table in [table_name,'DD']:
query = "DELETE FROM %s" %del_table
retcode, result = self.execute_query(query,master_server)
self.assertEqual(retcode, 0, result)
# Remove old tables
for table in ['A','AA','B','BB','C','CC','D']:
query = "DROP TABLE %s" %table
retcode, result = self.execute_query(query,master_server)
self.assertEqual(retcode,0,result)
# shutdown our server
master_server.stop()
# do final prepare on main backup
cmd = [ xtrabackup
, "--prepare"
, "--datadir=%s" %master_server.datadir
, "--use-memory=500M"
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# copy our data files back
for root, dirs, files in os.walk(backup_path):
if files:
file_info = root.split(backup_path)[1]
for file_name in files:
# We do a quick check to make sure
# no names start with '/' as os.path
# throws a hissy when it sees such things
if file_info.startswith('/'):
file_info = file_info[1:]
if file_name.startswith('/'):
file_name = file_name[1:]
to_path = os.path.join(master_server.datadir
, file_info
, file_name)
new_dir = os.path.dirname(to_path)
try:
if not os.path.exists(new_dir):
os.makedirs(new_dir)
                    except OSError as e:
logging.error("Could not create directory: %s | %s" %(new_dir, e))
try:
shutil.copy(os.path.join(root,file_name),to_path)
                    except IOError as e:
logging.error( "ERROR: Could not copy file: %s | %s" %(file_name, e))
# restart server (and ensure it doesn't crash)
master_server.start()
self.assertTrue(master_server.status==1, 'Server failed restart from restored datadir...')
# Get a checksum for our test table
query = "CHECKSUM TABLE %s" %table_name
retcode, restored_checksum1 = self.execute_query(query, master_server)
        self.assertEqual(retcode, 0, msg=restored_checksum1)
logging.test_debug("Restored checksum1: %s" %restored_checksum1)
self.assertEqual(orig_checksum1, restored_checksum1, msg = "Orig: %s | Restored: %s" %(orig_checksum1, restored_checksum1))
# Get a checksum for our DD table
query = "CHECKSUM TABLE DD"
retcode, restored_checksum2 = self.execute_query(query, master_server)
        self.assertEqual(retcode, 0, msg=restored_checksum2)
logging.test_debug("Restored checksum1: %s" %restored_checksum2)
self.assertEqual(orig_checksum2, restored_checksum2, msg = "Orig: %s | Restored: %s" %(orig_checksum2, restored_checksum2))
| 45.648045 | 135 | 0.571778 | 938 | 8,171 | 4.837953 | 0.282516 | 0.06082 | 0.041648 | 0.055531 | 0.390921 | 0.373292 | 0.32922 | 0.305421 | 0.295725 | 0.295725 | 0 | 0.012128 | 0.333986 | 8,171 | 178 | 136 | 45.904494 | 0.821757 | 0.190552 | 0 | 0.293103 | 0 | 0 | 0.12053 | 0.012479 | 0 | 0 | 0 | 0 | 0.112069 | 0 | null | null | 0 | 0.025862 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
452e242fef5c444f6a84742a55e2adf53a8f64d3 | 9,907 | py | Python | algofi/v1/staking.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | null | null | null | algofi/v1/staking.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | null | null | null | algofi/v1/staking.py | zhengxunWu3/algofi-py-sdk | 8388d71d55eae583ac3579286b5f870aa3db2913 | [
"MIT"
] | null | null | null | from algosdk import logic
from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn
from ..contract_strings import algofi_manager_strings as manager_strings
from .prepend import get_init_txns
from ..utils import TransactionGroup, Transactions, randint, int_to_bytes
OPT_IN_MIN_BALANCE=0.65
def prepare_staking_contract_optin_transactions(manager_app_id, market_app_id, sender, storage_address, suggested_params):
"""Returns a :class:`TransactionGroup` object representing a staking contract opt in
group transaction. The sender and storage account opt in to the staking application
and the storage account is rekeyed to the manager account address, rendering it
unable to be transacted against by the sender and therefore immutable.
:param manager_app_id: id of the manager application
:type manager_app_id: int
    :param market_app_id: id of the staking market application
    :type market_app_id: int
:param sender: account address for the sender
:type sender: string
:param storage_address: address of the storage account
:type storage_address: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:return: :class:`TransactionGroup` object representing a manager opt in group transaction
:rtype: :class:`TransactionGroup`
"""
txn_payment = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=storage_address,
amt=int(OPT_IN_MIN_BALANCE*1e6)
)
txn_market = ApplicationOptInTxn(
sender=storage_address,
sp=suggested_params,
index=market_app_id
)
txn_user_opt_in_manager = ApplicationOptInTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id
)
app_address = logic.get_application_address(manager_app_id)
txn_storage_opt_in_manager = ApplicationOptInTxn(
sender=storage_address,
sp=suggested_params,
index=manager_app_id,
rekey_to=app_address
)
txn_group = TransactionGroup([txn_payment, txn_market, txn_user_opt_in_manager, txn_storage_opt_in_manager])
return txn_group
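# Illustrative usage; the client and ids below are assumptions, not part of this module:
#
#     sp = algod_client.suggested_params()
#     group = prepare_staking_contract_optin_transactions(
#         manager_app_id, market_app_id, sender, storage_address, sp
#     )
#     # then sign and submit the returned TransactionGroup as usual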
def prepare_stake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, market_address, oracle_app_id, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a stake
transaction against the algofi protocol. The sender sends assets to the
staking account and is credited with a stake.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of asset to supply for minting collateral
:type amount: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the asset market application
:type market_app_id: int
:param market_address: account address for the market application
:type market_address: string
:param oracle_app_id: id of the asset market application
:type oracle_app_id: int
:param asset_id: asset id of the asset being supplied, defaults to None (algo)
:type asset_id: int, optional
:return: :class:`TransactionGroup` object representing a mint to collateral group transaction
:rtype: :class:`TransactionGroup`
"""
supported_oracle_app_ids = [oracle_app_id]
supported_market_app_ids = [market_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.MINT_TO_COLLATERAL,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.mint_to_collateral.encode()],
)
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.mint_to_collateral.encode()],
foreign_apps=[manager_app_id],
accounts=[storage_account]
)
if asset_id:
txn2 = AssetTransferTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount,
index=asset_id
)
else:
txn2 = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1, txn2])
return txn_group
def prepare_unstake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, oracle_app_id, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a remove stake
group transaction against the algofi protocol. The sender requests to remove stake
    from a stake account and, if successful, the stake is removed.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of collateral to remove from the market
:type amount: int
:param asset_id: asset id of the asset underlying the collateral
:type asset_id: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application of the collateral
:type market_app_id: int
:param oracle_app_id: id of the oracle application of the collateral
:type oracle_app_id: int
:return: :class:`TransactionGroup` object representing a remove collateral underlying group transaction
:rtype: :class:`TransactionGroup`
"""
supported_market_app_ids = [market_app_id]
supported_oracle_app_ids = [oracle_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.REMOVE_COLLATERAL_UNDERLYING,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode(), int_to_bytes(amount)]
)
if asset_id:
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode()],
foreign_apps=[manager_app_id],
foreign_assets=[asset_id],
accounts=[storage_account]
)
else:
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode()],
foreign_apps=[manager_app_id],
accounts=[storage_account]
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1])
return txn_group
def prepare_claim_staking_rewards_transactions(sender, suggested_params, storage_account, manager_app_id, market_app_id, oracle_app_id, foreign_assets):
"""Returns a :class:`TransactionGroup` object representing a claim rewards
    group transaction against the algofi protocol. The sender requests to
    claim pending staking rewards from the manager account for their
    storage account.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application of the collateral
:type market_app_id: int
:param oracle_app_id: id of the oracle application
:type oracle_app_id: int
:param foreign_assets: list of rewards assets in the staking contract
:type foreign_assets: list
:return: :class:`TransactionGroup` object representing a claim rewards transaction
:rtype: :class:`TransactionGroup`
"""
supported_market_app_ids = [market_app_id]
supported_oracle_app_ids = [oracle_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.CLAIM_REWARDS,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.claim_rewards.encode()],
accounts=[storage_account],
foreign_assets=foreign_assets
)
txn_group = TransactionGroup(prefix_transactions + [txn0])
return txn_group | 43.262009 | 159 | 0.731402 | 1,225 | 9,907 | 5.620408 | 0.106939 | 0.040668 | 0.047059 | 0.013072 | 0.747567 | 0.708206 | 0.659114 | 0.580683 | 0.562963 | 0.529702 | 0 | 0.00242 | 0.207631 | 9,907 | 229 | 160 | 43.262009 | 0.87465 | 0.42253 | 0 | 0.617647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.036765 | 0 | 0.095588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4535c1a7513cb60d8687c9c277406f75c8762e19 | 2,039 | py | Python | tests/test_ProtocolService/test_ProtocolService.py | danilocgsilva/awsinstances | c0ab6ae42b3bfbe94735f7ba4741b3facec271ce | [
"MIT"
] | null | null | null | tests/test_ProtocolService/test_ProtocolService.py | danilocgsilva/awsinstances | c0ab6ae42b3bfbe94735f7ba4741b3facec271ce | [
"MIT"
] | null | null | null | tests/test_ProtocolService/test_ProtocolService.py | danilocgsilva/awsinstances | c0ab6ae42b3bfbe94735f7ba4741b3facec271ce | [
"MIT"
] | null | null | null | import unittest
import sys
sys.path.insert(2, "..")
from awsec2instances_includes.ProtocolService import ProtocolService
class test_ProtocolService(unittest.TestCase):
    def test_exception_wrong_argument(self):
wrong_argument = "some-invalid"
with self.assertRaises(Exception):
ProtocolService(wrong_argument)
def test_get_zero_element_string(self):
        protocolService = ProtocolService("")
        self.assertEqual(0, len(protocolService.get_ports()))
def test_get_zero_element_none(self):
        protocolService = ProtocolService()
        self.assertEqual(0, len(protocolService.get_ports()))
def test_port_both_options(self):
protocolService = ProtocolService("with-ssh,with-http")
returned_ports = protocolService.get_ports()
self.assertEqual(22, returned_ports[0])
self.assertEqual(80, returned_ports[1])
def test_port_three_options(self):
protocolService = ProtocolService("with-ssh,with-http,with-database")
returned_ports = protocolService.get_ports()
self.assertEqual(22, returned_ports[0])
self.assertEqual(80, returned_ports[1])
self.assertEqual(3306, returned_ports[2])
def test_one_option_wrong(self):
one_option_wrong = "with-ssh,with-cassandra"
with self.assertRaises(Exception):
ProtocolService(one_option_wrong)
def test_three_options_one_wrong(self):
three_options = "with-ssh,with-http,with-cassandra"
with self.assertRaises(Exception):
ProtocolService(three_options)
def test_is_not_empty_false(self):
protocolService = ProtocolService()
self.assertFalse(protocolService.is_not_empty())
def test_is_not_empty_true(self):
protocolService = ProtocolService("with-ssh")
self.assertTrue(protocolService.is_not_empty())
def test_is_not_empty_true2(self):
protocolService = ProtocolService("with-ssh,with-http")
self.assertTrue(protocolService.is_not_empty())
| 36.410714 | 77 | 0.714076 | 228 | 2,039 | 6.109649 | 0.245614 | 0.050251 | 0.043073 | 0.109117 | 0.667624 | 0.555635 | 0.517588 | 0.400574 | 0.320172 | 0.259871 | 0 | 0.013309 | 0.189308 | 2,039 | 55 | 78 | 37.072727 | 0.829401 | 0 | 0 | 0.348837 | 0 | 0 | 0.071639 | 0.04318 | 0 | 0 | 0 | 0 | 0.302326 | 1 | 0.232558 | false | 0 | 0.069767 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
453972bee5e4b38dcaee26d48c6dcec6950939dd | 821 | py | Python | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | 2 | 2022-02-13T19:13:16.000Z | 2022-02-17T14:52:05.000Z | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | null | null | null | custom_uss/custom_widgets/outlog.py | shuanet/dss | 5daafeb89aac58e4614775f301bec920f4abfa24 | [
"Apache-2.0"
] | 1 | 2022-02-16T20:17:38.000Z | 2022-02-16T20:17:38.000Z | import sys
from PySide6 import QtGui
class OutLog:
def __init__(self, edit, out=None, color=None):
"""(edit, out=None, color=None) -> can write stdout, stderr to a
QTextEdit.
edit = QTextEdit
out = alternate stream ( can be the original sys.stdout )
color = alternate color (i.e. color stderr a different color)
"""
self.edit = edit
        self.out = out  # alternate stream to mirror writes to (see docstring)
self.color = color
def write(self, m):
if self.color:
tc = self.edit.textColor()
self.edit.setTextColor(self.color)
self.edit.moveCursor(QtGui.QTextCursor.End)
self.edit.insertPlainText( m )
if self.color:
self.edit.setTextColor(tc)
if self.out:
self.out.write(m)
def flush(self):
pass
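# Typical wiring (illustrative): mirror stdout into a QTextEdit
#
#     edit = QTextEdit()
#     sys.stdout = OutLog(edit, out=sys.stdout)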
| 25.65625 | 72 | 0.576127 | 102 | 821 | 4.598039 | 0.392157 | 0.119403 | 0.083156 | 0.06823 | 0.085288 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001802 | 0.323995 | 821 | 31 | 73 | 26.483871 | 0.843243 | 0.254568 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0.052632 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
453b632b266da30271e1e4710f1d5bea075bf4fb | 1,937 | py | Python | cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 560d982d8cd80a20508bf616abc0dc741d7b5d84 | [
"Apache-2.0"
] | 503 | 2015-11-11T22:07:36.000Z | 2022-03-28T21:29:30.000Z | cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 560d982d8cd80a20508bf616abc0dc741d7b5d84 | [
"Apache-2.0"
] | 209 | 2015-07-13T04:49:38.000Z | 2022-03-25T22:06:18.000Z | cluster/image/pro_seafile_7.1/scripts_7.1/start.py | chaosbunker/seafile-docker | 560d982d8cd80a20508bf616abc0dc741d7b5d84 | [
"Apache-2.0"
] | 195 | 2015-07-09T18:11:47.000Z | 2022-03-25T11:56:53.000Z | #!/usr/bin/env python3
#coding: UTF-8
import os
import sys
import time
import json
import argparse
from os.path import join, exists, dirname
from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir
installdir = get_install_dir()
topdir = dirname(installdir)
def watch_controller():
maxretry = 4
retry = 0
while retry < maxretry:
controller_pid = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
garbage_collector_pid = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
if not controller_pid and not garbage_collector_pid:
retry += 1
else:
retry = 0
time.sleep(5)
print('seafile controller exited unexpectedly.')
sys.exit(1)
def main(args):
call('/scripts/create_data_links.sh')
# check_upgrade()
os.chdir(installdir)
call('service nginx start &')
admin_pw = {
'email': get_conf('SEAFILE_ADMIN_EMAIL', 'me@example.com'),
'password': get_conf('SEAFILE_ADMIN_PASSWORD', 'asecret'),
}
password_file = join(topdir, 'conf', 'admin.txt')
with open(password_file, 'w+') as fp:
json.dump(admin_pw, fp)
try:
call('{} start'.format(get_script('seafile.sh')))
call('{} start'.format(get_script('seahub.sh')))
if args.mode == 'backend':
call('{} start'.format(get_script('seafile-background-tasks.sh')))
finally:
if exists(password_file):
os.unlink(password_file)
print('seafile server is running now.')
try:
watch_controller()
except KeyboardInterrupt:
print('Stopping seafile server.')
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Seafile cluster start script')
parser.add_argument('--mode')
main(parser.parse_args())
| 29.348485 | 113 | 0.653588 | 246 | 1,937 | 4.947154 | 0.455285 | 0.029581 | 0.039441 | 0.044371 | 0.146261 | 0.09696 | 0.046015 | 0 | 0 | 0 | 0 | 0.005968 | 0.221477 | 1,937 | 65 | 114 | 29.8 | 0.801061 | 0.025813 | 0 | 0.076923 | 0 | 0 | 0.2431 | 0.041401 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0.096154 | 0.153846 | 0 | 0.192308 | 0.057692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
453c20b8c1cf91ca7912ad336c0a4f1a000e5011 | 4,024 | py | Python | tubee/utils/__init__.py | tomy0000000/Tubee | 1bfbd3cde118cd8a31499b8255b311602fde85bc | [
"MIT"
] | 8 | 2020-12-09T13:01:41.000Z | 2022-01-09T10:06:25.000Z | tubee/utils/__init__.py | tomy0000000/Tubee | 1bfbd3cde118cd8a31499b8255b311602fde85bc | [
"MIT"
] | 141 | 2019-08-21T20:23:07.000Z | 2022-03-29T14:02:27.000Z | tubee/utils/__init__.py | tomy0000000/Tubee | 1bfbd3cde118cd8a31499b8255b311602fde85bc | [
"MIT"
] | 7 | 2020-07-28T08:52:06.000Z | 2021-07-26T02:15:36.000Z | """Helper Functions
Some Misc Functions used in this app
"""
import secrets
import string
from functools import wraps
from urllib.parse import urljoin, urlparse
from dateutil import parser
from flask import abort, current_app, request
from flask_login import current_user
from flask_migrate import upgrade
def setup_app():
# Migrate database to latest revision
upgrade()
current_app.logger.info("Database migrated")
from ..models.user import User
# Create an admin user if none exists
if not User.query.filter_by(admin=True).first():
# Create a random password
alphabet = string.ascii_letters + string.digits
password = "".join(secrets.choice(alphabet) for i in range(20))
User(username="admin", password=password, admin=True)
current_app.db.session.commit()
current_app.logger.info("Admin created automatically:")
current_app.logger.info("Username: admin")
current_app.logger.info(f"Password: {password}")
# Reschedule all tasks
from ..models import Channel
from ..tasks import remove_all_tasks, schedule_channel_renewal
remove_all_tasks()
current_app.logger.info("All tasks removed")
schedule_channel_renewal(Channel.query.all())
current_app.logger.info("Channel renewal scheduled")
# TODO: Update channels metadata
def try_parse_datetime(string):
try:
return parser.parse(string).replace(tzinfo=None)
except (ValueError, TypeError):
return None
def admin_required(*args, **kwargs):
if not current_user.admin:
abort(403)
def admin_required_decorator(func):
"""Restrict view function to admin-only
Arguments:
        func {view function} -- The view function to be restricted
Returns:
view function -- The restricted function
"""
@wraps(func)
def decorated_view_function(*args, **kwargs):
admin_required()
return func(*args, **kwargs)
return decorated_view_function
def pushover_required(func):
"""Restrict view function to users who have configured Pushover account
Arguments:
        func {view function} -- The view function to be restricted
Returns:
view function -- The restricted function
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if not current_user.pushover:
abort(403)
return func(*args, **kwargs)
return decorated_function
def youtube_required(func):
"""Restrict view function to users who have configured YouTube account
Arguments:
        func {view function} -- The view function to be restricted
Returns:
view function -- The restricted function
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if not current_user.youtube:
abort(403)
return func(*args, **kwargs)
return decorated_function
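
# Usage sketch (illustrative only; the route and view names are assumptions,
# not part of this module). The decorators simply abort with 403 before the
# wrapped view runs:
#
#     @app.route("/admin/dashboard")
#     @admin_required_decorator
#     def dashboard():
#         ...
#
#     @app.route("/push-test")
#     @pushover_required
#     def push_test():
#         ...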
def is_safe_url(target):
"""Helper used to check endpoint before redirecting user
Arguments:
        target {url} -- a url with complete scheme and domain to be examined
Returns:
bool -- target is a safe url or not
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
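
# Typical use in a login view (sketch; the endpoint names are assumptions):
#
#     next_url = request.args.get("next")
#     if next_url and is_safe_url(next_url):
#         return redirect(next_url)
#     return redirect(url_for("main.dashboard"))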
def notify_admin(initiator, service, **kwargs):
"""Send Notification to all Admin
A Temporary function used to notify admin
Arguments:
initiator {str} -- Action or reason that trigger this notification
service {str or notification.Service} -- Service used to send notification
**kwargs {dict} -- optional arguments passed to notification
Returns:
dict -- Response from notification service
"""
from ..models.user import User
admins = User.query.filter_by(admin=True).all()
response = {}
for admin in admins:
response[admin.username] = admin.send_notification(initiator, service, **kwargs)
return response
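
# Example call (sketch; "Pushover" must be a service name understood by
# User.send_notification, which is defined elsewhere):
#
#     results = notify_admin("new-user-registered", "Pushover",
#                            message="A new user just signed up")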
| 27.006711 | 88 | 0.685885 | 496 | 4,024 | 5.457661 | 0.308468 | 0.062061 | 0.035464 | 0.04433 | 0.309198 | 0.281862 | 0.240118 | 0.240118 | 0.240118 | 0.240118 | 0 | 0.003536 | 0.226889 | 4,024 | 148 | 89 | 27.189189 | 0.866602 | 0.327286 | 0 | 0.230769 | 0 | 0 | 0.05325 | 0 | 0 | 0 | 0 | 0.006757 | 0 | 1 | 0.169231 | false | 0.046154 | 0.184615 | 0 | 0.507692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
1889e33c1df53b96578448ca9e90add8e038bfe9 | 3,941 | py | Python | test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | null | null | null | test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | 6 | 2020-05-26T17:40:07.000Z | 2022-03-11T16:33:11.000Z | test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py | eapearson/kbase-skd-module-job-browser-bff | 426445f90569adac16632ef4921f174e51abd42f | [
"MIT"
] | 1 | 2020-05-26T17:12:59.000Z | 2020-05-26T17:12:59.000Z | # -*- coding: utf-8 -*-
from JobBrowserBFF.TestBase import TestBase
from biokbase.Errors import ServiceError
import unittest  # kept for the optional @unittest.skip decorators below
UPSTREAM_SERVICE = 'mock'
ENV = 'mock'
JOB_ID_WITH_LOGS = '59820c93e4b06f68bf751eeb' # non-admin
JOB_ID_NO_LOGS = '5cf1522aaa5a4d298c5dc2ff' # non-admin
JOB_ID_NOT_FOUND = '5cf1522aaa5a4d298c5dc2fe' # non-admin
JOB_ID_NO_PERMISSION = '57ec06aee4b0b05cf8996b89' # access it as non-admin user
TIMEOUT_MS = 10000
class JobBrowserBFFTest(TestBase):
def assert_job_log_result(self, ret):
self.assertIsInstance(ret, list)
result = ret[0]
self.assertIsInstance(result, dict)
self.assertIn('log', result)
job_log = result.get('log')
self.assertIsInstance(job_log, list)
total_count = result.get('total_count')
return job_log, total_count
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_happy(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
'job_id': JOB_ID_WITH_LOGS,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
job_log, total_count = self.assert_job_log_result(ret)
self.assertEqual(len(job_log), 10)
self.assertEqual(total_count, 215)
except Exception as ex:
self.assert_no_exception(ex)
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_no_logs_happy(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
job_id = JOB_ID_NO_LOGS
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
'job_id': job_id,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
job_log, total_count = self.assert_job_log_result(ret)
self.assertEqual(len(job_log), 0)
self.assertEqual(total_count, 0)
except Exception as ex:
self.assert_no_exception(ex)
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_no_permission_sad(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
        job_id = JOB_ID_NO_PERMISSION
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
                'job_id': job_id,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
print('RET', ret)
            self.fail('Expected an exception')
except ServiceError as se:
self.assertEqual(
se.code, 40, 'Expected error code 40 (permission denied), but received {}'.format(se.code))
except Exception as ex:
self.assert_no_exception(ex)
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_not_found_sad(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
        job_id = JOB_ID_NOT_FOUND
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
                'job_id': job_id,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
print('RET', ret)
            self.fail('Expected an exception')
except ServiceError as se:
self.assertEqual(
se.code, 10, 'Expected error code 10 (not found), but received {}'.format(se.code))
except Exception as ex:
self.assert_no_exception(ex)
| 36.831776 | 107 | 0.603146 | 479 | 3,941 | 4.68476 | 0.189979 | 0.058824 | 0.048128 | 0.046346 | 0.668449 | 0.654189 | 0.654189 | 0.654189 | 0.654189 | 0.628788 | 0 | 0.031533 | 0.299924 | 3,941 | 106 | 108 | 37.179245 | 0.781805 | 0.098198 | 0 | 0.595506 | 0 | 0 | 0.128531 | 0.027119 | 0 | 0 | 0 | 0 | 0.213483 | 1 | 0.05618 | false | 0 | 0.05618 | 0 | 0.134831 | 0.022472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
188cb20c595f8931979892b300bbc3dc12968c1c | 674 | py | Python | migrations/versions/323f8d77567b_index_related_entity_names.py | yaelmi3/backslash | edf39caf97af2c926da01c340a83648f4874e97e | [
"BSD-3-Clause"
] | 17 | 2015-11-25T13:02:38.000Z | 2021-12-14T20:18:36.000Z | migrations/versions/323f8d77567b_index_related_entity_names.py | yaelmi3/backslash | edf39caf97af2c926da01c340a83648f4874e97e | [
"BSD-3-Clause"
] | 533 | 2015-11-24T12:47:13.000Z | 2022-02-12T07:59:08.000Z | migrations/versions/323f8d77567b_index_related_entity_names.py | parallelsystems/backslash | 577cdd18d5f665a8b493c4b2e2a605b7e0f6e11b | [
"BSD-3-Clause"
] | 15 | 2015-11-22T13:25:54.000Z | 2022-02-16T19:23:11.000Z | """Index related entity names
Revision ID: 323f8d77567b
Revises: 82b34e2777a4
Create Date: 2016-11-16 13:00:25.782487
"""
# revision identifiers, used by Alembic.
revision = '323f8d77567b'
down_revision = '82b34e2777a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_related_entity_name'), 'related_entity', ['name'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_related_entity_name'), table_name='related_entity')
### end Alembic commands ###
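
# Applying or rolling back this revision (sketch):
#
#     alembic upgrade 323f8d77567b
#     alembic downgrade 82b34e2777a4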
| 24.962963 | 93 | 0.71365 | 87 | 674 | 5.390805 | 0.517241 | 0.138593 | 0.108742 | 0.098081 | 0.302772 | 0.302772 | 0.302772 | 0.187633 | 0 | 0 | 0 | 0.098592 | 0.15727 | 674 | 26 | 94 | 25.923077 | 0.727113 | 0.451039 | 0 | 0 | 0 | 0 | 0.296736 | 0.130564 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
189184fcb0cca3093cef204f192b8979a5c7f238 | 29,762 | py | Python | SVS/model/utils/utils.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | SVS/model/utils/utils.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | SVS/model/utils/utils.py | ftshijt/SVS_system | 569d0a2f7ae89965bde132e5be538f6a84be471f | [
"Apache-2.0"
] | null | null | null | """Copyright [2020] [Jiatong Shi].
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# !/usr/bin/env python3
import copy
import librosa
from librosa.display import specshow
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy import signal
import soundfile as sf
from SVS.model.layers.global_mvn import GlobalMVN
import SVS.utils.metrics as Metrics
import time
import torch
# from SVS.model.layers.utterance_mvn import UtteranceMVN
# from pathlib import Path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def collect_stats(train_loader, args):
"""collect_stats."""
print("get in collect stats", flush=True)
count, sum, sum_square = 0, 0, 0
count_mel, sum_mel, sum_square_mel = 0, 0, 0
for (
step,
(
phone,
beat,
pitch,
spec,
real,
imag,
length,
chars,
char_len_list,
mel,
),
) in enumerate(train_loader, 1):
# print(f"spec.shape: {spec.shape},length.shape:
# {length.shape}, mel.shape: {mel.shape}")
for i, seq in enumerate(spec.cpu().numpy()):
# print(f"seq.shape: {seq.shape}")
seq_length = torch.max(length[i])
# print(seq_length)
seq = seq[:seq_length]
sum += seq.sum(0)
sum_square += (seq ** 2).sum(0)
count += len(seq)
for i, seq in enumerate(mel.cpu().numpy()):
seq_length = torch.max(length[i])
seq = seq[:seq_length]
sum_mel += seq.sum(0)
sum_square_mel += (seq ** 2).sum(0)
count_mel += len(seq)
assert count_mel == count
dirnames = [
os.path.dirname(args.stats_file),
os.path.dirname(args.stats_mel_file),
]
for name in dirnames:
if not os.path.exists(name):
os.makedirs(name)
np.savez(
args.stats_file,
count=count,
sum=sum,
sum_square=sum_square,
)
np.savez(
args.stats_mel_file,
count=count_mel,
sum=sum_mel,
sum_square=sum_square_mel,
)
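
# The .npz files written above hold running sums from which GlobalMVN can
# recover per-dimension statistics. A sketch of that recovery (assuming the
# exact key layout saved above):
#
#     stats = np.load(args.stats_file)
#     mean = stats["sum"] / stats["count"]
#     var = stats["sum_square"] / stats["count"] - mean ** 2
#     std = np.sqrt(np.maximum(var, 1.0e-20))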
def train_one_epoch(
train_loader,
model,
device,
optimizer,
criterion,
perceptual_entropy,
epoch,
args,
):
"""train_one_epoch."""
losses = AverageMeter()
spec_losses = AverageMeter()
if args.perceptual_loss > 0:
pe_losses = AverageMeter()
if args.n_mels > 0:
mel_losses = AverageMeter()
# mcd_metric = AverageMeter()
# f0_distortion_metric, vuv_error_metric =
# AverageMeter(), AverageMeter()
if args.double_mel_loss:
double_mel_losses = AverageMeter()
model.train()
log_save_dir = os.path.join(
args.model_save_dir, "epoch{}/log_train_figure".format(epoch)
)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
start = time.time()
# f0_ground_truth_all = np.reshape(np.array([]), (-1, 1))
# f0_synthesis_all = np.reshape(np.array([]), (-1, 1))
for (
step,
(
phone,
beat,
pitch,
spec,
real,
imag,
length,
chars,
char_len_list,
mel,
),
) in enumerate(train_loader, 1):
phone = phone.to(device)
beat = beat.to(device)
pitch = pitch.to(device).float()
spec = spec.to(device).float()
if mel is not None:
mel = mel.to(device).float()
real = real.to(device).float()
imag = imag.to(device).float()
length_mask = length.unsqueeze(2)
if mel is not None:
length_mel_mask = length_mask.repeat(1, 1, mel.shape[2]).float()
length_mel_mask = length_mel_mask.to(device)
length_mask = length_mask.repeat(1, 1, spec.shape[2]).float()
length_mask = length_mask.to(device)
length = length.to(device)
char_len_list = char_len_list.to(device)
if not args.use_asr_post:
chars = chars.to(device)
char_len_list = char_len_list.to(device)
else:
phone = phone.float()
# output = [batch size, num frames, feat_dim]
# output_mel = [batch size, num frames, n_mels dimension]
if args.model_type == "GLU_Transformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "LSTM":
output, hidden, output_mel, output_mel2 = model(phone, pitch, beat)
att = None
elif args.model_type == "GRU_gs":
output, att, output_mel = model(spec, phone, pitch, beat, length, args)
att = None
elif args.model_type == "PureTransformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Conformer":
# print(f"chars: {np.shape(chars)}, phone:
# {np.shape(phone)}, length: {np.shape(length)}")
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Comformer_full":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "USTC_DAR":
output_mel = model(
phone, pitch, beat, length, args
) # mel loss written in spec loss
att = None
spec_origin = spec.clone()
mel_origin = mel.clone()
if args.normalize:
            spec_normalizer = GlobalMVN(args.stats_file)
mel_normalizer = GlobalMVN(args.stats_mel_file)
            spec, _ = spec_normalizer(spec, length)
mel, _ = mel_normalizer(mel, length)
if args.model_type == "USTC_DAR":
spec_loss = 0
else:
spec_loss = criterion(output, spec, length_mask)
if args.n_mels > 0:
mel_loss = criterion(output_mel, mel, length_mel_mask)
if args.double_mel_loss:
double_mel_loss = criterion(output_mel2, mel, length_mel_mask)
else:
double_mel_loss = 0
else:
mel_loss = 0
double_mel_loss = 0
train_loss = mel_loss + double_mel_loss + spec_loss
if args.perceptual_loss > 0:
pe_loss = perceptual_entropy(output, real, imag)
final_loss = (
args.perceptual_loss * pe_loss + (1 - args.perceptual_loss) * train_loss
)
else:
final_loss = train_loss
final_loss = final_loss / args.accumulation_steps
final_loss.backward()
if args.gradclip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradclip)
if (epoch + 1) % args.accumulation_steps == 0:
if args.optimizer == "noam":
optimizer.step_and_update_lr()
else:
optimizer.step()
            # zero the gradients
optimizer.zero_grad()
losses.update(final_loss.item(), phone.size(0))
if args.model_type != "USTC_DAR":
spec_losses.update(spec_loss.item(), phone.size(0))
if args.perceptual_loss > 0:
pe_losses.update(pe_loss.item(), phone.size(0))
if args.n_mels > 0:
mel_losses.update(mel_loss.item(), phone.size(0))
if args.double_mel_loss:
double_mel_losses.update(double_mel_loss.item(), phone.size(0))
if step % args.train_step_log == 0:
end = time.time()
if args.model_type == "USTC_DAR":
                # The inverse normalization is only needed here for logging,
                # since this step converts to wav and computes metrics such
                # as MCD.
                if args.normalize and args.stats_file:
output_mel, _ = mel_normalizer.inverse(output_mel, length)
log_figure_mel(
step,
output_mel,
mel_origin,
att,
length,
log_save_dir,
args,
)
out_log = "step {}: train_loss {:.4f}; spec_loss {:.4f};".format(
step, losses.avg, spec_losses.avg
)
else:
                # The inverse normalization is only needed here for logging,
                # since this step converts to wav and computes metrics such
                # as MCD.
                if args.normalize and args.stats_file:
                    output, _ = spec_normalizer.inverse(output, length)
log_figure(step, output, spec_origin, att, length, log_save_dir, args)
out_log = "step {}: train_loss {:.4f}; spec_loss {:.4f};".format(
step, losses.avg, spec_losses.avg
)
if args.perceptual_loss > 0:
out_log += "pe_loss {:.4f}; ".format(pe_losses.avg)
if args.n_mels > 0:
out_log += "mel_loss {:.4f}; ".format(mel_losses.avg)
if args.double_mel_loss:
out_log += "dmel_loss {:.4f}; ".format(double_mel_losses.avg)
print("{} -- sum_time: {:.2f}s".format(out_log, (end - start)))
info = {"loss": losses.avg, "spec_loss": spec_losses.avg}
if args.perceptual_loss > 0:
info["pe_loss"] = pe_losses.avg
if args.n_mels > 0:
info["mel_loss"] = mel_losses.avg
return info
def validate(dev_loader, model, device, criterion, perceptual_entropy, epoch, args):
"""validate."""
losses = AverageMeter()
spec_losses = AverageMeter()
if args.perceptual_loss > 0:
pe_losses = AverageMeter()
if args.n_mels > 0:
mel_losses = AverageMeter()
mcd_metric = AverageMeter()
if args.double_mel_loss:
double_mel_losses = AverageMeter()
model.eval()
log_save_dir = os.path.join(
args.model_save_dir, "epoch{}/log_val_figure".format(epoch)
)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
start = time.time()
with torch.no_grad():
for (
step,
(
phone,
beat,
pitch,
spec,
real,
imag,
length,
chars,
char_len_list,
mel,
),
) in enumerate(dev_loader, 1):
phone = phone.to(device)
beat = beat.to(device)
pitch = pitch.to(device).float()
spec = spec.to(device).float()
if mel is not None:
mel = mel.to(device).float()
real = real.to(device).float()
imag = imag.to(device).float()
length_mask = length.unsqueeze(2)
if mel is not None:
length_mel_mask = length_mask.repeat(1, 1, mel.shape[2]).float()
length_mel_mask = length_mel_mask.to(device)
length_mask = length_mask.repeat(1, 1, spec.shape[2]).float()
length_mask = length_mask.to(device)
length = length.to(device)
char_len_list = char_len_list.to(device)
if not args.use_asr_post:
chars = chars.to(device)
char_len_list = char_len_list.to(device)
else:
phone = phone.float()
if args.model_type == "GLU_Transformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "LSTM":
output, hidden, output_mel, output_mel2 = model(phone, pitch, beat)
att = None
elif args.model_type == "GRU_gs":
output, att, output_mel = model(spec, phone, pitch, beat, length, args)
att = None
elif args.model_type == "PureTransformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Conformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Comformer_full":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "USTC_DAR":
output_mel = model(phone, pitch, beat, length, args)
att = None
spec_origin = spec.clone()
mel_origin = mel.clone()
if args.normalize:
                spec_normalizer = GlobalMVN(args.stats_file)
mel_normalizer = GlobalMVN(args.stats_mel_file)
                spec, _ = spec_normalizer(spec, length)
mel, _ = mel_normalizer(mel, length)
if args.model_type == "USTC_DAR":
spec_loss = 0
else:
spec_loss = criterion(output, spec, length_mask)
if args.n_mels > 0:
mel_loss = criterion(output_mel, mel, length_mel_mask)
if args.double_mel_loss:
double_mel_loss = criterion(output_mel2, mel, length_mel_mask)
else:
double_mel_loss = 0
else:
mel_loss = 0
double_mel_loss = 0
dev_loss = mel_loss + double_mel_loss + spec_loss
if args.perceptual_loss > 0:
pe_loss = perceptual_entropy(output, real, imag)
final_loss = (
args.perceptual_loss * pe_loss
+ (1 - args.perceptual_loss) * dev_loss
)
else:
final_loss = dev_loss
losses.update(final_loss.item(), phone.size(0))
if args.model_type != "USTC_DAR":
spec_losses.update(spec_loss.item(), phone.size(0))
if args.perceptual_loss > 0:
# pe_loss = perceptual_entropy(output, real, imag)
pe_losses.update(pe_loss.item(), phone.size(0))
if args.n_mels > 0:
mel_losses.update(mel_loss.item(), phone.size(0))
if args.double_mel_loss:
double_mel_losses.update(double_mel_loss.item(), phone.size(0))
if args.model_type == "USTC_DAR":
# normalize inverse stage
if args.normalize and args.stats_file:
output_mel, _ = mel_normalizer.inverse(output_mel, length)
mcd_value, length_sum = (
0,
1,
) # FIX ME! Calculate_melcd_fromMelSpectrum
else:
# normalize inverse stage
if args.normalize and args.stats_file:
                    output, _ = spec_normalizer.inverse(output, length)
(mcd_value, length_sum,) = Metrics.Calculate_melcd_fromLinearSpectrum(
output, spec_origin, length, args
)
mcd_metric.update(mcd_value, length_sum)
if step % args.dev_step_log == 0:
if args.model_type == "USTC_DAR":
log_figure_mel(
step,
output_mel,
mel_origin,
att,
length,
log_save_dir,
args,
)
else:
log_figure(
step,
output,
spec_origin,
att,
length,
log_save_dir,
args,
)
out_log = (
"step {}: train_loss {:.4f}; "
"spec_loss {:.4f}; mcd_value {:.4f};".format(
step, losses.avg, spec_losses.avg, mcd_metric.avg
)
)
if args.perceptual_loss > 0:
out_log += "pe_loss {:.4f}; ".format(pe_losses.avg)
if args.n_mels > 0:
out_log += "mel_loss {:.4f}; ".format(mel_losses.avg)
if args.double_mel_loss:
out_log += "dmel_loss {:.4f}; ".format(double_mel_losses.avg)
end = time.time()
print("{} -- sum_time: {}s".format(out_log, (end - start)))
info = {
"loss": losses.avg,
"spec_loss": spec_losses.avg,
"mcd_value": mcd_metric.avg,
}
if args.perceptual_loss > 0:
info["pe_loss"] = pe_losses.avg
if args.n_mels > 0:
info["mel_loss"] = mel_losses.avg
return info
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
"""init."""
self.reset()
def reset(self):
"""reset."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
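
# Usage sketch: AverageMeter.update() accumulates a weighted running average:
#
#     meter = AverageMeter()
#     meter.update(0.5, n=16)   # batch of 16 samples with mean value 0.5
#     meter.update(0.3, n=16)
#     meter.avg                 # -> 0.4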
def save_checkpoint(state, model_filename):
"""save_checkpoint."""
torch.save(state, model_filename)
return 0
def save_model(
args,
epoch,
model,
optimizer,
train_info,
dev_info,
logger,
save_loss_select,
):
"""save_model."""
if args.optimizer == "noam":
save_checkpoint(
{
"epoch": epoch,
"state_dict": model.state_dict(),
"optimizer": optimizer._optimizer.state_dict(),
},
"{}/epoch_{}_{}.pth.tar".format(
args.model_save_dir, save_loss_select, epoch
),
)
else:
save_checkpoint(
{
"epoch": epoch,
"state_dict": model.state_dict(),
},
"{}/epoch_{}_{}.pth.tar".format(
args.model_save_dir, save_loss_select, epoch
),
)
# record training and validation information
if args.use_tfboard:
record_info(train_info, dev_info, epoch, logger)
def record_info(train_info, dev_info, epoch, logger):
"""record_info."""
loss_info = {
"train_loss": train_info["loss"],
"dev_loss": dev_info["loss"],
}
logger.add_scalars("losses", loss_info, epoch)
return 0
def invert_spectrogram(spectrogram, win_length, hop_length):
"""Invert_spectrogram.
applies inverse fft.
Args:
spectrogram: [1+n_fft//2, t]
"""
return librosa.istft(spectrogram, hop_length, win_length=win_length, window="hann")
def griffin_lim(spectrogram, iter_vocoder, n_fft, hop_length, win_length):
"""griffin_lim."""
X_best = copy.deepcopy(spectrogram)
for i in range(iter_vocoder):
X_t = invert_spectrogram(X_best, win_length, hop_length)
est = librosa.stft(X_t, n_fft, hop_length, win_length=win_length)
phase = est / np.maximum(1e-8, np.abs(est))
X_best = spectrogram * phase
X_t = invert_spectrogram(X_best, win_length, hop_length)
y = np.real(X_t)
return y
def spectrogram2wav(
mag, max_db, ref_db, preemphasis, power, sr, hop_length, win_length, n_fft
):
"""Generate wave file from linear magnitude spectrogram.
Args:
mag: A numpy array of (T, 1+n_fft//2)
Returns:
wav: A 1-D numpy array.
"""
    hop_length = int(hop_length * sr)
    win_length = int(win_length * sr)
# transpose
mag = mag.T
# de-noramlize
mag = (np.clip(mag, 0, 1) * max_db) - max_db + ref_db
# to amplitude
mag = np.power(10.0, mag * 0.05)
# wav reconstruction
wav = griffin_lim(mag ** power, 100, n_fft, hop_length, win_length)
# de-preemphasis
wav = signal.lfilter([1], [1, -preemphasis], wav)
# trim
wav, _ = librosa.effects.trim(wav)
return wav.astype(np.float32)
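
# Usage sketch (all parameter values below are illustrative, not canonical).
# Note that hop_length and win_length are passed in seconds and converted to
# samples inside the function:
#
#     wav = spectrogram2wav(mag, max_db=100, ref_db=20, preemphasis=0.97,
#                           power=1.5, sr=22050, hop_length=0.0125,
#                           win_length=0.05, n_fft=2048)
#     sf.write("sample.wav", wav, 22050)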
def log_figure_mel(step, output, spec, att, length, save_dir, args):
"""log_figure_mel."""
# only get one sample from a batch
# save wav and plot spectrogram
output = output.cpu().detach().numpy()[0]
out_spec = spec.cpu().detach().numpy()[0]
length = np.max(length.cpu().detach().numpy()[0])
output = output[:length]
out_spec = out_spec[:length]
# FIX ME! Need WaveRNN to produce wav from mel-spec
# wav = spectrogram2wav(output, args.max_db, args.ref_db,
# args.preemphasis, args.power, args.sampling_rate,
# args.frame_shift, args.frame_length, args.nfft)
# wav_true = spectrogram2wav(out_spec, args.max_db,
# args.ref_db, args.preemphasis, args.power, args.sampling_rate,
# args.frame_shift, args.frame_length, args.nfft)
# if librosa.__version__ < '0.8.0':
# librosa.output.write_wav(os.path.join(save_dir,
# '{}.wav'.format(step)), wav, args.sampling_rate)
# librosa.output.write_wav(os.path.join(save_dir,
# '{}_true.wav'.format(step)), wav_true, args.sampling_rate)
# else:
# # librosa > 0.8 remove librosa.output.write_wav module
# sf.write(os.path.join(save_dir, '{}.wav'.format(step)),
# wav, args.sampling_rate,format='wav', subtype='PCM_24')
# sf.write(os.path.join(save_dir, '{}_true.wav'.format(step)),
# wav, args.sampling_rate,format='wav', subtype='PCM_24')
plt.subplot(1, 2, 1)
specshow(output.T)
plt.title("prediction")
plt.subplot(1, 2, 2)
specshow(out_spec.T)
plt.title("ground_truth")
plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
if att is not None:
att = att.cpu().detach().numpy()[0]
att = att[:, :length, :length]
plt.subplot(1, 4, 1)
specshow(att[0])
plt.subplot(1, 4, 2)
specshow(att[1])
plt.subplot(1, 4, 3)
specshow(att[2])
plt.subplot(1, 4, 4)
specshow(att[3])
plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def log_figure(step, output, spec, att, length, save_dir, args):
"""log_figure."""
# only get one sample from a batch
# save wav and plot spectrogram
output = output.cpu().detach().numpy()[0]
out_spec = spec.cpu().detach().numpy()[0]
length = np.max(length.cpu().detach().numpy()[0])
output = output[:length]
out_spec = out_spec[:length]
wav = spectrogram2wav(
output,
args.max_db,
args.ref_db,
args.preemphasis,
args.power,
args.sampling_rate,
args.frame_shift,
args.frame_length,
args.nfft,
)
wav_true = spectrogram2wav(
out_spec,
args.max_db,
args.ref_db,
args.preemphasis,
args.power,
args.sampling_rate,
args.frame_shift,
args.frame_length,
args.nfft,
)
if librosa.__version__ < "0.8.0":
librosa.output.write_wav(
os.path.join(save_dir, "{}.wav".format(step)),
wav,
args.sampling_rate,
)
librosa.output.write_wav(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
)
else:
# librosa > 0.8 remove librosa.output.write_wav module
sf.write(
os.path.join(save_dir, "{}.wav".format(step)),
wav,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
sf.write(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
plt.subplot(1, 2, 1)
specshow(output.T)
plt.title("prediction")
plt.subplot(1, 2, 2)
specshow(out_spec.T)
plt.title("ground_truth")
plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
if att is not None:
att = att.cpu().detach().numpy()[0]
att = att[:, :length, :length]
plt.subplot(1, 4, 1)
specshow(att[0])
plt.subplot(1, 4, 2)
specshow(att[1])
plt.subplot(1, 4, 3)
specshow(att[2])
plt.subplot(1, 4, 4)
specshow(att[3])
plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def log_mel(step, output_mel, spec, att, length, save_dir, args, voc_model):
"""log_mel."""
# only get one sample from a batch
# save wav and plot spectrogram
output_mel = output_mel.cpu().detach().numpy()[0]
out_spec = spec.cpu().detach().numpy()[0]
length = np.max(length.cpu().detach().numpy()[0])
output_mel = output_mel[:length]
out_spec = out_spec[:length]
wav = voc_model.generate(output_mel)
wav_true = spectrogram2wav(
out_spec,
args.max_db,
args.ref_db,
args.preemphasis,
args.power,
args.sampling_rate,
args.frame_shift,
args.frame_length,
args.nfft,
)
if librosa.__version__ < "0.8.0":
librosa.output.write_wav(
os.path.join(save_dir, "{}.wav".format(step)), wav, args.sampling_rate
)
librosa.output.write_wav(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
)
else:
# librosa > 0.8 remove librosa.output.write_wav module
sf.write(
os.path.join(save_dir, "{}.wav".format(step)),
wav,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
sf.write(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
plt.subplot(1, 2, 1)
specshow(output_mel.T)
plt.title("prediction")
plt.subplot(1, 2, 2)
specshow(out_spec.T)
plt.title("ground_truth")
plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
if att is not None:
att = att.cpu().detach().numpy()[0]
att = att[:, :length, :length]
plt.subplot(1, 4, 1)
specshow(att[0])
plt.subplot(1, 4, 2)
specshow(att[1])
plt.subplot(1, 4, 3)
specshow(att[2])
plt.subplot(1, 4, 4)
specshow(att[3])
plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def Calculate_time(elapsed_time):
"""Calculate_time."""
elapsed_hours = int(elapsed_time / 3600)
elapsed_mins = int((elapsed_time - (elapsed_hours * 3600)) / 60)
elapsed_secs = int(elapsed_time - (elapsed_hours * 3600) - (elapsed_mins * 60))
return elapsed_hours, elapsed_mins, elapsed_secs
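
# Example: Calculate_time(3725) -> (1, 2, 5), i.e. 1h 2m 5s.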
def Calculate_time_path(path):
"""Calculate_time_path."""
num_list = os.listdir(path)
total_time = 0
for number in num_list:
# print(number)
number_path = os.path.join(path, number)
# print(number_path)
wav_name_list = os.listdir(number_path)
for wav_name in wav_name_list:
wav_path = os.path.join(number_path, wav_name)
print(wav_path)
time = librosa.get_duration(filename=wav_path)
print(time)
total_time += time
return total_time
def Calculate_dataset_duration(dataset_path):
"""Calculate_dataset_duration."""
train_path = os.path.join(dataset_path, "train")
dev_path = os.path.join(dataset_path, "dev")
test_path = os.path.join(dataset_path, "test")
total_time = (
Calculate_time_path(train_path)
+ Calculate_time_path(dev_path)
+ Calculate_time_path(test_path)
)
hours, mins, secs = Calculate_time(total_time)
print(f"Time: {hours}h {mins}m {secs}s'")
if __name__ == "__main__":
# path = "/data5/jiatong/SVS_system/SVS/data/
# public_dataset/kiritan_data/wav_info"
path = "/data5/jiatong/SVS_system/SVS/data/public_dataset/hts_data/wav_info"
Calculate_dataset_duration(path)
| 32.140389 | 88 | 0.540152 | 3,537 | 29,762 | 4.315804 | 0.107153 | 0.018474 | 0.016377 | 0.016508 | 0.710973 | 0.690468 | 0.670226 | 0.664068 | 0.654373 | 0.642909 | 0 | 0.014256 | 0.347154 | 29,762 | 925 | 89 | 32.175135 | 0.771384 | 0.115012 | 0 | 0.680939 | 0 | 0 | 0.042314 | 0.006007 | 0 | 0 | 0 | 0 | 0.001381 | 1 | 0.024862 | false | 0 | 0.016575 | 0 | 0.055249 | 0.008287 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
189b834780427e5805e8ddb0880c32074d93411d | 879 | py | Python | pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py | oocemb/Calculation | 91d202d1b5a2dde6376487147517310682294278 | [
"Apache-2.0"
] | null | null | null | pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py | oocemb/Calculation | 91d202d1b5a2dde6376487147517310682294278 | [
"Apache-2.0"
] | null | null | null | pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py | oocemb/Calculation | 91d202d1b5a2dde6376487147517310682294278 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 4.0.2 on 2022-03-25 11:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calc', '0023_delete_handle_alter_calctag_options_and_more'),
]
operations = [
migrations.CreateModel(
name='Ldstp',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('price', models.DecimalField(decimal_places=2, max_digits=8)),
('availability', models.CharField(max_length=32)),
],
),
migrations.AlterModelOptions(
name='furnitureincalc',
options={'verbose_name': 'Фурнитура в расчёте', 'verbose_name_plural': 'Фурнитура в расчёте'},
),
]
| 32.555556 | 117 | 0.602958 | 89 | 879 | 5.764045 | 0.707865 | 0.064327 | 0.070175 | 0.093567 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040562 | 0.270762 | 879 | 26 | 118 | 33.807692 | 0.75975 | 0.051195 | 0 | 0.1 | 1 | 0 | 0.201923 | 0.058894 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.05 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
189c6b3a4cd4803a7422b2fd630d54013aa0aa1e | 14,356 | py | Python | aiida_siesta/calculations/stm.py | mailhexu/aiida_siesta_plugin | 313ef4b3532b54d8d0c81788b683c53cb4701965 | [
"MIT"
] | null | null | null | aiida_siesta/calculations/stm.py | mailhexu/aiida_siesta_plugin | 313ef4b3532b54d8d0c81788b683c53cb4701965 | [
"MIT"
] | 2 | 2019-05-12T22:11:46.000Z | 2019-05-13T11:46:16.000Z | aiida_siesta/calculations/stm.py | mailhexu/aiida_siesta_plugin | 313ef4b3532b54d8d0c81788b683c53cb4701965 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
# Module with fdf-aware dictionary
from tkdict import FDFDict
from aiida.orm.calculation.job import JobCalculation
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo
from aiida.common.utils import classproperty
from aiida.common.datastructures import CodeInfo
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.remote import RemoteData
__copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. All rights reserved."
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.9.10"
__contributors__ = "Victor M. Garcia-Suarez, Alberto Garcia"
class STMCalculation(JobCalculation):
"""
Plugin for the "plstm" program in the Siesta distribution, which
takes and .LDOS or .RHO file and generates a plot file to simulate
an STM image.
"""
_stm_plugin_version = 'aiida-0.12.0--stm-0.9.10'
def _init_internal_params(self):
super(STMCalculation, self)._init_internal_params()
# Default Siesta output parser provided by AiiDA
self._default_parser = "siesta.stm"
# Keywords that cannot be set
# We need to canonicalize this!
self._aiida_blocked_keywords = ['mode','system-label','extension']
# Default input and output files
self._DEFAULT_INPUT_FILE = 'stm.in'
self._DEFAULT_OUTPUT_FILE = 'stm.out'
self._DEFAULT_PLOT_FILE = 'aiida.CH.STM'
self._OUTPUT_SUBFOLDER = './'
self._PREFIX = 'aiida'
self._INPUT_FILE_NAME = 'stm.in'
self._OUTPUT_FILE_NAME = 'stm.out'
self._PLOT_FILE_NAME = 'aiida.CH.STM'
# in restarts, it will copy from the parent the following
self._restart_copy_from = os.path.join(self._OUTPUT_SUBFOLDER, '*.LDOS')
# in restarts, it will copy the previous folder in the following one
self._restart_copy_to = self._OUTPUT_SUBFOLDER
@classproperty
def _use_methods(cls):
"""
Extend the parent _use_methods with further keys.
"""
retdict = JobCalculation._use_methods
retdict["settings"] = {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'settings',
'docstring': "Use an additional node for special settings",
}
retdict["parameters"] = {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'parameters',
'docstring': ("Use a node that specifies the input parameters "
"for the namelists"),
}
retdict["parent_folder"] = {
'valid_types': RemoteData,
'additional_parameter': None,
'linkname': 'parent_calc_folder',
'docstring': ("Use a remote folder as parent folder (for "
"restarts and similar"),
}
return retdict
    def _prepare_for_submission(self, tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
local_copy_list = []
remote_copy_list = []
# Process the settings dictionary first
# Settings can be undefined, and defaults to an empty dictionary
settings = inputdict.pop(self.get_linkname('settings'),None)
if settings is None:
settings_dict = {}
else:
if not isinstance(settings, ParameterData):
raise InputValidationError("settings, if specified, must be of "
"type ParameterData")
# Settings converted to UPPERCASE
# Presumably to standardize the usage and avoid
# ambiguities
settings_dict = _uppercase_dict(settings.get_dict(),
dict_name='settings')
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this "
"calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'))
except KeyError:
raise InputValidationError("No parent_calc_folder specified for this "
"calculation")
if not isinstance(parent_calc_folder, RemoteData):
raise InputValidationError("parent_calc_folder, if specified,"
"must be of type RemoteData")
        #
        # Important note: this program should NOT be run with MPI.
        # Scripts using this plugin should therefore call
        #
        #     calc.set_withmpi(False)
        #
        # before submitting. Enforcing it here via self.set_withmpi(False)
        # is deliberately left disabled, so take care that the setting is
        # not overridden.
        #
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this calculation")
# Here, there should be no more parameters...
if inputdict:
raise InputValidationError("The following input data nodes are "
"unrecognized: {}".format(inputdict.keys()))
# END OF INITIAL INPUT CHECK #
#
# There should be a warning for duplicated (canonicalized) keys
# in the original dictionary in the script
input_params = FDFDict(parameters.get_dict())
# Look for blocked keywords and
# add the proper values to the dictionary
for blocked_key in self._aiida_blocked_keywords:
canonical_blocked = FDFDict.translate_key(blocked_key)
for key in input_params:
if key == canonical_blocked:
raise InputValidationError(
"You cannot specify explicitly the '{}' flag in the "
"input parameters".format(input_params.get_last_key(key)))
input_params.update({'system-label': self._PREFIX})
input_params.update({'mode': 'constant-height'})
input_params.update({'extension': 'ldos'})
# Maybe check that the 'z' coordinate makes sense...
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
with open(input_filename,'w') as infile:
infile.write("aiida\n")
infile.write("ldos\n")
infile.write("constant-height\n")
# Convert height to bohr...
infile.write("{}\n".format(input_params['z']/0.529177))
infile.write("unformatted\n")
# ------------------------------------- END of input file creation
# The presence of a 'parent_calc_folder' is mandatory, to get the LDOS file
# as indicated in the self._restart_copy_from attribute.
# (this is not technically a restart, though)
# It will be copied to the current calculation's working folder.
if parent_calc_folder is not None:
remote_copy_list.append(
(parent_calc_folder.get_computer().uuid,
os.path.join(parent_calc_folder.get_remote_path(),
self._restart_copy_from),
self._restart_copy_to
))
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
#
# Empty command line by default
# Why use 'pop' ?
cmdline_params = settings_dict.pop('CMDLINE', [])
if cmdline_params:
calcinfo.cmdline_params = list(cmdline_params)
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
calcinfo.stdin_name = self._INPUT_FILE_NAME
calcinfo.stdout_name = self._OUTPUT_FILE_NAME
#
# Code information object
#
codeinfo = CodeInfo()
codeinfo.cmdline_params = list(cmdline_params)
codeinfo.stdin_name = self._INPUT_FILE_NAME
codeinfo.stdout_name = self._OUTPUT_FILE_NAME
codeinfo.code_uuid = code.uuid
calcinfo.codes_info = [codeinfo]
# Retrieve by default: the output file and the plot file
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
calcinfo.retrieve_list.append(self._PLOT_FILE_NAME)
# Any other files specified in the settings dictionary
settings_retrieve_list = settings_dict.pop('ADDITIONAL_RETRIEVE_LIST',[])
calcinfo.retrieve_list += settings_retrieve_list
return calcinfo
def _set_parent_remotedata(self,remotedata):
"""
Used to set a parent remotefolder that holds the LDOS file
from a previous Siesta calculation
"""
from aiida.common.exceptions import ValidationError
if not isinstance(remotedata,RemoteData):
raise ValueError('remotedata must be a RemoteData')
# complain if another remotedata is already found
input_remote = self.get_inputs(node_type=RemoteData)
if input_remote:
raise ValidationError("Cannot set several parent calculation to a "
"{} calculation".format(self.__class__.__name__))
self.use_parent_folder(remotedata)
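
# Usage sketch for this plugin under AiiDA 0.x (the code label and the parent
# Siesta calculation below are assumptions for illustration):
#
#     code = Code.get_from_string('plstm@localhost')
#     calc = code.new_calc()
#     calc.set_withmpi(False)  # plstm must run serially
#     calc.use_parameters(ParameterData(dict={'z': 7.5}))  # tip height
#     calc.use_parent_folder(siesta_calc.out.remote_folder)
#     calc.store_all()
#     calc.submit()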
def get_input_data_text(key,val, mapping=None):
"""
Given a key and a value, return a string (possibly multiline for arrays)
with the text to be added to the input file.
:param key: the flag name
:param val: the flag value. If it is an array, a line for each element
is produced, with variable indexing starting from 1.
Each value is formatted using the conv_to_fortran function.
:param mapping: Optional parameter, must be provided if val is a dictionary.
It maps each key of the 'val' dictionary to the corresponding
list index. For instance, if ``key='magn'``,
``val = {'Fe': 0.1, 'O': 0.2}`` and ``mapping = {'Fe': 2, 'O': 1}``,
this function will return the two lines ``magn(1) = 0.2`` and
``magn(2) = 0.1``. This parameter is ignored if 'val'
is not a dictionary.
"""
from aiida.common.utils import conv_to_fortran
# I check first the dictionary, because it would also match
# hasattr(__iter__)
if isinstance(val, dict):
if mapping is None:
raise ValueError("If 'val' is a dictionary, you must provide also "
"the 'mapping' parameter")
list_of_strings = []
for elemk, itemval in val.iteritems():
try:
idx = mapping[elemk]
except KeyError:
raise ValueError("Unable to find the key '{}' in the mapping "
"dictionary".format(elemk))
list_of_strings.append((idx," {0}({2}) = {1}\n".format(
key, conv_to_fortran(itemval), idx)))
# I first have to resort, then to remove the index from the first
# column, finally to join the strings
list_of_strings = zip(*sorted(list_of_strings))[1]
return "".join(list_of_strings)
elif hasattr(val,'__iter__'):
# a list/array/tuple of values
list_of_strings = [
"{0}({2}) {1}\n".format(key, conv_to_fortran(itemval), idx+1)
for idx, itemval in enumerate(val)]
return "".join(list_of_strings)
else:
# single value
if key[:6] == '%block':
bname = key.split()[1]
b1 = "{0} {1}".format(key, my_conv_to_fortran(val))
return b1 + "\n%endblock " + bname + "\n"
else:
return "{0} {1}\n".format(key, my_conv_to_fortran(val))
def my_conv_to_fortran(val):
"""
Special version to avoid surrounding strings with extra ' '. Otherwise the
fdf tokenizer will not split values and units, for example.
:param val: the value to be read and converted to a Fortran-friendly string.
"""
# Note that bool should come before integer, because a boolean matches also
# isinstance(...,int)
if (isinstance(val, bool)):
if val:
val_str = '.true.'
else:
val_str = '.false.'
elif (isinstance(val, (int, long))):
val_str = "{:d}".format(val)
elif (isinstance(val, float)):
val_str = ("{:18.10e}".format(val)).replace('e', 'd')
elif (isinstance(val, basestring)):
val_str = "{!s}".format(val)
else:
raise ValueError("Invalid value passed, accepts only bools, ints, "
"floats and strings")
return val_str
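
# Illustrative conversions (sketch):
#
#     my_conv_to_fortran(True)      -> '.true.'
#     my_conv_to_fortran(3)         -> '3'
#     my_conv_to_fortran(0.25)      -> '  2.5000000000d-01'
#     my_conv_to_fortran('100 Ry')  -> '100 Ry'   (no extra quoting added)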
def _uppercase_dict(d, dict_name):
from collections import Counter
if isinstance(d,dict):
new_dict = dict((str(k).upper(), v) for k, v in d.iteritems())
if len(new_dict) != len(d):
num_items = Counter(str(k).upper() for k in d.keys())
            double_keys = ",".join([k for k, v in num_items.items() if v > 1])
raise InputValidationError(
"Inside the dictionary '{}' there are the following keys that "
"are repeated more than once when compared case-insensitively: "
"{}."
"This is not allowed.".format(dict_name, double_keys))
return new_dict
else:
        raise TypeError("_uppercase_dict accepts only dictionaries as argument")
| 39.657459 | 278 | 0.60156 | 1,659 | 14,356 | 5.035564 | 0.26522 | 0.009576 | 0.017237 | 0.00814 | 0.147833 | 0.0565 | 0.038066 | 0.008619 | 0.008619 | 0.008619 | 0 | 0.00585 | 0.309348 | 14,356 | 361 | 279 | 39.767313 | 0.836712 | 0.132906 | 0 | 0.122549 | 0 | 0.004902 | 0.190159 | 0.004568 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.004902 | 0.058824 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
189ea5728ae22b441ea875f1bd0c5faac3a76ced | 294 | py | Python | example2.py | presidento/scripthelper | 71b9e69f2967fb8d352376213c046263d5c31849 | [
"MIT"
] | null | null | null | example2.py | presidento/scripthelper | 71b9e69f2967fb8d352376213c046263d5c31849 | [
"MIT"
] | 3 | 2020-04-28T13:14:31.000Z | 2021-01-15T09:41:56.000Z | example2.py | presidento/scripthelper | 71b9e69f2967fb8d352376213c046263d5c31849 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import scripthelper
scripthelper.add_argument("-n", "--name", help="Name to greet")
logger, args = scripthelper.bootstrap_args()
if args.name:
logger.debug("Name was provided")
logger.info(f"Hello {args.name}")
else:
logger.warning("Name was not provided")
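
# Typical invocation (sketch; the verbosity flags come from scripthelper's
# bootstrap and are an assumption here, not defined in this file):
#
#     python example2.py --name World --verbose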
| 24.5 | 63 | 0.710884 | 41 | 294 | 5.04878 | 0.634146 | 0.077295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003922 | 0.132653 | 294 | 11 | 64 | 26.727273 | 0.807843 | 0.071429 | 0 | 0 | 0 | 0 | 0.279412 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
18aaa916c943bdb538fc41fcf2673ef26fba2444 | 3,603 | py | Python | djangocms_modules/models.py | crydotsnake/djangocms-modules | ab5b75ee1076e6fccab1a26b8dbe1c754c4de8d7 | [
"BSD-3-Clause"
] | 8 | 2019-01-29T15:11:30.000Z | 2020-06-07T19:27:50.000Z | djangocms_modules/models.py | crydotsnake/djangocms-modules | ab5b75ee1076e6fccab1a26b8dbe1c754c4de8d7 | [
"BSD-3-Clause"
] | 11 | 2018-12-14T14:01:06.000Z | 2020-09-02T09:02:49.000Z | djangocms_modules/models.py | divio/djangocms-modules | 8328f130cddd4cf5f90beca170d1303b95158cda | [
"BSD-3-Clause"
] | 3 | 2021-04-16T12:26:27.000Z | 2021-06-25T14:53:47.000Z | from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.urls import Resolver404, resolve
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from cms import operations
from cms.models import CMSPlugin, Placeholder
from cms.models.fields import PlaceholderField
from cms.signals import pre_placeholder_operation
from cms.utils.plugins import get_bound_plugins
def _get_placeholder_slot(category):
return 'module-category-{}'.format(category.pk)
@receiver(pre_placeholder_operation)
def sync_module_plugin(sender, **kwargs):
"""
Updates the created placeholder operation record,
based on the configured post operation handlers.
"""
operation_type = kwargs.pop('operation')
affected_operations = (operations.MOVE_PLUGIN, operations.PASTE_PLUGIN)
if operation_type not in affected_operations:
return
try:
match = resolve(kwargs['origin'])
except Resolver404:
match = None
is_in_modules = match and match.url_name == 'cms_modules_list'
if not is_in_modules:
return
plugin = kwargs['plugin']
placeholder = kwargs.get('target_placeholder')
    needs_sync = (
        plugin.plugin_type == 'Module'
        and placeholder.pk != plugin.module_category.modules_id
    )
if needs_sync:
# User has moved module to another category placeholder
# or pasted a copied module plugin.
new_category = Category.objects.get(modules=placeholder)
(ModulePlugin
.objects
.filter(path__startswith=plugin.path, depth__gte=plugin.depth)
.update(module_category=new_category))
class Category(models.Model):
name = models.CharField(
verbose_name=_('Name'),
max_length=120,
unique=True,
)
modules = PlaceholderField(slotname=_get_placeholder_slot)
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def __str__(self):
return self.name
@cached_property
def modules_placeholder(self):
return ModulesPlaceholder.objects.get(pk=self.modules_id)
def get_non_empty_modules(self):
unbound_plugins = (
self
.modules
.get_plugins(language=settings.LANGUAGE_CODE)
.filter(parent__isnull=True, numchild__gte=1)
)
return get_bound_plugins(unbound_plugins)
class ModulesPlaceholder(Placeholder):
class Meta:
proxy = True
def _get_attached_model(self):
return Category
def _get_attached_models(self):
return self._get_attached_model()
def _get_attached_objects(self):
return self._get_attached_model().objects.filter(modules=self.pk)
@cached_property
def category(self):
return self._get_attached_model().objects.get(modules=self.pk)
def get_label(self):
return self.category.name
class ModulePlugin(CMSPlugin):
module_name = models.CharField(
verbose_name=_('Name'),
max_length=120,
)
module_category = models.ForeignKey(
to=Category,
verbose_name=_('Category'),
on_delete=models.CASCADE,
)
def __str__(self):
return self.module_name
def update(self, refresh=False, **fields):
ModulePlugin.objects.filter(pk=self.pk).update(**fields)
if refresh:
return self.reload()
return
def get_unbound_plugins(self):
return CMSPlugin.get_tree(self).order_by('path')
| 27.090226 | 75 | 0.686095 | 417 | 3,603 | 5.669065 | 0.309353 | 0.038071 | 0.035533 | 0.021574 | 0.099831 | 0.08291 | 0.07022 | 0.038917 | 0.038917 | 0 | 0 | 0.004683 | 0.229531 | 3,603 | 132 | 76 | 27.295455 | 0.846902 | 0.051901 | 0 | 0.135417 | 0 | 0 | 0.034462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135417 | false | 0 | 0.114583 | 0.104167 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
18ae5fde1fdfdd5b09f5207f83e23ef0e8f54a07 | 854 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/ripng_template.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RIPng(Base):
    """Traffic template for the RIPng header; fields map to SDM Multivalues."""
    __slots__ = ()
_SDM_NAME = 'ripng'
_SDM_ATT_MAP = {
'RIPng Header': 'ripng.header.ripngHeader',
'Route Table entries': 'ripng.header.routeTableEntries',
}
def __init__(self, parent):
super(RIPng, self).__init__(parent)
@property
def RIPng_Header(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RIPng Header']))
@property
def Route_Table_entries(self):
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Route Table entries']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
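
# Usage sketch (illustrative only): given a `ripng` instance of this class
# obtained from a traffic item's config-element stack, each field is exposed
# as a Multivalue, so the usual Multivalue patterns apply. The field values
# below are assumptions:
#
#     ripng.RIPng_Header.Single('0x0101')
#     ripng.Route_Table_entries.Increment(start_value='1', step_value='1')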
| 30.5 | 94 | 0.701405 | 104 | 854 | 5.375 | 0.307692 | 0.09839 | 0.135957 | 0.069767 | 0.406082 | 0.350626 | 0.350626 | 0.350626 | 0.350626 | 0.350626 | 0 | 0 | 0.199063 | 854 | 27 | 95 | 31.62963 | 0.817251 | 0 | 0 | 0.190476 | 0 | 0 | 0.141686 | 0.063232 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.190476 | 0.047619 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
18af0a7d2a7ce2d43b7672a9c24d93c96068fd61 | 1,083 | py | Python | backend/feedback/migrations/0001_initial.py | kylecarter/ict4510-advwebdvlp | 0360b2353535611a6b3dd79cefe2d5780d027511 | [
"Apache-2.0"
] | null | null | null | backend/feedback/migrations/0001_initial.py | kylecarter/ict4510-advwebdvlp | 0360b2353535611a6b3dd79cefe2d5780d027511 | [
"Apache-2.0"
] | null | null | null | backend/feedback/migrations/0001_initial.py | kylecarter/ict4510-advwebdvlp | 0360b2353535611a6b3dd79cefe2d5780d027511 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.3 on 2018-11-18 02:34
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('modified_date', models.DateTimeField(auto_now=True)),
('contact', models.CharField(help_text='Name of the contact', max_length=255, verbose_name='Full Name')),
('email', models.EmailField(help_text='Contact email.', max_length=255, verbose_name='Email')),
('message', models.TextField(help_text='Message provided by the contact.', verbose_name='Message')),
('resolution', models.TextField(blank=True, help_text='Resolution if any for the conversation.', null=True, verbose_name='Resolution')),
],
),
]
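
# Applying this migration (sketch; assumes the app is registered as
# "feedback"): python manage.py migrate feedback 0001_initial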
| 40.111111 | 152 | 0.626962 | 120 | 1,083 | 5.508333 | 0.516667 | 0.083207 | 0.069592 | 0.081694 | 0.160363 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025516 | 0.240074 | 1,083 | 26 | 153 | 41.653846 | 0.777643 | 0.041551 | 0 | 0 | 1 | 0 | 0.197876 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
18b146154d393893b10c35ac0c235675a70fdc26 | 1,377 | py | Python | Aula19/ex09.py | danicon/MD3-Curso_Python | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | [
"MIT"
] | null | null | null | Aula19/ex09.py | danicon/MD3-Curso_Python | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | [
"MIT"
] | null | null | null | Aula19/ex09.py | danicon/MD3-Curso_Python | 3d419d440d3b28adb5c019268f4b217e7d0ce45a | [
"MIT"
] | null | null | null | jogador = dict()
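# (Exercise from a Portuguese-language Python course: build a dict describing
# a soccer player, read his goals per match into a list, then report totals.
# The user-facing strings below are intentionally left in Portuguese.)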
partidas = list()
jogador['nome'] = str(input('Nome do jogador: '))
tot = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(0, tot):
partidas.append(int(input(f' Quantos gols na partida {c}? ')))
jogador['gols'] = partidas[:]
jogador['total'] = sum(partidas)
print(30*'-=')
print(jogador)
print(30*'-=')
for k, v in jogador.items():
print(f'O campo {k} tem o valor {v}')
print(30*'-=')
print(f'O jogador {jogador["nome"]} jogou {len(jogador["gols"])} partidas.')
for i, v in enumerate(jogador["gols"]):
print(f' => Na partida {i}, fez {v} gols.')
print(f'Foi um total de {jogador["total"]} gols.')
# Or: the same exercise solved with a while loop instead of a for loop
# jogador = dict()
# partidas = list()
# p = tot = 0
# jogador['nome'] = str(input('Nome do Jogador: '))
# quant = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
# while p < quant:
# jogos = int(input(f' Quantos gols na partida {p}? '))
# partidas.append(jogos)
# tot += jogos
# p += 1
# jogador['gols'] = partidas
# jogador['total'] = tot
# print(30*'-=')
# print(jogador)
# print(30*'-=')
# for k, v in jogador.items():
# print(f'O campo {k} tem o valor {v}')
# print(30*'-=')
# print(f'O jogador {jogador["nome"]} jogou {quant} partidas.')
# for c, g in enumerate(partidas):
# print(f' => Na partida {c}, fez {g} gols.')
# print(f'Foi um total de {jogador["total"]} gols.') | 31.295455 | 76 | 0.600581 | 205 | 1,377 | 4.034146 | 0.234146 | 0.058041 | 0.043531 | 0.055623 | 0.665054 | 0.590085 | 0.590085 | 0.442563 | 0.345828 | 0.345828 | 0 | 0.013181 | 0.173566 | 1,377 | 44 | 77 | 31.295455 | 0.713533 | 0.48947 | 0 | 0.166667 | 0 | 0 | 0.414706 | 0.032353 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.444444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
18b252f0addcf4c4512b055a5ed661c24cb4f654 | 3,658 | py | Python | interpreter.py | Wheatwizard/Lost | 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | [
"MIT"
] | 13 | 2017-08-10T21:54:12.000Z | 2021-12-08T12:50:31.000Z | interpreter.py | Wheatwizard/Lost | 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | [
"MIT"
] | null | null | null | interpreter.py | Wheatwizard/Lost | 59281e2e8ab6f0fd35b8496b5f04b2a4a8d7b350 | [
"MIT"
] | null | null | null | from Stack import Stack
from random import randint
class Interpreter(object):
    def __init__(self, source, input, startx=None, starty=None, dir=None):
        source = source.strip().split("\n")
        # Pad every row so the grid is rectangular and wrapping works in both axes.
        dim = max(list(map(len, source)) + [len(source)])
        self.source = [list(x.ljust(dim, ".")) for x in source]
        self.dim = (len(self.source), len(self.source[0]))
        if dir is None:
            self.direction = [[1, 0], [0, 1], [-1, 0], [0, -1]][randint(0, 3)]
        else:
            self.direction = dir
        if (startx, starty) == (None, None):
            self.location = [randint(0, self.dim[0] - 1), randint(0, self.dim[1] - 1)]
        else:
            self.location = [startx, starty]
        self.memory = Stack(input)
        self.scope = Stack()
        self.read = False
        self.safety = False

    def wrapAround(self):
        self.location[0] %= self.dim[0]
        self.location[1] %= self.dim[1]

    def move(self):
        self.location = [
            self.location[0] + self.direction[0],
            self.location[1] + self.direction[1]
        ]
        # Important bit: wrap the pointer back onto the grid on any overflow.
        if self.location[0] < 0:
            self.wrapAround()
        if self.location[1] < 0:
            self.wrapAround()
        if self.location[0] >= self.dim[0]:
            self.wrapAround()
        if self.location[1] >= self.dim[1]:
            self.wrapAround()

    def character(self):
        return self.source[self.location[0]][self.location[1]]

    def action(self):
        if self.read:
            # String mode: push raw character codes until the closing quote.
            if self.character() == '"':
                self.read = False
            else:
                self.memory.append(ord(self.character()))
        elif self.character() == "/":
            self.direction = [-x for x in self.direction[::-1]]
        elif self.character() == "\\":
            self.direction = self.direction[::-1]
        elif self.character() == "|":
            self.direction[1] *= -1
        elif self.character() == ">":
            self.direction = [0, 1]
        elif self.character() == "<":
            self.direction = [0, -1]
        elif self.character() == "v":
            self.direction = [1, 0]
        elif self.character() == "^":
            self.direction = [-1, 0]
        elif self.character() == "%":
            self.safety = True
        elif self.character() == "#":
            self.safety = False
        elif self.character() == "@":
            if self.safety:
                self.direction = [0, 0]
        elif self.character() == "[":
            if self.direction[1] == 1:
                self.direction[1] = -1
            if self.direction[1]:
                self.source[self.location[0]][self.location[1]] = "]"
        elif self.character() == "]":
            if self.direction[1] == -1:
                self.direction[1] = 1
            if self.direction[1]:
                self.source[self.location[0]][self.location[1]] = "["
        elif self.character() in "0123456789":
            self.memory.append(int(self.character()))
        elif self.character() == "+":
            self.memory.append(self.memory.pop() + self.memory.pop())
        elif self.character() == "*":
            self.memory.append(self.memory.pop() * self.memory.pop())
        elif self.character() == "-":
            self.memory.append(-self.memory.pop())
        elif self.character() == ":":
            self.memory.append(self.memory[-1])
        elif self.character() == "$":
            a, b = self.memory.pop(), self.memory.pop()
            self.memory.append(a)
            self.memory.append(b)
        elif self.character() == "!":
            self.move()
        elif self.character() == "?":
            if self.memory.pop():
                self.move()
        elif self.character() == "(":
            self.scope.append(self.memory.pop())
        elif self.character() == ")":
            self.memory.append(self.scope.pop())
        elif self.character() == '"':
            self.read = True

    def output(self, screen, a, b):
        try:
            import curses
            curselib = curses
        except ImportError:
            import unicurses
            curselib = unicurses
        for x in range(self.dim[0]):
            for y in range(self.dim[1]):
                try:
                    if [x, y] == self.location:
                        if curselib.has_colors():
                            screen.addstr(a + x, b + y * 2, "X", curselib.color_pair(1))
                        else:
                            screen.addstr(a + x, b + y * 2, "X")
                    else:
                        screen.addstr(a + x, b + y * 2, self.source[x][y])
                except Exception:
                    pass  # drawing past the window edge raises; skip those cells
| 29.983607 | 70 | 0.617824 | 519 | 3,658 | 4.342967 | 0.146435 | 0.149956 | 0.173469 | 0.149068 | 0.537267 | 0.460515 | 0.409051 | 0.362467 | 0.250222 | 0.250222 | 0 | 0.026499 | 0.174686 | 3,658 | 121 | 71 | 30.231405 | 0.720106 | 0.003554 | 0 | 0.161017 | 0 | 0 | 0.011251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.008475 | 0.042373 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
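A small driver for the interpreter above, as a sketch: it assumes the sibling Stack module behaves like a list with append/pop (which the class relies on), and fixes the start position and direction so the run is deterministic.

if __name__ == "__main__":
    # "%12+@" sets safety on, pushes 1 and 2, adds them, then halts at "@".
    demo = Interpreter("%12+@", [], startx=0, starty=0, dir=[0, 1])
    for _ in range(10):  # bounded stepping; Lost programs may loop forever
        demo.action()
        demo.move()
    print(demo.memory)   # expected contents: [3]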
18b25e53c1ed1abb7bdec386aaba62360b44deb4 | 1,826 | py | Python | masterStock.py | Coway/premeStock | 27106fd581b71df1729f94a79f5a6a10b41ece00 | [
"MIT"
] | 69 | 2017-03-09T00:24:09.000Z | 2021-11-15T05:52:09.000Z | masterStock.py | Coway/premeStock | 27106fd581b71df1729f94a79f5a6a10b41ece00 | [
"MIT"
] | 12 | 2017-03-11T04:31:29.000Z | 2018-06-21T03:54:28.000Z | masterStock.py | supthunder/premeStock | 27106fd581b71df1729f94a79f5a6a10b41ece00 | [
"MIT"
] | 19 | 2017-03-05T22:16:37.000Z | 2020-06-23T22:41:33.000Z | import requests
from bs4 import BeautifulSoup
import json
def loadMasterStock():
    url = "http://www.supremenewyork.com/mobile_stock.json"
    user = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1"}
    # user = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
    r = requests.get(url, headers=user)
    masterStock = json.loads(r.text)
    with open("masterstock.json", 'w') as outfile:
        json.dump(masterStock, outfile, indent=4, sort_keys=True)
    print("Saved to masterstock.json")
    itemInfo = ""
    while True:
        try:
            item = input("Enter item name to get id or ctrl-c to quit: ")
        except (KeyboardInterrupt, EOFError):
            print("Exiting...")
            if itemInfo != "":
                itemInfo = itemInfo[:-1]  # drop the trailing comma
            print("\n" + itemInfo)
            with open("filteredStock.txt", 'w') as outfile:
                outfile.write(itemInfo)
            exit()
        if item == "new":
            print("Getting all new items...")
            for itemCount in range(len(masterStock['products_and_categories']["new"])):
                itemInfo += '"' + str(masterStock['products_and_categories']["new"][itemCount]['id']) + '":"'
                itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name']) + '",'
        else:
            for itemCount in range(len(masterStock['products_and_categories']["new"])):
                if item.lower() in str(masterStock['products_and_categories']["new"][itemCount]['name']).lower():
                    itemInfo += '"' + str(masterStock['products_and_categories']["new"][itemCount]['id']) + '":"'
                    print("Added " + str(masterStock['products_and_categories']["new"][itemCount]['name']))
                    itemInfo += str(masterStock['products_and_categories']["new"][itemCount]['name']) + '",'
        # print(itemInfo)


if __name__ == '__main__':
    loadMasterStock()
| 41.5 | 161 | 0.680723 | 243 | 1,826 | 4.99177 | 0.423868 | 0.125309 | 0.145095 | 0.211047 | 0.405606 | 0.405606 | 0.369332 | 0.369332 | 0.285243 | 0.093982 | 0 | 0.032136 | 0.130887 | 1,826 | 43 | 162 | 42.465116 | 0.732199 | 0.088171 | 0 | 0.166667 | 0 | 0.027778 | 0.355596 | 0.11071 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.083333 | 0 | 0.111111 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
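The fetch at the top of loadMasterStock has no timeout or status check, so a hung endpoint stalls the script; a hedged variant of the same request (same URL and user-agent idea, the function name is invented here):

def load_master_stock_safe():
    url = "http://www.supremenewyork.com/mobile_stock.json"
    headers = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X)"}
    r = requests.get(url, headers=headers, timeout=10)  # fail fast instead of hanging
    r.raise_for_status()                                # surface HTTP errors
    return r.json()                                     # equivalent to json.loads(r.text)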
18b6001fed8371bb91ce9e52ae604dbe21d1ea14 | 5,353 | py | Python | release.py | dhleong/beholder | 1459c67907c436f6abc2abcd82c817e177fcd85f | [
"MIT"
] | 4 | 2020-03-11T01:35:42.000Z | 2021-08-31T20:18:22.000Z | release.py | dhleong/beholder | 1459c67907c436f6abc2abcd82c817e177fcd85f | [
"MIT"
] | 15 | 2018-04-29T20:25:14.000Z | 2020-03-14T13:44:59.000Z | release.py | dhleong/beholder | 1459c67907c436f6abc2abcd82c817e177fcd85f | [
"MIT"
] | 1 | 2020-10-27T22:43:46.000Z | 2020-10-27T22:43:46.000Z | #!/usr/bin/env python
#
# Release script for beholder
#
import hashlib
import re
import urllib
from collections import OrderedDict

try:
    from hostage import *  # pylint: disable=unused-wildcard-import,wildcard-import
except ImportError:
    print "!! Release library unavailable."
    print "!! Use `pip install hostage` to fix."
    print "!! You will also need an API token in .github.token,"
    print "!! a .hubrrc config, or `brew install hub` configured."
    print "!! A $GITHUB_TOKEN env variable will also work."
    exit(1)

#
# Globals
#

notes = File(".last-release-notes")
latestTag = git.Tag.latest()


def sha256(fileUrl, blockSize=65536):
    # based on: https://gist.github.com/rji/b38c7238128edf53a181
    hasher = hashlib.sha256()
    shafp = urllib.urlopen(fileUrl)
    for block in iter(lambda: shafp.read(blockSize), b''):
        hasher.update(block)
    shafp.close()
    return hasher.hexdigest()


def formatIssue(issue):
    return "- {title} (#{number})\n".format(
        number=issue.number,
        title=issue.title)


def buildLabeled(labelsToTitles):
    """Given a set of (label, title) tuples, produces an
    OrderedDict whose keys are `label`, and whose values are
    dictionaries containing 'title' -> `title`, and
    'content' -> string. The iteration order of the dictionary
    will preserve the ordering of the provided tuples
    """
    result = OrderedDict()
    for k, v in labelsToTitles:
        result[k] = {'title': v, 'content': ''}
    return result


def buildDefaultNotes(_):
    if not latestTag: return ''

    logParams = {
        'path': latestTag.name + "..HEAD",
        'grep': ["Fix #", "Fixes #", "Closes #"],
        'pretty': "format:- %s"}
    logParams["invertGrep"] = True
    msgs = git.Log(**logParams).output()

    contents = ''

    lastReleaseDate = latestTag.get_created_date()
    if lastReleaseDate.tzinfo:
        # pygithub doesn't respect tzinfo, so we have to do it ourselves
        lastReleaseDate -= lastReleaseDate.tzinfo.utcoffset(lastReleaseDate)
        lastReleaseDate.replace(tzinfo=None)

    closedIssues = github.find_issues(state='closed', since=lastReleaseDate)

    labeled = buildLabeled([
        ['feature', "New Features"],
        ['enhancement', "Enhancements"],
        ['bug', "Bug Fixes"],
        ['_default', "Other resolved tickets"],
    ])

    if closedIssues:
        for issue in closedIssues:
            found = False
            for label in labeled.iterkeys():
                if label in issue.labels:
                    labeled[label]['content'] += formatIssue(issue)
                    found = True
                    break
            if not found:
                labeled['_default']['content'] += formatIssue(issue)

    for labeledIssueInfo in labeled.itervalues():
        if labeledIssueInfo['content']:
            contents += "\n**{title}**:\n{content}".format(**labeledIssueInfo)

    if msgs: contents += "\n**Notes**:\n" + msgs
    return contents.strip()

#
# Verify
#

verify(Grep("stopship", inDir="src").foundAny(silent=False)) \
    .then(echoAndDie("I don't think so"))

version = verify(File("src/beholder.go")
                 .filtersTo(RegexFilter('const Version = "(.*)"'))
                 ).valueElse(echoAndDie("No version!?"))
versionTag = git.Tag(version)

verify(versionTag.exists()) \
    .then(echoAndDie("Version `%s` already exists!" % version))

#
# Make sure all the tests pass
#

# this syntax recursively checks all subpackages for tests
verify(Execute("go test ./... -v")).succeeds(silent=False).orElse(die())

#
# Build the release notes
#

initialNotes = verify(notes.contents()).valueElse(buildDefaultNotes)
notes.delete()
verify(Edit(notes, withContent=initialNotes).didCreate()) \
    .orElse(echoAndDie("Aborted due to empty message"))

releaseNotes = notes.contents()

#
# Compile
#

versions = [
    # (label, os, arch) tuples
    ("macOS", "darwin", "amd64"),
    ("windows-x64", "windows", "amd64"),
]
compiled = []
for (buildLabel, os, arch) in versions:
    f = 'bin/beholder-%s-%s' % (version, buildLabel)
    if os == "windows":
        f += ".exe"
    print "Compiling:", f
    cmd = 'env GOOS=%s GOARCH=%s go build -v -o %s' % (os, arch, f)
    verify(Execute(cmd)).succeeds(silent=False)
    compiled.append(f)

#
# Upload to github
#

print "Uploading to Github..."
verify(versionTag).create()
verify(versionTag).push("origin")

gitRelease = github.Release(version)
verify(gitRelease).create(body=releaseNotes)

for f in compiled:
    print "Uploading", f
    verify(gitRelease).uploadFile(f, 'application/octet-stream')

#
# Update homebrew repo
#

print "Updating homebrew..."
tarUrl = 'https://github.com/dhleong/beholder/archive/%s.tar.gz' % version
tarSha = sha256(tarUrl)
homebrewConfig = github.Config("dhleong/homebrew-tap")
formulaFile = github.RepoFile("/Formula/beholder.rb", config=homebrewConfig)

oldContents = formulaFile.read()
newContents = oldContents
newContents = re.sub('url "[^"]+"', 'url "%s"' % tarUrl, newContents)
newContents = re.sub('sha256 "[^"]+"', 'sha256 "%s"' % tarSha, newContents)
print "  url <-", tarUrl
print "  sha256 <-", tarSha

commit = 'Update for v%s' % version
verify(formulaFile).write(newContents, commitMessage=commit)

#
# Success! Now, just cleanup and we're done!
#

notes.delete()
print "Done! Published %s" % version
| 27.172589 | 82 | 0.64618 | 604 | 5,353 | 5.715232 | 0.466887 | 0.015064 | 0.013326 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010351 | 0.205866 | 5,353 | 196 | 83 | 27.311224 | 0.801694 | 0.086494 | 0 | 0.017391 | 0 | 0 | 0.22575 | 0.010729 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.043478 | null | null | 0.104348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
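The sha256 helper above uses the Python 2 urllib API; a Python 3 equivalent of the same streaming-hash technique (a sketch, not part of the repo) would be:

import hashlib
import urllib.request

def sha256_py3(file_url, block_size=65536):
    hasher = hashlib.sha256()
    with urllib.request.urlopen(file_url) as fp:
        # hash in fixed-size blocks so large archives never sit in memory
        for block in iter(lambda: fp.read(block_size), b''):
            hasher.update(block)
    return hasher.hexdigest()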
18b77fe12dbcd84b5d365548128c4a03151b1396 | 3,949 | py | Python | src/simulator/simulator.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 46 | 2020-12-25T04:09:15.000Z | 2022-03-25T12:32:42.000Z | src/simulator/simulator.py | ed741/PathBench | 50fe138eb1f824f49fe1a862705e435a1c3ec3ae | [
"BSD-3-Clause"
] | 36 | 2020-12-21T16:10:02.000Z | 2022-01-03T01:42:01.000Z | src/simulator/simulator.py | judicaelclair/PathBenchURO | 101e67674efdfa8e27e1cf7787dac9fdf99552fe | [
"BSD-3-Clause"
] | 11 | 2021-01-06T23:34:12.000Z | 2022-03-21T17:21:47.000Z | from typing import Optional
from algorithms.basic_testing import BasicTesting
from simulator.controllers.main_controller import MainController
from simulator.controllers.map.map_controller import MapController
from simulator.controllers.gui.gui_controller import GuiController
from simulator.models.main_model import MainModel
from simulator.models.map_model import MapModel
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from simulator.services.event_manager.events.event import Event
from simulator.services.event_manager.events.reinit_event import ReinitEvent
from simulator.views.main_view import MainView
from simulator.views.map.map_view import MapView
from simulator.views.gui.gui_view import GuiView
from structures import Size
"""
Implementation is done after https://github.com/wesleywerner/mvc-game-design
"""
class Simulator:
    """
    The main simulator class
    """

    __services: Services
    __main: MainModel
    __map: MapModel
    __main_controller: MainController
    __map_controller: MapController
    __gui_controller: GuiController
    __main_view: MainView
    __map_view: MapView
    __gui_view: GuiView

    def __init__(self, services: Services) -> None:
        # init services
        self.__services = services
        self.__services.ev_manager.register_listener(self)

        self.__main = None
        self.__map = None
        self.__main_controller = None
        self.__map_controller = None
        self.__gui_controller = None
        self.__main_view = None
        self.__map_view = None

    def start(self) -> Optional[BasicTesting]:
        """
        Starts the simulator
        :return The testing results if any
        """
        if self.__services.settings.simulator_graphics:
            return self.__start_with_graphics()
        else:
            return self.__start_without_graphics()

    def __try_setup_map_graphics(self) -> None:
        if self.__services.algorithm.instance is not None:
            if self.__map_controller is not None:
                self.__map_controller.destroy()
            if self.__map_view is not None:
                self.__map_view.destroy()
            self.__map = MapModel(self.__services)
            self.__map_view = MapView(self.__services, self.__map, self.__main_view)
            self.__map_controller = MapController(self.__map_view, self.__services, self.__map)

    def __start_with_graphics(self) -> None:
        """
        Starts simulator with graphics
        """
        # init models, views, controllers
        self.__main = MainModel(self.__services)

        # init views
        self.__main_view = MainView(self.__services, self.__main, None)
        self.__gui_view = GuiView(self.__services, None, self.__main_view)

        # init controllers
        self.__main_controller = MainController(self.__services, self.__main)
        self.__gui_controller = GuiController(self.__gui_view, self.__services, self.__main)

        self.__try_setup_map_graphics()
        self.__main.run()

    def __start_without_graphics(self) -> Optional[BasicTesting]:
        """
        Starts simulator without graphics
        :return: The test results
        """
        self.__services.algorithm.instance.find_path()
        return self.__services.algorithm.instance.testing

    def notify(self, event: Event) -> None:
        if isinstance(event, ReinitEvent):
            if self.__map:
                """
                self.__map.stop_algorithm()
                if self.__map.last_thread:
                    self.__map.last_thread.join()
                """
                self.__map.reset()
                self.__services.ev_manager.unregister_listener(self.__map)
                self.__services.ev_manager.unregister_tick_listener(self.__map)
                self.__try_setup_map_graphics()

    @property
    def services(self) -> Services:
        return self.__services
| 34.640351 | 95 | 0.683971 | 441 | 3,949 | 5.653061 | 0.197279 | 0.056157 | 0.038508 | 0.025271 | 0.115925 | 0.031288 | 0 | 0 | 0 | 0 | 0 | 0 | 0.239301 | 3,949 | 113 | 96 | 34.946903 | 0.829893 | 0.062294 | 0 | 0.028571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.214286 | 0.014286 | 0.514286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
18c72218e5a46e6e788b195ce2de8f4c86c23159 | 444 | py | Python | qmt/geometry/geo_data_base.py | basnijholt/qmt | 68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4 | [
"MIT"
] | null | null | null | qmt/geometry/geo_data_base.py | basnijholt/qmt | 68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4 | [
"MIT"
] | null | null | null | qmt/geometry/geo_data_base.py | basnijholt/qmt | 68f781ff489fd9f5ddc817dacfc8ff3a8fdeb2b4 | [
"MIT"
] | null | null | null | from typing import Any, Dict, List
from qmt.infrastructure import WithParts
class GeoData(WithParts):
    def __init__(self, lunit: str = "nm"):
        """Base class for geometry data objects.

        Parameters
        ----------
        lunit : str, optional
            Length unit for this geometry, by default "nm"
        """
        self.lunit: str = lunit
        self.build_order: List[str] = []
        super().__init__()
| 26.117647 | 58 | 0.572072 | 49 | 444 | 5 | 0.653061 | 0.097959 | 0.097959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.313063 | 444 | 16 | 59 | 27.75 | 0.803279 | 0.29955 | 0 | 0 | 0 | 0 | 0.007813 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
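GeoData is meant to be specialized per geometry type; a minimal sketch of a subclass (the class and part names here are invented for illustration, and WithParts is assumed to supply the part-tracking mixin behavior):

class SketchGeoData(GeoData):
    def __init__(self, lunit: str = "nm"):
        super().__init__(lunit)
        self.parts: Dict[str, Any] = {}

    def add_part(self, name: str, part: Any) -> None:
        # record the part and remember the order it should be built in
        self.parts[name] = part
        self.build_order.append(name)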
18e718827e2560736ccb159689ee15cc3157f2a5 | 4,084 | py | Python | empyric/collection/controllers.py | dmerthe/empyric | 7553b71e241709836cdef156afa7dd2a1c1edf5a | [
"MIT"
] | 3 | 2021-01-17T14:05:27.000Z | 2022-03-03T06:25:39.000Z | empyric/collection/controllers.py | dmerthe/empyric | 7553b71e241709836cdef156afa7dd2a1c1edf5a | [
"MIT"
] | null | null | null | empyric/collection/controllers.py | dmerthe/empyric | 7553b71e241709836cdef156afa7dd2a1c1edf5a | [
"MIT"
] | 1 | 2021-01-17T14:05:29.000Z | 2021-01-17T14:05:29.000Z | from empyric.adapters import *
from empyric.collection.instrument import *
class OmegaCN7500(Instrument):
    """
    Omega model CN7500 PID temperature controller
    """

    name = 'OmegaCN7500'

    supported_adapters = (
        (Modbus, {'slave_mode': 'rtu',
                  'baud_rate': 38400,
                  'parity': 'N',
                  'delay': 0.2}),
    )

    knobs = (
        'output',
        'setpoint',
        'proportional band',
        'integration time',
        'derivative time'
    )

    meters = (
        'temperature',
        'power'
    )

    @setter
    def set_output(self, state):
        if state == 'ON':
            self.backend.write_bit(0x814, 1)  # turn on output & start PID control
        elif state == 'OFF':
            self.backend.write_bit(0x814, 0)  # turn off output & stop PID control

    @setter
    def set_setpoint(self, setpoint):
        self.write(0x1001, 10 * setpoint)

    @getter
    def get_setpoint(self):
        return self.read(0x1001) / 10

    @setter
    def set_proportional_band(self, P):
        self.write(0x1009, int(P))

    @getter
    def get_proportional_band(self):
        return self.read(0x1009)

    @setter
    def set_integration_time(self, Ti):
        self.write(0x100c, int(Ti))

    @getter
    def get_integration_time(self):
        return self.read(0x100c)

    @setter
    def set_derivative_time(self, Td):
        self.write(0x100b, int(Td))

    @getter
    def get_derivative_time(self):
        return self.read(0x100b)

    @measurer
    def measure_temperature(self):
        return self.read(0x1000) / 10

    @measurer
    def measure_power(self):
        return self.read(0x1000) / 10


class RedLionPXU(Instrument):
    """
    Red Lion PXU temperature PID controller
    """

    name = 'RedLionPXU'

    supported_adapters = (
        (Modbus, {'baud_rate': 38400}),
    )

    knobs = (
        'output',
        'setpoint',
        'autotune'
    )

    meters = (
        'temperature',
        'power'
    )

    @setter
    def set_output(self, state):
        if state == 'ON':
            self.backend.write_bit(0x11, 1)  # turn on output & start PID control
        elif state == 'OFF':
            self.backend.write_bit(0x11, 0)  # turn off output & stop PID control

    @setter
    def set_setpoint(self, setpoint):
        self.write(0x1, int(setpoint))

    @measurer
    def measure_temperature(self):
        return self.read(0x0)

    @measurer
    def measure_power(self):
        return self.read(0x8) / 10

    @setter
    def set_autotune(self, state):
        if state == 'ON':
            self.write(0xf, 1)
        elif state == 'OFF':
            self.write(0xf, 0)


class WatlowEZZone(Instrument):
    """
    Watlow EZ-Zone PID process controller
    """

    name = 'WatlowEZZone'

    supported_adapters = (
        (Modbus, {'baud_rate': 9600}),
    )

    knobs = (
        'setpoint',
    )

    meters = (
        'temperature',
    )

    @measurer
    def measure_temperature(self):
        # swapped little-endian byte order (= 3 in minimalmodbus)
        return self.read(360, dtype='float', byte_order=3)

    @getter
    def get_setpoint(self):
        return self.read(2160, dtype='float', byte_order=3)

    @setter
    def set_setpoint(self, setpoint):
        return self.write(2160, setpoint, dtype='float', byte_order=3)

    @getter
    def get_proportional_band(self):
        return self.read(1890, dtype='float', byte_order=3)

    @setter
    def set_proportional_band(self, band):
        return self.write(1890, band, dtype='float', byte_order=3)

    @getter
    def get_time_integral(self):
        return self.read(1894, dtype='float', byte_order=3)

    @setter
    def set_time_integral(self, integral):
        return self.write(1894, integral, dtype='float', byte_order=3)

    @getter
    def get_time_derivative(self):
        return self.read(1896, dtype='float', byte_order=3)

    @setter
    def set_time_derivative(self, derivative):
        return self.write(1896, derivative, dtype='float', byte_order=3)
| 22.31694 | 117 | 0.588149 | 468 | 4,084 | 5.010684 | 0.224359 | 0.072495 | 0.077612 | 0.099787 | 0.529211 | 0.485714 | 0.438806 | 0.438806 | 0.27548 | 0.17484 | 0 | 0.054552 | 0.295299 | 4,084 | 182 | 118 | 22.43956 | 0.76025 | 0.078355 | 0 | 0.507692 | 0 | 0 | 0.07539 | 0 | 0 | 0 | 0.02504 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.015385 | 0.130769 | 0.453846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
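A hedged usage sketch for the drivers above (the serial address is made up, and the convention of passing an adapter address to the instrument constructor is assumed from empyric's design rather than confirmed here):

if __name__ == '__main__':
    oven = OmegaCN7500('COM4')         # hypothetical Modbus serial port
    oven.set_setpoint(150.0)           # stored x10 in register 0x1001
    print(oven.measure_temperature())  # register 0x1000, scaled /10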
18e80ab1f054cab4110f82ef2bcc62a0377ee9cd | 2,468 | py | Python | bot/main.py | the-rango/Discord-Python-Bot-Tutorial | 5afa7b0b6b2397a0d566bc6009bb7cac2e4354de | [
"Apache-2.0"
] | null | null | null | bot/main.py | the-rango/Discord-Python-Bot-Tutorial | 5afa7b0b6b2397a0d566bc6009bb7cac2e4354de | [
"Apache-2.0"
] | null | null | null | bot/main.py | the-rango/Discord-Python-Bot-Tutorial | 5afa7b0b6b2397a0d566bc6009bb7cac2e4354de | [
"Apache-2.0"
] | null | null | null | # APACHE LICENSE
# Copyright 2020 Stuart Paterson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# External Packages
import os
import discord
from dotenv import load_dotenv
# Local Files
import utils
# Create the bot
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
def get_channel_by_name(client, guild, name):
    """Returns a channel by name from a specific guild"""
    for server in client.guilds:
        if server == guild:
            for channel in server.text_channels:
                if channel.name == name:
                    return channel


@client.event
async def on_ready():
    # Triggered when starting up the bot
    print(f'{client.user} has connected to Discord!')


@client.event
async def on_member_update(before, after):
    if str(before.status) == "offline" and str(after.status) == "online":
        # When a user comes online
        channel = utils.get_channel_by_name(client, after.guild, 'general')
        try:
            # Send your message when a user comes online here!
            pass
        except discord.errors.Forbidden:
            pass


@client.event
async def on_message(message):
    if message.author == client.user:
        # Ignore messages this bot sends
        return

    current_channel = message.channel
    if message.content and len(message.content) > 1 and message.content[0] == '!':
        # First we extract the message after the ! then split it on spaces to
        # get a list of the arguments the user gave
        message_text = message.content[1:]
        split_message = message_text.split(" ")
        command = split_message[0]
        if command == "test":
            response = "test successful"
            await current_channel.send(response)
        elif command == "stop":
            await client.logout()
        # elif command == "foo":
        #     # Add your extra commands in blocks like this!
        #     pass

# Run the bot
client.run(TOKEN)
| 29.380952 | 82 | 0.66329 | 332 | 2,468 | 4.870482 | 0.463855 | 0.037106 | 0.024119 | 0.03525 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006522 | 0.254457 | 2,468 | 83 | 83 | 29.73494 | 0.872283 | 0.407212 | 0 | 0.128205 | 0 | 0 | 0.067927 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.051282 | 0.102564 | 0 | 0.179487 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
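As the if/elif chain in on_message grows, a dispatch dict keeps it flat; a sketch in the tutorial's style (the handler names are illustrative):

async def cmd_test(channel, args):
    await channel.send("test successful")

async def cmd_stop(channel, args):
    await client.logout()

COMMANDS = {"test": cmd_test, "stop": cmd_stop}

# inside on_message, after splitting the message:
#     handler = COMMANDS.get(command)
#     if handler:
#         await handler(current_channel, split_message[1:])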
18e9e49334b24d6e872726b2848571c7d6855286 | 624 | py | Python | localpackage/calcs.py | chapmanwilliam/Ogden8 | e17b26609fc3cdd5650bfeba387bd7253513e00e | [
"Apache-2.0"
] | null | null | null | localpackage/calcs.py | chapmanwilliam/Ogden8 | e17b26609fc3cdd5650bfeba387bd7253513e00e | [
"Apache-2.0"
] | null | null | null | localpackage/calcs.py | chapmanwilliam/Ogden8 | e17b26609fc3cdd5650bfeba387bd7253513e00e | [
"Apache-2.0"
] | null | null | null | import os
indentSize = 1  # size of the indent


class calcs():
    def __init__(self):
        self.indent = 0
        self.txt = []  # text for each line

    def clear(self):
        self.txt.clear()
        self.indent = 0

    def addCalcs(self, calc):
        s = [' ' * self.indent + t for t in calc.txt]
        self.txt += s

    def addText(self, txt):
        txt = ' ' * self.indent + txt
        self.txt.append(txt)

    def show(self):
        return os.linesep.join(self.txt)

    def inDent(self):
        self.indent += indentSize

    def outDent(self):
        # >= 0, so a single inDent can be undone all the way back to column zero
        if self.indent - indentSize >= 0:
            self.indent -= indentSize | 20.8 | 50 | 0.56891 | 85 | 624 | 4.129412 | 0.388235 | 0.19943 | 0.17094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009238 | 0.30609 | 624 | 30 | 51 | 20.8 | 0.801386 | 0.057692 | 0 | 0.090909 | 0 | 0 | 0.003407 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.318182 | false | 0 | 0.045455 | 0.045455 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
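A short usage example for the class above (expected output shown in trailing comments; with indentSize = 1 each level adds one space):

c = calcs()
c.addText('Multiplier')
c.inDent()
c.addText('discount rate 0.5%')
c.outDent()
c.addText('Total')
print(c.show())
# Multiplier
#  discount rate 0.5%
# Total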
18eaed4c6444d0552d8dc7a9cc73624816ce21fa | 3,958 | py | Python | grpc-errors/stub/hello_pb2.py | twotwo/tools-python | b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4 | [
"MIT"
] | null | null | null | grpc-errors/stub/hello_pb2.py | twotwo/tools-python | b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4 | [
"MIT"
] | null | null | null | grpc-errors/stub/hello_pb2.py | twotwo/tools-python | b9e7a97e58fb0a3f3fb5e8b674e64a997669c2c4 | [
"MIT"
] | 1 | 2016-10-21T07:51:24.000Z | 2016-10-21T07:51:24.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hello.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
  name='hello.proto',
  package='hello',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x0bhello.proto\x12\x05hello\"\x18\n\x08HelloReq\x12\x0c\n\x04Name\x18\x01 \x01(\t\"\x1b\n\tHelloResp\x12\x0e\n\x06Result\x18\x01 \x01(\t2v\n\x0cHelloService\x12/\n\x08SayHello\x12\x0f.hello.HelloReq\x1a\x10.hello.HelloResp\"\x00\x12\x35\n\x0eSayHelloStrict\x12\x0f.hello.HelloReq\x1a\x10.hello.HelloResp\"\x00\x62\x06proto3')
)


_HELLOREQ = _descriptor.Descriptor(
  name='HelloReq',
  full_name='hello.HelloReq',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Name', full_name='hello.HelloReq.Name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=22,
  serialized_end=46,
)


_HELLORESP = _descriptor.Descriptor(
  name='HelloResp',
  full_name='hello.HelloResp',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Result', full_name='hello.HelloResp.Result', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=48,
  serialized_end=75,
)

DESCRIPTOR.message_types_by_name['HelloReq'] = _HELLOREQ
DESCRIPTOR.message_types_by_name['HelloResp'] = _HELLORESP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

HelloReq = _reflection.GeneratedProtocolMessageType('HelloReq', (_message.Message,), {
  'DESCRIPTOR' : _HELLOREQ,
  '__module__' : 'hello_pb2'
  # @@protoc_insertion_point(class_scope:hello.HelloReq)
  })
_sym_db.RegisterMessage(HelloReq)

HelloResp = _reflection.GeneratedProtocolMessageType('HelloResp', (_message.Message,), {
  'DESCRIPTOR' : _HELLORESP,
  '__module__' : 'hello_pb2'
  # @@protoc_insertion_point(class_scope:hello.HelloResp)
  })
_sym_db.RegisterMessage(HelloResp)


_HELLOSERVICE = _descriptor.ServiceDescriptor(
  name='HelloService',
  full_name='hello.HelloService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=77,
  serialized_end=195,
  methods=[
  _descriptor.MethodDescriptor(
    name='SayHello',
    full_name='hello.HelloService.SayHello',
    index=0,
    containing_service=None,
    input_type=_HELLOREQ,
    output_type=_HELLORESP,
    serialized_options=None,
  ),
  _descriptor.MethodDescriptor(
    name='SayHelloStrict',
    full_name='hello.HelloService.SayHelloStrict',
    index=1,
    containing_service=None,
    input_type=_HELLOREQ,
    output_type=_HELLORESP,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_HELLOSERVICE)
DESCRIPTOR.services_by_name['HelloService'] = _HELLOSERVICE

# @@protoc_insertion_point(module_scope)
| 27.678322 | 348 | 0.741031 | 468 | 3,958 | 5.982906 | 0.284188 | 0.025714 | 0.06 | 0.034286 | 0.415 | 0.395 | 0.395 | 0.395 | 0.395 | 0.33 | 0 | 0.029284 | 0.1286 | 3,958 | 142 | 349 | 27.873239 | 0.782546 | 0.070237 | 0 | 0.573913 | 1 | 0.008696 | 0.187636 | 0.110566 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.043478 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
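The generated classes behave like ordinary protobuf messages; for example:

req = HelloReq(Name='world')
data = req.SerializeToString()
assert HelloReq.FromString(data).Name == 'world'

The HelloService client/server stubs live in the companion hello_pb2_grpc module that grpcio-tools generates alongside this file (assumed here, not shown in this listing).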
18eb73361ec3feb33d8a12b5b8881d917685a4cc | 504 | py | Python | ckanext-sitemap/ckanext/sitemap/plugin.py | alexandru-m-g/hdx-ckan | 647f1f23f0505fa195601245b758edcaf4d25985 | [
"Apache-2.0"
] | 1 | 2020-03-07T02:47:15.000Z | 2020-03-07T02:47:15.000Z | ckanext-sitemap/ckanext/sitemap/plugin.py | datopian/hdx-ckan | 2d8871c035a18e48b53859fec522b997b500afe9 | [
"Apache-2.0"
] | null | null | null | ckanext-sitemap/ckanext/sitemap/plugin.py | datopian/hdx-ckan | 2d8871c035a18e48b53859fec522b997b500afe9 | [
"Apache-2.0"
] | null | null | null | '''
Sitemap plugin for CKAN
'''
from ckan.plugins import implements, SingletonPlugin
from ckan.plugins import IRoutes
class SitemapPlugin(SingletonPlugin):
    implements(IRoutes, inherit=True)

    def before_map(self, map):
        controller = 'ckanext.sitemap.controller:SitemapController'
        map.connect('sitemap', '/sitemap.xml', controller=controller, action='view')
        map.connect('sitemap_page', '/sitemap{page}.xml', controller=controller, action='index')
        return map
| 29.647059 | 96 | 0.712302 | 54 | 504 | 6.611111 | 0.518519 | 0.044818 | 0.084034 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.168651 | 504 | 16 | 97 | 31.5 | 0.852029 | 0.045635 | 0 | 0 | 0 | 0 | 0.215645 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
18ecd7bb8ba5638e693807de98d542a164bfce66 | 2,870 | py | Python | Figure_2/panel_a_Count_mC_bin.py | Wustl-Zhanglab/Placenta_Epigenome | 227f2a42e5c0af821d372b42c9bcf9e561e4627c | [
"MIT"
] | 2 | 2021-06-28T09:16:17.000Z | 2021-07-15T02:39:35.000Z | Figure_2/panel_a_Count_mC_bin.py | Wustl-Zhanglab/Placenta_Epigenome | 227f2a42e5c0af821d372b42c9bcf9e561e4627c | [
"MIT"
] | null | null | null | Figure_2/panel_a_Count_mC_bin.py | Wustl-Zhanglab/Placenta_Epigenome | 227f2a42e5c0af821d372b42c9bcf9e561e4627c | [
"MIT"
] | 2 | 2020-05-29T01:06:19.000Z | 2021-07-02T01:04:50.000Z | #!/usr/bin/python
# programmer : Bo
# usage: Count_Reads_bin.py file_list
import sys
import re
import random
import string
import time
def main(X):
    try:
        print 'opening file :', X
        infile = open(X, "r").readlines()
        print 'Total ', len(infile), ' lines.'
        return infile
    except IOError, message:
        print >> sys.stderr, "cannot open file", message
        sys.exit(1)


def Read_data():
    X = main('numM10K.bin.bed')
    name = []
    reads = []
    score = []
    site = {}
    tt = 'V1\tV2\tV3\tV4\n'
    for n in range(len(X)):
        te = X[n][:-1].split('\t')
        if te[0] not in site.keys():
            print 'adding', te[0]
            site[te[0]] = {}
        # shard each chromosome's bins by the leading half of the start coordinate
        w = int(len(te[1]) / 2)
        tag = te[1][:w + 1]
        try:
            site[te[0]][tag][te[1]] = n - 1
        except:
            site[te[0]][tag] = {}
            site[te[0]][tag][te[1]] = n - 1
        name.append(X[n][:-1])
        reads.append(0)
        score.append(0.0)
    return site, name, reads, score, tt


def Read_blacklist():
    bl = main('hg19_blacklist.bed')
    BL = {}
    for each in bl:
        te = each[:-1].split('\t')
        if te[0] not in BL.keys():
            BL[te[0]] = []
        BL[te[0]].append([int(te[1]), int(te[2])])
    return BL


if __name__ == "__main__":
    tS = time.time()
    bin = 50000
    BL = Read_blacklist()
    #(B_site,B_name,C_reads,tt) = Read_data(sys.argv[1])
    OP = main(sys.argv[1])
    for each in OP:
        (B_site, B_name, B_reads, B_score, tt) = Read_data()
        data = main(each[:-1])
        n = 0
        m = 0
        out = open('M50K_' + '_' + each[:-1], 'w')
        #out.write(tt)
        for line in data:
            n += 1
            if n == 1000000:
                m += 1
                n = 0
                print m, 'million reads'
            te = line.split('\t')
            start = int(te[1])
            end = int(te[2])
            if te[0] not in B_site.keys():
                continue
            # skip reads that overlap any blacklisted interval
            if te[0] in BL.keys():
                blacklisted = False
                for ebi in range(len(BL[te[0]])):
                    if start < BL[te[0]][ebi][1] and end > BL[te[0]][ebi][0]:
                        blacklisted = True
                        break
                if blacklisted:
                    continue
            ss = int(0.5 + (start / 50000)) * 50000
            s = str(ss)
            w = int(len(s) / 2)
            tag = s[:w + 1]
            try:
                y = B_site[te[0]][tag][s]
            except:
                continue
            B_reads[y] += 1
            B_score[y] += float(te[-1])
        for i in range(len(B_name)):
            if B_reads[i] == 0:
                out.write(B_name[i] + '\t0\t0\n')
            else:
                out.write(B_name[i] + '\t' + str(B_reads[i]) + '\t' + str(B_score[i] / B_reads[i]) + '\n')
        out.close()
    tE = time.time()
    print 'Cost ', (tE - tS), ' sec'
| 27.075472 | 94 | 0.444599 | 418 | 2,870 | 2.964115 | 0.239234 | 0.041162 | 0.039548 | 0.040355 | 0.082324 | 0.051655 | 0.051655 | 0.051655 | 0 | 0 | 0 | 0.049245 | 0.377352 | 2,870 | 105 | 95 | 27.333333 | 0.644096 | 0.066899 | 0 | 0.133333 | 0 | 0 | 0.058405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.055556 | null | null | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
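The bin-key arithmetic in the main loop is worth spelling out, since it behaves differently across Python versions; under the script's Python 2 integer division it floors to the bin's lower edge (a worked sketch, not repo code):

start = 1234567
ss = int(0.5 + start // 50000) * 50000   # Python 2 semantics of the line above -> 1200000
s = str(ss)
tag = s[:len(s) // 2 + 1]                # '1200', the shard key built by Read_data
# Under Python 3's true division the same expression would instead round to the
# nearest 50 kb boundary, shifting roughly half of all reads into the next bin.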
18ef5021800d056c99fea4a85de29d3c6771923f | 390 | py | Python | examples/example1.py | wallrj/twisted-names-talk | d3098ab6745abd0d14bb0b6eef41727e5a89de1f | [
"MIT"
] | 2 | 2017-12-01T00:14:25.000Z | 2020-07-01T00:27:44.000Z | examples/example1.py | wallrj/twisted-names-talk | d3098ab6745abd0d14bb0b6eef41727e5a89de1f | [
"MIT"
] | null | null | null | examples/example1.py | wallrj/twisted-names-talk | d3098ab6745abd0d14bb0b6eef41727e5a89de1f | [
"MIT"
] | null | null | null | from twisted.internet import task
from twisted.names import dns
def main(reactor):
    proto = dns.DNSDatagramProtocol(controller=None)
    reactor.listenUDP(0, proto)
    d = proto.query(('8.8.8.8', 53), [dns.Query('www.example.com', dns.AAAA)])
    d.addCallback(printResult)
    return d


def printResult(res):
    print 'ANSWERS: ', [a.payload for a in res.answers]
| 24.375 | 78 | 0.697436 | 57 | 390 | 4.77193 | 0.614035 | 0.022059 | 0.022059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021407 | 0.161538 | 390 | 15 | 79 | 26 | 0.810398 | 0 | 0 | 0 | 0 | 0 | 0.079487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.181818 | null | null | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
18f0f41a4a703e23e45d0e7b9b74208ed5cbd775 | 1,294 | py | Python | setup.py | jeremycline/crochet | ecfc22cefa90f3dfbafa71883c1470e7294f2b6d | [
"MIT"
] | null | null | null | setup.py | jeremycline/crochet | ecfc22cefa90f3dfbafa71883c1470e7294f2b6d | [
"MIT"
] | null | null | null | setup.py | jeremycline/crochet | ecfc22cefa90f3dfbafa71883c1470e7294f2b6d | [
"MIT"
] | 1 | 2020-01-25T18:00:31.000Z | 2020-01-25T18:00:31.000Z | try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

import versioneer


def read(path):
    """
    Read the contents of a file.
    """
    with open(path) as f:
        return f.read()


setup(
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    name='crochet',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Use Twisted anywhere!",
    install_requires=[
        "Twisted>=15.0",
        "wrapt",
    ],
    keywords="twisted threading",
    license="MIT",
    packages=["crochet", "crochet.tests"],
    url="https://github.com/itamarst/crochet",
    maintainer='Itamar Turner-Trauring',
    maintainer_email='itamar@itamarst.org',
    long_description=read('README.rst') + '\n' + read('docs/news.rst'),
)
| 28.130435 | 71 | 0.616692 | 132 | 1,294 | 6.007576 | 0.621212 | 0.167718 | 0.220681 | 0.098361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011179 | 0.239567 | 1,294 | 45 | 72 | 28.755556 | 0.794715 | 0.021638 | 0 | 0.054054 | 0 | 0 | 0.4664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.108108 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
18f2ad5a7c870598e6dec3394ee47ca770ec9558 | 3,289 | py | Python | tests/test_nacl.py | intangere/NewHope_X25519_XSalsa20_Poly1305 | 459914e520bcb5aa207a11533ae217d50719307d | [
"MIT"
] | null | null | null | tests/test_nacl.py | intangere/NewHope_X25519_XSalsa20_Poly1305 | 459914e520bcb5aa207a11533ae217d50719307d | [
"MIT"
] | 1 | 2021-06-21T03:07:13.000Z | 2021-06-21T03:07:13.000Z | tests/test_nacl.py | intangere/NewHope_X25519_XSalsa20_Poly1305 | 459914e520bcb5aa207a11533ae217d50719307d | [
"MIT"
] | null | null | null | # Import libnacl libs
import libnacl
import libnacl.utils
# Import python libs
import unittest
class TestPublic(unittest.TestCase):
    '''
    Test public functions
    '''
    def test_gen(self):
        pk1, sk1 = libnacl.crypto_box_keypair()
        pk2, sk2 = libnacl.crypto_box_keypair()
        pk3, sk3 = libnacl.crypto_box_keypair()
        self.assertEqual(len(pk1), libnacl.crypto_box_PUBLICKEYBYTES)
        self.assertEqual(len(sk1), libnacl.crypto_box_SECRETKEYBYTES)
        self.assertEqual(len(pk2), libnacl.crypto_box_PUBLICKEYBYTES)
        self.assertEqual(len(sk2), libnacl.crypto_box_SECRETKEYBYTES)
        self.assertEqual(len(pk3), libnacl.crypto_box_PUBLICKEYBYTES)
        self.assertEqual(len(sk3), libnacl.crypto_box_SECRETKEYBYTES)
        self.assertNotEqual(pk1, sk1)
        self.assertNotEqual(pk2, sk2)
        self.assertNotEqual(pk3, sk3)
        self.assertNotEqual(pk1, pk2)
        self.assertNotEqual(pk1, pk3)
        self.assertNotEqual(sk1, sk2)
        self.assertNotEqual(sk2, sk3)

    def test_box(self):
        msg = b'Are you suggesting coconuts migrate?'
        # run 1
        nonce1 = libnacl.utils.rand_nonce()
        pk1, sk1 = libnacl.crypto_box_keypair()
        pk2, sk2 = libnacl.crypto_box_keypair()
        enc_msg = libnacl.crypto_box(msg, nonce1, pk2, sk1)
        self.assertNotEqual(msg, enc_msg)
        clear_msg = libnacl.crypto_box_open(enc_msg, nonce1, pk1, sk2)
        self.assertEqual(clear_msg, msg)
        # run 2
        nonce2 = libnacl.utils.rand_nonce()
        pk3, sk3 = libnacl.crypto_box_keypair()
        pk4, sk4 = libnacl.crypto_box_keypair()
        enc_msg2 = libnacl.crypto_box(msg, nonce2, pk4, sk3)
        self.assertNotEqual(msg, enc_msg2)
        clear_msg2 = libnacl.crypto_box_open(enc_msg2, nonce2, pk3, sk4)
        self.assertEqual(clear_msg2, msg)
        # Check bits
        self.assertNotEqual(nonce1, nonce2)
        self.assertNotEqual(enc_msg, enc_msg2)

    def test_boxnm(self):
        msg = b'Are you suggesting coconuts migrate?'
        # run 1
        nonce1 = libnacl.utils.rand_nonce()
        pk1, sk1 = libnacl.crypto_box_keypair()
        pk2, sk2 = libnacl.crypto_box_keypair()
        k1 = libnacl.crypto_box_beforenm(pk2, sk1)
        k2 = libnacl.crypto_box_beforenm(pk1, sk2)
        enc_msg = libnacl.crypto_box_afternm(msg, nonce1, k1)
        self.assertNotEqual(msg, enc_msg)
        clear_msg = libnacl.crypto_box_open_afternm(enc_msg, nonce1, k2)
        self.assertEqual(clear_msg, msg)

    def test_box_seal(self):
        msg = b'Are you suggesting coconuts migrate?'
        print(msg)
        # run 1
        pk, sk = libnacl.crypto_box_keypair()
        enc_msg = libnacl.crypto_box_seal(msg, pk)
        self.assertNotEqual(msg, enc_msg)
        clear_msg = libnacl.crypto_box_seal_open(enc_msg, pk, sk)
        self.assertEqual(clear_msg, msg)
        print(clear_msg)
        # run 2
        pk2, sk2 = libnacl.crypto_box_keypair()
        enc_msg2 = libnacl.crypto_box_seal(msg, pk2)
        self.assertNotEqual(msg, enc_msg2)
        clear_msg2 = libnacl.crypto_box_seal_open(enc_msg2, pk2, sk2)
        self.assertEqual(clear_msg2, msg)
        # Check bits
        self.assertNotEqual(enc_msg, enc_msg2)


t = TestPublic()
t.test_box_seal() | 38.244186 | 72 | 0.663728 | 418 | 3,289 | 4.985646 | 0.143541 | 0.180902 | 0.222649 | 0.121401 | 0.678983 | 0.603167 | 0.543666 | 0.425624 | 0.40691 | 0.278791 | 0 | 0.034786 | 0.239587 | 3,289 | 86 | 73 | 38.244186 | 0.798481 | 0.034357 | 0 | 0.38806 | 0 | 0 | 0.034253 | 0 | 0 | 0 | 0 | 0 | 0.38806 | 1 | 0.059701 | false | 0 | 0.044776 | 0 | 0.119403 | 0.029851 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
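Instantiating the test class by hand, as the last two lines do, runs a single method without unittest's discovery or result reporting; the idiomatic entry point would be:

if __name__ == '__main__':
    unittest.main()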
18f9f056fd0c54a5b1e0f0f03ecf846e53698354 | 484 | py | Python | mayan/__init__.py | sneha-rk/drawings-version-control | 4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497 | [
"Apache-2.0"
] | 1 | 2021-05-14T18:40:37.000Z | 2021-05-14T18:40:37.000Z | mayan/__init__.py | sneha-rk/drawings-version-control | 4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497 | [
"Apache-2.0"
] | null | null | null | mayan/__init__.py | sneha-rk/drawings-version-control | 4e5a2bf0fd8b8026f1d3d56917b5be4b5c7be497 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
__title__ = 'Mayan EDMS'
__version__ = '2.7.3'
__build__ = 0x020703
__author__ = 'Roberto Rosario'
__author_email__ = 'roberto.rosario@mayan-edms.com'
__description__ = 'Free Open Source Electronic Document Management System'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2011-2016 Roberto Rosario'
| 28.470588 | 74 | 0.760331 | 53 | 484 | 6 | 0.716981 | 0.132075 | 0.056604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123832 | 0.115702 | 484 | 16 | 75 | 30.25 | 0.619159 | 0 | 0 | 0 | 0 | 0 | 0.355372 | 0.061983 | 0 | 0 | 0.033058 | 0 | 0 | 0 | null | null | 0 | 0.066667 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
18fdbb6a59afbc92dbdea6d53c5bce95efda434c | 5,321 | py | Python | server/py/camera.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | server/py/camera.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | server/py/camera.py | sreyas/Attendance-management-system | eeb57bcc942f407151b71bfab528e817c6806c74 | [
"MIT"
] | null | null | null | import cv2
import sys,json,numpy as np
import glob,os
import face_recognition
import datetime
from pathlib import Path
from pymongo import MongoClient
from flask_mongoengine import MongoEngine
from bson.objectid import ObjectId
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
client = MongoClient(port=27017)
db = client.GetMeThrough
home = str(os.path.dirname(os.path.abspath(__file__))) + "/../../"
known_encodings_file_path = home + "/data/known_encodings_file.csv"
people_file_path = home + "/data/people_file.csv"
known_encodings_file = Path(known_encodings_file_path)
if known_encodings_file.is_file():
    known_encodings = np.genfromtxt(known_encodings_file, delimiter=',')
else:
    known_encodings = []
people_file = Path(people_file_path)
if people_file.is_file():
    people = np.genfromtxt(people_file, dtype='U', delimiter=',')
else:
    people = []


class VideoCamera(object):
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        camera = db.addconfigurations.find_one({'_id': ObjectId("5aaa4d382ca2233631b55ab4")})
        self.video = cv2.VideoCapture(camera['configuration'])
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')

    def __del__(self):
        self.video.release()

    def compare_faces(self, detectimage):
        face_locations = face_recognition.face_locations(detectimage)
        face_encodings = face_recognition.face_encodings(detectimage, face_locations)
        match = []
        for face_encoding in face_encodings:
            match = face_recognition.compare_faces(known_encodings, face_encoding)
        return match

    def get_name(self, peoplename):
        collection = db['profiles']
        cursor = collection.find()
        for document in cursor:
            profileimagepath = document['imagepath']
            category = document['category']
            imagecsv = profileimagepath.split('known_people/')
            filename = imagecsv[1].split('.')
            imagefilename = filename[0]
            if peoplename == imagefilename:
                usercategory = db.user_categories.find_one({'_id': ObjectId(category)})
                text = usercategory['Category']
                return text
        # only fall back to "Unknown" after every profile has been checked
        return "Unknown"

    def insertattendance(self, peoplename):
        collection = db['profiles']
        cursor = collection.find()
        for document in cursor:
            profileimagepath = document['imagepath']
            category = document['category']
            user = document['user']
            imagecsv = profileimagepath.split('known_people/')
            filename = imagecsv[1].split('.')
            imagefilename = filename[0]
            if peoplename == imagefilename:
                current_date = datetime.datetime.now()
                attendance = {"user": user, "date_time": str(current_date)}
                date_format = "%Y-%m-%d %H:%M:%S.%f"
                attendance_system = db.attendances.find({"user": user})
                res = list(attendance_system)
                if not res:
                    db.attendances.insert_one(attendance)
                else:
                    for attendance_doc in res:
                        date_time = attendance_doc['date_time']
                        time1 = datetime.datetime.strptime(date_time, date_format)
                        time2 = datetime.datetime.strptime(str(datetime.datetime.now()), date_format)
                        diff = time2 - time1
                        minutes = (diff.seconds) / 60
                        if minutes >= 30:
                            db.attendances.insert_one(attendance)

    def get_frame(self):
        success, image = self.video.read()
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        faces = face_cascade.detectMultiScale(image, 1.3, 5)
        for (x, y, w, h) in faces:
            match = self.compare_faces(image)
            name = "Unknown"
            for i in range(len(match)):
                if match[i]:
                    face_detect_name = self.get_name(people[i])
                    name = face_detect_name
                    self.insertattendance(people[i])
                    color = (0, 255, 0)
                    break
            if "Unknown" in name:
                color = (0, 0, 255)
                name = "Unknown"
            if "Blacklist" in name:
                color = (0, 0, 0)
                name = "Blacklist"
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(image, name, (x + w, y + h), font, 1.0, (255, 255, 255), 1)
            crop_img = image[y: y + h, x: x + w]
            cv2.imwrite(home + "/data/face.jpg", crop_img)
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
| 41.248062 | 100 | 0.595001 | 601 | 5,321 | 5.109817 | 0.334443 | 0.041029 | 0.035168 | 0.021491 | 0.191469 | 0.183002 | 0.183002 | 0.15565 | 0.15565 | 0.15565 | 0 | 0.02008 | 0.298064 | 5,321 | 128 | 101 | 41.570313 | 0.802142 | 0.078369 | 0 | 0.242991 | 0 | 0 | 0.070903 | 0.022477 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.084112 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
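VideoCamera.get_frame returns one JPEG at a time; the usual Flask pairing streams those frames as multipart MJPEG (a sketch — the route, app object, and boundary name are assumptions, not taken from this repo):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')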
18ffb685c2a877f7f518f970f9a6eafbcd304771 | 2,099 | py | Python | apps/comments/migrations/0001_initial.py | puertoricanDev/horas | 28597af13409edd088a71143d2f4c94cd7fd83f5 | [
"MIT"
] | 10 | 2015-01-18T02:39:35.000Z | 2021-11-09T22:53:10.000Z | apps/comments/migrations/0001_initial.py | puertoricanDev/horas | 28597af13409edd088a71143d2f4c94cd7fd83f5 | [
"MIT"
] | 52 | 2015-03-02T17:46:23.000Z | 2022-02-10T13:23:11.000Z | apps/comments/migrations/0001_initial.py | puertoricanDev/horas | 28597af13409edd088a71143d2f4c94cd7fd83f5 | [
"MIT"
] | 7 | 2015-03-02T01:23:35.000Z | 2021-11-09T22:58:39.000Z | # Generated by Django 1.10.6 on 2017-03-13 04:46
# Modified by Raúl Negrón on 2019-06-22 16:48
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import apps.core.models
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Comment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "date_created",
                    apps.core.models.DateTimeCreatedField(
                        blank=True, default=django.utils.timezone.now, editable=False
                    ),
                ),
                (
                    "date_modified",
                    apps.core.models.DateTimeModifiedField(
                        blank=True, default=django.utils.timezone.now, editable=False
                    ),
                ),
                ("object_id", models.PositiveIntegerField()),
                ("comment", models.TextField()),
                (
                    "content_type",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="contenttypes.ContentType",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="users",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={"ordering": ("date_created",)},
        )
    ]
| 31.328358 | 85 | 0.45212 | 159 | 2,099 | 5.842767 | 0.503145 | 0.034446 | 0.04521 | 0.071044 | 0.223897 | 0.223897 | 0.223897 | 0.223897 | 0.223897 | 0 | 0 | 0.02812 | 0.457837 | 2,099 | 66 | 86 | 31.80303 | 0.788225 | 0.042878 | 0 | 0.280702 | 1 | 0 | 0.078764 | 0.026421 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.087719 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
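The content_type/object_id pair in the migration above is the storage side of Django's generic foreign key; the model it was generated from would tie them together roughly like this (a reconstruction, not the repo's code):

from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models

class Comment(models.Model):
    content_type = models.ForeignKey('contenttypes.ContentType', on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    comment = models.TextField()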
7a15cfeb891a079af5b1c667c60e264effefd0f3 | 4,602 | py | Python | main.py | Lorn-Hukka/academy-record-sender | 137ef9d1dff373662a046bc2a50d7dd5f4fad0ee | [
"MIT"
] | null | null | null | main.py | Lorn-Hukka/academy-record-sender | 137ef9d1dff373662a046bc2a50d7dd5f4fad0ee | [
"MIT"
] | null | null | null | main.py | Lorn-Hukka/academy-record-sender | 137ef9d1dff373662a046bc2a50d7dd5f4fad0ee | [
"MIT"
] | null | null | null | import random, os, string, subprocess, shutil, requests
from discord import Webhook, RequestsWebhookAdapter, Embed
from dotenv import dotenv_values
import argparse, colorama
from colorama import Fore
class Settings():
    def __init__(self):
        for k, v in dotenv_values(".settings").items():
            setattr(self, k, v)


class App():
    def __init__(self, config):
        self.config = config
        self.webhook = Webhook.from_url(self.config.WEBHOOK, adapter=RequestsWebhookAdapter())
        self.output_path = self.config.RECORDS_PATH + '\\output\\'
        self.processed_path = self.config.RECORDS_PATH + '\\processed\\'

    def gen_pass(self, length):
        chars = string.ascii_letters + string.digits + "!#$%&()*+<=>?@[]^_|~"
        password = ''.join(random.choices(chars, k=length))
        return password

    def _check_7zip(self):
        if not os.path.isfile(self.config._7ZIP):
            exit(f'{Fore.RED}WRONG path to 7ZIP executable. Program exited.')

    def _generate_dirs(self):
        if not os.path.isdir(self.processed_path):
            os.mkdir(self.processed_path)
            print(f'{Fore.YELLOW}Path for processed records not found. Created one for you.')
        if not os.path.isdir(self.output_path):
            os.mkdir(self.output_path)
            print(f'{Fore.YELLOW}Output path not found. Created one for you.')

    def process_files(self):
        with open('passwords', 'a+', encoding="utf-8") as f:
            for fn in os.listdir(self.config.RECORDS_PATH):
                if fn.endswith(self.config.EXTENSION):
                    file_password, link_password = self.gen_pass(16), self.gen_pass(16)
                    command = [self.config._7ZIP, 'a -mx9 -mhe=on -y -r', f'-p"{file_password}"',
                               '--', f'"{self.output_path + fn[:-len(self.config.EXTENSION)]}.7z"', f'"{self.config.RECORDS_PATH}\\{fn}"']
                    subprocess.run(" ".join(command))
                    shutil.move(self.config.RECORDS_PATH + '\\' + fn, self.processed_path + fn)
                    f.write(f'F: {fn} | FP: {file_password} | LP: {link_password} | L: \n')

    def send_2_discord(self):
        data = None
        with open('passwords', 'r', encoding="utf-8") as f:
            data = [line.strip('\n').split(' | ') for line in f.readlines()]
        with open('passwords', 'w+', encoding="utf-8") as f:
            for line in data:
                fn = line[0][2::].strip(' ')
                file_password = line[1][3::].strip(' ')
                link_password = line[2][3::].strip(' ')
                link = line[3][2::].strip(' ')
                if link == '':
                    print(f'{Fore.YELLOW}{fn} SKIPPED - No SHARE LINK specified.')
                    f.write(' | '.join(line) + '\n')
                    continue
                if line[0][0] == '*':
                    f.write(' | '.join(line) + '\n')
                    continue
                else:
                    f.write('*' + ' | '.join(line) + '\n')
                msg = {
                    'title': f'{fn}',
                    'description': 'If in doubt, ask in <#809980920249319465>;',
                    'fields': [
                        {'name': 'Recording link:', 'value': f'[Click here to open.]({link})', 'inline': False},
                        {'name': 'Access password:', 'value': f'```{link_password}```', 'inline': True},
                        {'name': 'File password:', 'value': f'```{file_password}```', 'inline': True}
                    ],
                    'footer': {
                        'text': f'~{self.config.NAME}', 'inline': True
                    }
                }
                self.webhook.send('A new recording has been shared.', username='Student.', embed=Embed().from_dict(msg),
                                  avatar_url="https://cdn4.iconfinder.com/data/icons/science-131/64/265-512.png")

    def run(self):
        self._check_7zip()
        self._generate_dirs()
        self.process_files()
        self.send_2_discord()


if __name__ == "__main__":
    colorama.init(autoreset=True)
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", help="Display errors in console.", action="store_true", default=False)
    args = parser.parse_args()

    CONFIG = Settings()
    app = App(CONFIG)
    try:
        app.run()
    except Exception as e:
        if args.verbose:
            print(e)
        exit(f'{Fore.RED}An error occurred; the program will exit.')
| 40.368421 | 140 | 0.526945 | 521 | 4,602 | 4.525912 | 0.364683 | 0.055131 | 0.036048 | 0.044529 | 0.150551 | 0.072095 | 0 | 0 | 0 | 0 | 0 | 0.01784 | 0.317905 | 4,602 | 113 | 141 | 40.725664 | 0.733355 | 0 | 0 | 0.044444 | 0 | 0.022222 | 0.226858 | 0.029335 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088889 | false | 0.144444 | 0.055556 | 0 | 0.177778 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
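process_files builds one command string and relies on Windows argument parsing; passing the argument list straight to subprocess.run avoids the manual quoting (a sketch — archive_path and source_path stand in for the f-strings above):

subprocess.run([CONFIG._7ZIP, 'a', '-mx9', '-mhe=on', '-y', '-r',
                f'-p{file_password}', '--', archive_path, source_path],
               check=True)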
7a1ab771a442031e1729dd19987c53780afb2187 | 3,447 | py | Python | tests/bin/test_tcex_list.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tests/bin/test_tcex_list.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | tests/bin/test_tcex_list.py | phuerta-tc/tcex | 4a4e800e1a6114c1fde663f8c3ab7a1d58045c79 | [
"Apache-2.0"
] | null | null | null | """Bin Testing"""
# standard library
from importlib.machinery import SourceFileLoader
from importlib.util import module_from_spec, spec_from_loader
from typing import List
# third-party
from click.testing import Result
from typer.testing import CliRunner
# dynamically load bin/tcex file
spec = spec_from_loader('app', SourceFileLoader('app', 'bin/tcex'))
tcex_cli = module_from_spec(spec)
spec.loader.exec_module(tcex_cli)
# get app from bin/tcex CLI script
app = tcex_cli.app
# get instance of typer CliRunner for test case
runner = CliRunner()
class TestTcexCliList:
    """Tcex CLI Testing."""

    def setup_method(self):
        """Configure setup before each test."""

    def teardown_method(self):
        """Configure teardown after each test."""

    @staticmethod
    def _run_command(args: List[str]) -> Result:
        """Test Case"""
        result = runner.invoke(app, args)
        return result

    def test_tcex_list(self) -> None:
        """Test Case"""
        result = self._run_command(['list'])
        assert result.exit_code == 0, result.stdout

        # spot check a few lines of outputs
        assert 'Organization Templates' in result.stdout
        assert 'Playbook Templates' in result.stdout
        # TODO: [med] update this once template is done
        # assert 'API Service Templates' in result.stdout
        # assert 'Trigger Service Templates' in result.stdout
        # assert 'Webhook Trigger Service Templates' in result.stdout

    # TODO: [med] update this once template is done
    # def test_tcex_list_external_api_service(self) -> None:
    #     """Test Case"""
    #     result = self._run_command(['list', '--type', 'api_service'])
    #     assert result.exit_code == 0, result.stdout
    #     # spot check a few lines of outputs
    #     assert 'basic' in result.stdout

    # TODO: [med] update this once template is done
    # def test_tcex_list_external_basic(self) -> None:
    #     """Test Case"""
    #     result = self._run_command(['list', '--type', 'external'])
    #     assert result.exit_code == 0, result.stdout
    #     # spot check a few lines of outputs
    #     assert 'basic' in result.stdout

    def test_tcex_list_organization_basic(self) -> None:
        """Test Case"""
        result = self._run_command(['list', '--type', 'organization'])
        assert result.exit_code == 0, result.stdout
        # spot check a few lines of outputs
        assert 'basic' in result.stdout

    def test_tcex_list_playbook_basic(self) -> None:
        """Test Case"""
        result = self._run_command(['list', '--type', 'playbook'])
        assert result.exit_code == 0, f'{result.stdout}'
        # spot check a few lines of outputs
        assert 'basic' in result.stdout

    # TODO: [med] update this once template is done
    # def test_tcex_list_trigger_basic(self) -> None:
    #     """Test Case"""
    #     result = self._run_command(['list', '--type', 'trigger_service'])
    #     assert result.exit_code == 0, result.stdout
    #     # spot check a few lines of outputs
    #     assert 'basic' in result.stdout

    # TODO: [med] update this once template is done
    # def test_tcex_list_webhook_trigger_basic(self) -> None:
    #     """Test Case"""
    #     result = self._run_command(['list', '--type', 'webhook_trigger_service'])
    #     assert result.exit_code == 0, result.stdout
    #     # spot check a few lines of outputs
    #     assert 'basic' in result.stdout
| 33.794118 | 83 | 0.642878 | 441 | 3,447 | 4.868481 | 0.192744 | 0.100606 | 0.071728 | 0.048905 | 0.691663 | 0.668374 | 0.625058 | 0.586865 | 0.586865 | 0.568235 | 0 | 0.002677 | 0.241369 | 3,447 | 101 | 84 | 34.128713 | 0.818356 | 0.531477 | 0 | 0.137931 | 0 | 0 | 0.08034 | 0 | 0 | 0 | 0 | 0.009901 | 0.241379 | 1 | 0.206897 | false | 0 | 0.137931 | 0 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7a1ef72332e8f8f0f2089763d5744f430bdbbf1f | 2,365 | py | Python | log_parser/single_hand_efficiency_training_data.py | xinranhe/mahjong | 8cfc6234f9c80fd11267adf06b420b63f4c8d87d | [
"MIT"
] | null | null | null | log_parser/single_hand_efficiency_training_data.py | xinranhe/mahjong | 8cfc6234f9c80fd11267adf06b420b63f4c8d87d | [
"MIT"
] | null | null | null | log_parser/single_hand_efficiency_training_data.py | xinranhe/mahjong | 8cfc6234f9c80fd11267adf06b420b63f4c8d87d | [
"MIT"
] | null | null | null | import argparse
from mahjong.shanten import Shanten
from multiprocessing import Pool
import os
import sys
from log_parser.discard_prediction_parser import parse_discard_prediction
SHANTEN = Shanten()
INPUT_DATA_FOLDER = "data/raw"
OUTPUT_DATA_DIR = "data/single_hand_efficiency"
def tiles34_to_list(tiles):
    result = []
    for i in xrange(34):
        for j in xrange(tiles[i]):
            result.append(i)
    return sorted(result)
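
# Illustrative example (not from the original source): a 34-element count
# vector with tiles[0] == 2 and tiles[5] == 1 yields [0, 0, 5], i.e. each
# tile id repeated by its count, sorted ascending.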
def generate_data(folder):
    folder_path = "%s/%s" % (INPUT_DATA_FOLDER, folder)
    writer = open("%s/%s.txt" % (OUTPUT_DATA_DIR, folder), "w")
    num_hands = [0] * 7
    num_failed_files = 0
    for i, file in enumerate(os.listdir(folder_path)):
        print "processed %d files with %d failed: %s records" % (i, num_failed_files, ",".join([str(n) for n in num_hands]))
        file_path = "%s/%s" % (folder_path, file)
        try:
            games = parse_discard_prediction(open(file_path, "r").read())
            for game in games:
                for one_round in game.one_round:
                    hais = one_round.center_player.hand
                    if len(hais) != 14:
                        continue
                    hand = [0] * 34
                    for hai in hais:
                        hand[hai.id] += 1
                    if hand[one_round.discarded_hai.id] <= 0:
                        continue
                    hand[one_round.discarded_hai.id] -= 1
                    shanten = int(SHANTEN.calculate_shanten(hand))
                    num_hands[shanten] += 1
                    writer.write("%d:%s\n" % (shanten, ",".join([str(i) for i in tiles34_to_list(hand)])))
        except:
            num_failed_files += 1
            print "Failed in parsing:", file_path
if __name__ == '__main__':
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument('--start_date', default='')
    parser.add_argument('--end_date', default='')
    known_args, _ = parser.parse_known_args(sys.argv)
    date_to_process = []
    for date in os.listdir(INPUT_DATA_FOLDER):
        if date >= known_args.start_date and date <= known_args.end_date:
            date_to_process.append(date)
    print date_to_process
    generate_data(date_to_process[0])
    # multithread generate training data
    # p = Pool(NUM_THREADS)
    # p.map(generate_data, date_to_process)
| 35.833333 | 124 | 0.60296 | 307 | 2,365 | 4.374593 | 0.348534 | 0.029784 | 0.048399 | 0.031273 | 0.075949 | 0.038719 | 0 | 0 | 0 | 0 | 0 | 0.011834 | 0.285412 | 2,365 | 65 | 125 | 36.384615 | 0.78284 | 0.038901 | 0 | 0.037736 | 0 | 0 | 0.070516 | 0.0119 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.113208 | null | null | 0.056604 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e1314b5fc0e2d4894cb7cecd74444fc00587afb1 | 190 | py | Python | prob_020.py | tansly/euler | 8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc | [
"MIT"
] | 1 | 2017-02-13T19:00:59.000Z | 2017-02-13T19:00:59.000Z | prob_020.py | tansly/euler | 8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc | [
"MIT"
] | null | null | null | prob_020.py | tansly/euler | 8b420cb05223cf60b6c01aac9bfe8ce5a3b96ddc | [
"MIT"
] | null | null | null | def sum_digit(n):
    total = 0
    while n != 0:
        total += n % 10
        n //= 10
    return total

def factorial(n):
    if n <= 0:
        return 1
    return n * factorial(n - 1)
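
# Illustrative sanity check (not part of the original file):
# sum_digit(factorial(10)) == 27, since 10! == 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.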
| 15.833333 | 31 | 0.473684 | 29 | 190 | 3.068966 | 0.413793 | 0.044944 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080357 | 0.410526 | 190 | 11 | 32 | 17.272727 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e137881799720563759aa64b3e6bb8a63eb7afae | 496 | py | Python | Chapter13/server.py | Joustie/Mastering-GitLab-12 | 5ac4700791e4274ef3de825bc789c46142af403e | [
"MIT"
] | 40 | 2019-07-06T04:40:27.000Z | 2022-03-31T09:25:07.000Z | Chapter13/server.py | Joustie/Mastering-GitLab-12 | 5ac4700791e4274ef3de825bc789c46142af403e | [
"MIT"
] | 1 | 2019-08-03T17:52:08.000Z | 2020-12-16T06:31:53.000Z | Chapter13/server.py | Joustie/Mastering-GitLab-12 | 5ac4700791e4274ef3de825bc789c46142af403e | [
"MIT"
] | 50 | 2019-07-26T08:49:49.000Z | 2022-03-17T21:01:03.000Z | from flask import Flask, request
import json
app = Flask(__name__)
def runsomething():
    print "This is triggered"
@app.route('/',methods=['POST'])
def trigger():
    data = json.loads(request.data)
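    # The key accesses below assume a GitLab push-event payload, roughly:
    # {"commits": [{"author": {"name": ..., "email": ...}, "message": ...}, ...]}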
    print "New commit by: {}".format(data['commits'][0]['author']['name'])
    print "New commit by: {}".format(data['commits'][0]['author']['email'])
    print "New commit by: {}".format(data['commits'][0]['message'])
    runsomething()
    return "OK"
if __name__ == '__main__':
    app.run() | 24.8 | 74 | 0.635081 | 64 | 496 | 4.734375 | 0.515625 | 0.079208 | 0.138614 | 0.158416 | 0.376238 | 0.376238 | 0.376238 | 0.376238 | 0.264026 | 0 | 0 | 0.00716 | 0.155242 | 496 | 20 | 75 | 24.8 | 0.71599 | 0 | 0 | 0 | 0 | 0 | 0.265594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.133333 | null | null | 0.266667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
e13d3df96caed4ad7bea9f68e21a31547457cf49 | 1,564 | py | Python | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2019-01-13T09:16:31.000Z | 2019-02-15T03:30:28.000Z | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | null | null | null | release/src-rt-6.x.4708/router/samba3/source4/scripting/python/samba/netcmd/time.py | zaion520/ATtomato | 4d48bb79f8d147f89a568cf18da9e0edc41f93fb | [
"FSFAP"
] | 2 | 2020-03-08T01:58:25.000Z | 2020-12-20T10:34:54.000Z | #!/usr/bin/env python
#
# time
#
# Copyright Jelmer Vernooij 2010 <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import common
from samba.net import Net
from samba.netcmd import (
    Command,
)
class cmd_time(Command):
    """Retrieve the time on a remote server [server connection needed]"""

    synopsis = "%prog time <server-name>"

    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_args = ["server_name?"]

    def run(self, server_name=None, credopts=None, sambaopts=None, versionopts=None):
        lp = sambaopts.get_loadparm()
        creds = credopts.get_credentials(lp, fallback_machine=True)
        net = Net(creds, lp, server=credopts.ipaddress)
        if server_name is None:
            server_name = common.netcmd_dnsname(lp)
        print net.time(server_name)
| 32.583333 | 85 | 0.710997 | 214 | 1,564 | 5.140187 | 0.551402 | 0.054545 | 0.035455 | 0.051818 | 0.074545 | 0.050909 | 0 | 0 | 0 | 0 | 0 | 0.004013 | 0.203325 | 1,564 | 47 | 86 | 33.276596 | 0.878812 | 0.43798 | 0 | 0 | 0 | 0 | 0.08091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.190476 | null | null | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e13feb6e08fa5f3de107d84f4998b9cc0fdd3b93 | 1,582 | py | Python | mpcontribs-portal/mpcontribs/portal/urls.py | fraricci/MPContribs | 800e8fded594dce57807e7ef0ec8d3192ce54825 | [
"MIT"
] | null | null | null | mpcontribs-portal/mpcontribs/portal/urls.py | fraricci/MPContribs | 800e8fded594dce57807e7ef0ec8d3192ce54825 | [
"MIT"
] | null | null | null | mpcontribs-portal/mpcontribs/portal/urls.py | fraricci/MPContribs | 800e8fded594dce57807e7ef0ec8d3192ce54825 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.conf.urls import url
from django.views.generic.base import RedirectView
from mpcontribs.portal import views
app_name = "mpcontribs_portal"
urlpatterns = [
    url(r"^$", views.index, name="index"),
    url(r"^healthcheck/?$", views.healthcheck, name="healthcheck"),
    url(
        r"^notebooks/(?P<nb>[A-Za-z0-9_\/]{3,}).html$",
        views.notebooks,
        name="notebooks",
    ),
    url(r"^(?P<cid>[a-f\d]{24})/?$", views.contribution, name="contribution"),
    # downloads
    url(
        r"^component/(?P<oid>[a-f\d]{24})$",
        views.download_component,
        name="download_component",
    ),
    url(
        r"^(?P<cid>[a-f\d]{24}).json.gz$",
        views.download_contribution,
        name="download_contribution",
    ),
    # TODO .(?P<fmt>[a-z]{3})
    url(
        r"^(?P<project>[a-zA-Z0-9_]{3,}).json.gz$",
        views.download_project,
        name="download_project",
    ),
    # redirects
    url(r"^fe-co-v/?$", RedirectView.as_view(url="/swf/", permanent=False)),
    url(r"^fe-co-v/dataset-01/?$", RedirectView.as_view(url="/swf/", permanent=False)),
    url(
        r"^boltztrap/?$",
        RedirectView.as_view(url="/carrier_transport/", permanent=True),
    ),
    url(
        r"^Screeninginorganicpv/?$",
        RedirectView.as_view(url="/screening_inorganic_pv/", permanent=False),
    ),
    url(
        r"^ScreeningInorganicPV/?$",
        RedirectView.as_view(url="/screening_inorganic_pv/", permanent=False),
    ),
    # default view
    url(r"^[a-zA-Z0-9_]{3,}/?$", views.landingpage),
]
| 31.019608 | 87 | 0.584071 | 194 | 1,582 | 4.654639 | 0.340206 | 0.057586 | 0.099668 | 0.116279 | 0.352159 | 0.296788 | 0.296788 | 0.296788 | 0.267996 | 0.174972 | 0 | 0.015139 | 0.2067 | 1,582 | 50 | 88 | 31.64 | 0.704382 | 0.049305 | 0 | 0.363636 | 0 | 0 | 0.323765 | 0.20494 | 0 | 0 | 0 | 0.02 | 0 | 1 | 0 | false | 0 | 0.068182 | 0 | 0.068182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e1416e342916d61944b1391ba364f72736a6b340 | 1,415 | py | Python | Pixelfonts/Delete duplicate components.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | 1 | 2022-01-09T04:28:36.000Z | 2022-01-09T04:28:36.000Z | Pixelfonts/Delete duplicate components.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | Pixelfonts/Delete duplicate components.py | NaN-xyz/Glyphs-Scripts | bdacf455babc72e0801d8d8db5dc10f8e88aa37b | [
"Apache-2.0"
] | null | null | null | #MenuTitle: Delete Duplicate Components
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Looks for duplicate components (same component, same x/y values) and keeps only one of them.
"""
Font = Glyphs.font
selectedLayers = Font.selectedLayers
def getAttr( thisLayer, compNumber ):
    return [thisLayer.components[compNumber].componentName, thisLayer.components[compNumber].x, thisLayer.components[compNumber].y]

def scanForDuplicates( thisLayer, compNumber ):
    if compNumber == len( thisLayer.components ) - 1:
        return []
    else:
        indexList = scanForDuplicates( thisLayer, compNumber + 1 )
        currAttr = getAttr( thisLayer, compNumber )
        for i in range( compNumber + 1, len( thisLayer.components ) ):
            if currAttr == getAttr( thisLayer, i ):
                indexList.append(i)
        return sorted( set( indexList ) )

def process( thisLayer ):
    if len( thisLayer.components ) != 0:
        thisLayer.parent.beginUndo()
        indexesToBeDeleted = scanForDuplicates( thisLayer, 0 )
        # Delete from the highest index down so earlier indexes stay valid.
        for indexToBeDeleted in indexesToBeDeleted[::-1]:
            del thisLayer.components[indexToBeDeleted]
        print len( indexesToBeDeleted )
        thisLayer.parent.endUndo()
    else:
        # no components in this layer
        print "n/a"

Font.disableUpdateInterface()

for thisLayer in selectedLayers:
    print "Components deleted in %s:" % thisLayer.parent.name,
    process( thisLayer )
Font.enableUpdateInterface()
| 27.745098 | 128 | 0.743463 | 154 | 1,415 | 6.766234 | 0.448052 | 0.127639 | 0.083493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005838 | 0.15265 | 1,415 | 50 | 129 | 28.3 | 0.863219 | 0.062191 | 0 | 0.060606 | 0 | 0.030303 | 0.092145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.030303 | null | null | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e141938b24307f066ff503fed7f111fa1bbefd00 | 3,317 | py | Python | src/structures/Errors.py | Xiddoc/ComPy | 7d26f95209d0615d7eb188fa02470ddae5311fca | [
"MIT"
] | null | null | null | src/structures/Errors.py | Xiddoc/ComPy | 7d26f95209d0615d7eb188fa02470ddae5311fca | [
"MIT"
] | 9 | 2022-02-23T10:32:44.000Z | 2022-03-27T17:55:43.000Z | src/structures/Errors.py | Xiddoc/ComPy | 7d26f95209d0615d7eb188fa02470ddae5311fca | [
"MIT"
] | null | null | null | """
Error classes, when needed for exceptions.
"""
from _ast import AST
from dataclasses import dataclass, field
from typing import Optional, Union
from src.compiler.Util import Util
@dataclass(frozen=True)
class ObjectAlreadyDefinedError(NameError):
    """
    For our compilation scheme, objects can only be defined once and must be given a type hint.
    If you try to type hint the same object twice, this should raise an error.
    From this, you should also realize that object types are immutable and cannot be freed.
    """
    object_name: str

    def __str__(self) -> str:
        # Error text
        return f"You cannot redefine object '{self.object_name}' as it is already initialized."


@dataclass(frozen=True)
class ObjectNotDefinedError(NameError):
    """
    As stated in ObjectAlreadyDefinedError, an object must have an explicit type hint the first time it is used.
    This is referred to as "defining" or "initializing".
    If an object is referenced without being defined, then the compiler should throw this error.
    """
    object_name: str

    def __str__(self) -> str:
        # Error text
        return f"Object '{self.object_name}' was not initialized yet."


@dataclass(frozen=True)
class UnsupportedFeatureException(SyntaxError):
    """
    An error to raise whenever a Python feature is used which is not implemented in the compiler.
    Examples (currently) include classes. (Boo hoo, no OOP for you)
    """
    feature: Union[AST, str]

    def __str__(self) -> str:
        # Local import to avoid import error
        # Error text
        return "Python feature '" + \
               (Util.get_name(self.feature) if isinstance(self.feature, AST) else self.feature) + \
               "' is not supported by the compiler."


@dataclass(frozen=True)
class InvalidArgumentError(ValueError):
    """
    An error to throw when the user inputted an invalid argument.
    Specifically, to be used for command line arguments. Not for
    syntax arguments / code that is currently being compiled.
    """
    argument: Optional[str] = field(default=None)

    def __str__(self) -> str:
        # Error text
        return f"Argument '{self.argument}' is not valid." \
            if self.argument is not None else \
            "Internal argument handling error encountered."


@dataclass(frozen=True)
class SyntaxSubsetError(SyntaxError):
    """
    An error to throw when the user's code does
    not match the syntax subset specifications.
    """
    warning: str = field()

    def __str__(self) -> str:
        # Error text
        return f"Invalid usage of '{self.warning}' caused a syntax error (the code must comply with the syntax subset)."


@dataclass(frozen=True)
class InvalidTypeError(TypeError):
    """
    An error to throw when the user gave an invalid type or
    value of a non-corresponding type (in their syntax/code).
    """
    given_type: Optional[str] = field(default=None)
    expected_type: Optional[str] = field(default=None)

    def __str__(self) -> str:
        # Error text
        return f"Could not use type '{self.given_type}' when type '{self.expected_type}' was expected." \
            if self.given_type is not None else \
            "Invalid types (or value of conflicting type) found in code."
| 31.894231 | 118 | 0.67561 | 441 | 3,317 | 5.002268 | 0.353742 | 0.040798 | 0.051677 | 0.065277 | 0.159112 | 0.151859 | 0.135993 | 0.101995 | 0.088849 | 0.088849 | 0 | 0.000397 | 0.241182 | 3,317 | 103 | 119 | 32.203884 | 0.876043 | 0.364185 | 0 | 0.341463 | 0 | 0.02439 | 0.258507 | 0.011173 | 0 | 0 | 0 | 0 | 0 | 1 | 0.146341 | false | 0 | 0.097561 | 0.146341 | 0.707317 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
e14d0acbede38071c9f51e6e3d4fd2359e4f607b | 863 | py | Python | pylbd/s3_object.py | MacHu-GWU/pylbd-project | d9be28d1f9f7679237e4d3c86f63ea06f43249dd | [
"MIT"
] | null | null | null | pylbd/s3_object.py | MacHu-GWU/pylbd-project | d9be28d1f9f7679237e4d3c86f63ea06f43249dd | [
"MIT"
] | null | null | null | pylbd/s3_object.py | MacHu-GWU/pylbd-project | d9be28d1f9f7679237e4d3c86f63ea06f43249dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import boto3
from botocore.exceptions import ClientError
import attr
from attrs_mate import AttrsClass
import weakref
@attr.s
class S3Object(AttrsClass):
    aws_profile = attr.ib()
    bucket = attr.ib()  # type: str
    key = attr.ib()  # type: str

    _s3_client_cache = weakref.WeakValueDictionary()

    def s3_client(self):
        if self.aws_profile not in self._s3_client_cache:
            client = boto3.session.Session(profile_name=self.aws_profile).client("s3")
            self._s3_client_cache[self.aws_profile] = client
        return self._s3_client_cache[self.aws_profile]

    def exists_on_s3(self):
        try:
            self.s3_client().head_object(Bucket=self.bucket, Key=self.key)
            return True
        except ClientError:
            return False
        except Exception as e:
            raise e
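
# Illustrative usage sketch (bucket/key names are hypothetical, and an AWS
# profile named "default" is assumed to exist locally):
#   obj = S3Object(aws_profile="default", bucket="my-bucket", key="path/to/file")
#   obj.exists_on_s3()  # True if HEAD on s3://my-bucket/path/to/file succeeds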
| 26.151515 | 86 | 0.659328 | 113 | 863 | 4.823009 | 0.424779 | 0.088073 | 0.095413 | 0.093578 | 0.113761 | 0.113761 | 0.113761 | 0 | 0 | 0 | 0 | 0.018519 | 0.249131 | 863 | 32 | 87 | 26.96875 | 0.822531 | 0.047509 | 0 | 0 | 0 | 0 | 0.002448 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.208333 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
e14daa2e05a40d76224587c816432879876d552a | 949 | py | Python | utils.py | YaelMoshe/CSV-Compressor | 82a72c1750a8d1fb4b6f3d312995a537edbda48c | [
"MIT"
] | null | null | null | utils.py | YaelMoshe/CSV-Compressor | 82a72c1750a8d1fb4b6f3d312995a537edbda48c | [
"MIT"
] | null | null | null | utils.py | YaelMoshe/CSV-Compressor | 82a72c1750a8d1fb4b6f3d312995a537edbda48c | [
"MIT"
] | null | null | null |
class CompressorUtils(object):
    @staticmethod
    def encode(_cell, _list):
        if not _cell:
            data = "-"
        elif _cell not in _list:
            data = str(len(_list))
            _list.append(_cell)
        else:
            data = str(_list.index(_cell))
        return data

    @staticmethod
    def decode(_cell, _list):
        data = ""
        if _cell != "-":
            print _cell
            data = _list[int(_cell)]
        return data
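
    # Illustration of the dictionary encoding above (not in the original
    # source): with _list starting empty, encode("red", _list) returns "0"
    # and appends "red"; a second encode("red", _list) returns "0" again,
    # and decode("0", _list) recovers "red".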
    @staticmethod
    def get_lists_string(lists):
        all_lists = ""
        for element in lists:
            all_lists += ','.join(element)
            all_lists += "@"
        return all_lists[:-1]

    @staticmethod
    def get_string_lists(str_to_lists):
        list_of_lists = []
        ll = str_to_lists.split("@")
        for l in ll:
            list_of_lists.append( l.split(","))
        return list_of_lists
| 23.725 | 48 | 0.500527 | 102 | 949 | 4.323529 | 0.352941 | 0.136054 | 0.07483 | 0.117914 | 0.131519 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001754 | 0.399368 | 949 | 39 | 49 | 24.333333 | 0.77193 | 0 | 0 | 0.1875 | 0 | 0 | 0.006601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e150737ff1e7de27f34b49c4df0d1658c30b7b57 | 2,469 | py | Python | Gds/src/fprime_gds/common/data_types/sys_data.py | m-aleem/fprime | ae8a2a43a39d0e8a1908a82b48106467357d6cba | [
"Apache-2.0"
] | 1 | 2020-05-12T03:43:36.000Z | 2020-05-12T03:43:36.000Z | Gds/src/fprime_gds/common/data_types/sys_data.py | abcouwer-jpl/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | 5 | 2020-05-26T21:38:02.000Z | 2020-05-26T21:43:33.000Z | Gds/src/fprime_gds/common/data_types/sys_data.py | abcouwer-jpl/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | 3 | 2020-09-05T18:17:21.000Z | 2020-11-15T04:06:24.000Z | '''
@brief Base class for system data classes.
This class defines the interface for data classes which are intended to hold
a specific data item (packet, channel, event). This data item includes the time
of the data as well as data such as channel value or argument value.
@date Created July 2, 2018
@author R. Joseph Paetz (rpaetz@jpl.nasa.gov)
@bug No known bugs
'''
from fprime.common.models.serialize import time_type
from fprime_gds.common.templates import data_template
import fprime_gds.common.utils.jsonable
class SysData(object):
    '''
    The SysData class defines the interface for system data classes which are
    for specific data readings/events
    '''

    def __init__(self):
        '''
        Constructor.

        Each subclass will define new constructors with necessary arguments.
        The necessary fields are time, id, and template.

        Returns:
            An initialized SysData object
        '''
        if not self.id:
            self.id = 0

        if not self.template:
            self.template = data_template.DataTemplate()

        if not self.time:
            self.time = time_type.TimeType()

    def get_id(self):
        '''
        Returns the id of the channel

        Returns:
            The id of the channel
        '''
        return self.id

    def get_time(self):
        '''
        Returns the time of the channel data reading

        Returns:
            Time of the reading as a TimeType
        '''
        return self.time

    def get_template(self):
        '''
        Returns the template class instance for the data stored

        Returns:
            An instance of a template class for this instance's data
        '''
        return self.template

    def to_jsonable(self):
        '''
        Converts to a JSONable object (primitives, anon-objects, lists)
        '''
        return fprime_gds.common.utils.jsonable.fprime_to_jsonable(self)

    @staticmethod
    def compare(x, y):
        '''
        Compares two data items.

        Returns:
            Negative, 0, or positive for t1<t2, t1==t2, t1>t2 respectively
        '''
        # Compare by time first
        time_comp = time_type.TimeType.compare(x.time, y.time)
        if (time_comp != 0):
            return time_comp

        # Compare by id second (just let multiple events at the same time with
        # the same id be counted as equal)
        return cmp(x.id, y.id)
if __name__ == '__main__':
    pass
| 24.939394 | 79 | 0.617254 | 323 | 2,469 | 4.628483 | 0.396285 | 0.016722 | 0.01806 | 0.026756 | 0.105686 | 0.032107 | 0 | 0 | 0 | 0 | 0 | 0.00823 | 0.311057 | 2,469 | 98 | 80 | 25.193878 | 0.870664 | 0.495342 | 0 | 0 | 0 | 0 | 0.008138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0.037037 | 0.111111 | 0 | 0.592593 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e152f4d6dde06ac4acdcd8bfa8623f41a066db20 | 1,654 | py | Python | workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | null | null | null | workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py | mercycorps/TolaWorkflow | 59542132fafd611081adb0e8cfaa04abc5886d7a | [
"Apache-2.0"
] | 268 | 2020-03-31T15:46:59.000Z | 2022-03-31T18:01:08.000Z | workflow/migrations/0027_tolausercountryroles_tolauserprogramroles.py | Falliatcom-sa/falliatcom | 39fb926de072c296ed32d50cccfb8003ca870739 | [
"Apache-2.0"
] | 1 | 2021-01-05T01:58:24.000Z | 2021-01-05T01:58:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-01-18 17:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    dependencies = [
        ('workflow', '0026_auto_20190116_1357'),
    ]

    operations = [
        migrations.CreateModel(
            name='TolaUserCountryRoles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(choices=[('user', 'User'), ('basic_admin', 'Basic Admin'), ('super_admin', 'Super Admin')], max_length=100)),
                ('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_roles', to='workflow.Country')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='country_roles', to='workflow.TolaUser')),
            ],
        ),
        migrations.CreateModel(
            name='TolaUserProgramRoles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('role', models.CharField(choices=[('low', 'Low'), ('medium', 'Medium'), ('high', 'High')], max_length=100)),
                ('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_roles', to='workflow.Program')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='program_roles', to='workflow.TolaUser')),
            ],
        ),
    ]
| 47.257143 | 151 | 0.628174 | 178 | 1,654 | 5.668539 | 0.38764 | 0.047572 | 0.069376 | 0.109019 | 0.507433 | 0.507433 | 0.507433 | 0.507433 | 0.507433 | 0.507433 | 0 | 0.029931 | 0.212213 | 1,654 | 34 | 152 | 48.647059 | 0.744436 | 0.041112 | 0 | 0.37037 | 1 | 0 | 0.188882 | 0.014529 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e15353b2bdb09ab7d8c47ead4ff13403eb177890 | 753 | py | Python | set-2/challenge-11.py | natehouk/cryptopals-crypto-challenges-solutions | 3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869 | [
"MIT"
] | null | null | null | set-2/challenge-11.py | natehouk/cryptopals-crypto-challenges-solutions | 3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869 | [
"MIT"
] | null | null | null | set-2/challenge-11.py | natehouk/cryptopals-crypto-challenges-solutions | 3b89a94d42a9b052b2f79d37ba3fa9e3ec17c869 | [
"MIT"
] | null | null | null | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import random
from util.util import pad, detect_aes_ecb, generate_key, ammend_plaintext, encrypt_random
# Chosen plaintext
plaintext = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
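# 43 identical bytes guarantee repeated 16-byte blocks after padding; under
# ECB identical plaintext blocks encrypt to identical ciphertext blocks,
# which is the tell the detector below relies on (CBC would not repeat).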
# Generate data and encrypt plaintext
key = generate_key()
plaintext = pad(ammend_plaintext(plaintext), 16)
ciphertext = encrypt_random(key, plaintext)
# Detect AES in ECB mode
detect = detect_aes_ecb(ciphertext)
# Print answer
print("Plaintext: " + str(plaintext, 'latin-1'))
print("Ciphertext: " + str(ciphertext, 'latin-1'))
if (detect[1] == 6):
    print("Guess: ECB without CBC mode")
elif (detect[1] == 4):
    print("Guess: ECB with CBC mode")
else:
    raise Exception | 30.12 | 753 | 0.749004 | 102 | 753 | 5.392157 | 0.431373 | 0.032727 | 0.047273 | 0.054545 | 0.058182 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012158 | 0.126162 | 753 | 25 | 90 | 30.12 | 0.823708 | 0.116866 | 0 | 0 | 1 | 0 | 0.198185 | 0.065053 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
e1601ec501793267dab5b7a344de5c414ede0c73 | 2,904 | py | Python | PA1/ArrayListTests/main_create_tests.py | tordisuna/SC-T-201-GSKI | 1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2 | [
"MIT"
] | null | null | null | PA1/ArrayListTests/main_create_tests.py | tordisuna/SC-T-201-GSKI | 1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2 | [
"MIT"
] | null | null | null | PA1/ArrayListTests/main_create_tests.py | tordisuna/SC-T-201-GSKI | 1e89e5b31e7d74aeecae3dffe2df7ac9e8bb40f2 | [
"MIT"
] | 1 | 2021-02-12T11:36:53.000Z | 2021-02-12T11:36:53.000Z |
import random
from random import Random
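
# Summary of the random op codes, derived from the if/elif chain below:
# 1=prepend, 2=insert, 3=append, 4=set_at, 5=get_first, 6=get_at,
# 7=get_last, 8=remove_at, 9=clear (emitted only with probability 1/clear).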
def write_test_line(f, r, c, clear, o = 0):
    f.write("\n")
    if(o == 0):
        if clear == 0:
            o = r.randint(1, 8)
        elif clear == 1:
            o = 9
        else:
            o = r.randint(1, 9)
    if o == 1:
        f.write("prepend")
        f.write(" ")
        f.write(str(r.randint(10, 99)))
        c += 1
    elif o == 2:
        f.write("insert")
        f.write(" ")
        f.write(str(r.randint(10, 99)))
        f.write(" ")
        f.write(str(r.randint(0, c)))
        c += 1
    elif o == 3:
        f.write("append")
        f.write(" ")
        f.write(str(r.randint(10, 99)))
        c += 1
    elif o == 4:
        f.write("set_at")
        f.write(" ")
        f.write(str(r.randint(10, 99)))
        f.write(" ")
        f.write(str(r.randint(0, c)))
    elif o == 5:
        f.write("get_first")
    elif o == 6:
        f.write("get_at")
        f.write(" ")
        f.write(str(r.randint(0, c)))
    elif o == 7:
        f.write("get_last")
    elif o == 8:
        f.write("remove_at")
        f.write(" ")
        f.write(str(r.randint(0, c)))
        c -= 1
    elif o == 9:
        if r.randint(1, clear) == 1:
            f.write("clear")
            c = 2
    return c

def write_insert_ordered_line(f, r, c):
    f.write("\n")
    f.write("insert_ordered")
    f.write(" ")
    f.write(str(r.randint(10, 30)))
    c += 1
    return c

def write_sort_line(f, r, c):
    f.write("\n")
    f.write("sort")
    return c

def write_find_line(f, r, c):
    f.write("\n")
    f.write("find")
    f.write(" ")
    f.write(str(r.randint(10, 30)))
    return c

def write_remove_value_line(f, r, c):
    f.write("\n")
    f.write("remove_value")
    f.write(" ")
    f.write(str(r.randint(10, 30)))
    c -= 1
    return c
r = Random()
f = open("extra_tests.txt", "w+")
f.write("new int")
c = 2
for _ in range(64):
    c = write_test_line(f, r, c, 0)
c = write_test_line(f, r, c, 1)
for _ in range(128):
    c = write_test_line(f, r, c, 0)
c = write_test_line(f, r, c, 1)
for _ in range(512):
    c = write_test_line(f, r, c, 5)
for _ in range(20):
    c = write_insert_ordered_line(f, r, c)
c = write_test_line(f, r, c, 1)
for _ in range(20):
    c = write_test_line(f, r, c, 2, 2)
c = write_insert_ordered_line(f, r, c)
for _ in range(32):
    c = write_test_line(f, r, c, 2, 1)
for _ in range(10):
    c = write_find_line(f, r, c)
for _ in range(10):
    c = write_remove_value_line(f, r, c)
c = write_test_line(f, r, c, 1)
for _ in range(32):
    c = write_insert_ordered_line(f, r, c)
for _ in range(10):
    c = write_find_line(f, r, c)
for _ in range(10):
    c = write_remove_value_line(f, r, c)
for _ in range(32):
    c = write_test_line(f, r, c, 2, 2)
for _ in range(10):
    c = write_find_line(f, r, c)
for _ in range(10):
    c = write_remove_value_line(f, r, c)
f.close()
| 19.755102 | 43 | 0.512741 | 509 | 2,904 | 2.772102 | 0.119843 | 0.174344 | 0.102055 | 0.119064 | 0.726435 | 0.726435 | 0.697378 | 0.672573 | 0.645641 | 0.555634 | 0 | 0.051295 | 0.30854 | 2,904 | 146 | 44 | 19.890411 | 0.651394 | 0 | 0 | 0.596491 | 0 | 0 | 0.04857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.017544 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e16e5534d48d16f412f05cd80a5b2d4be81a0792 | 702 | py | Python | api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py | bacuarabrasil/krenak | ad6a3af5ff162783ec9bd40d07a82f09bf35071b | [
"MIT"
] | null | null | null | api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py | bacuarabrasil/krenak | ad6a3af5ff162783ec9bd40d07a82f09bf35071b | [
"MIT"
] | 26 | 2021-03-10T22:07:57.000Z | 2021-03-11T12:13:35.000Z | api/krenak_api/apps/activities/migrations/0008_auto_20210506_2357.py | bacuarabrasil/krenak | ad6a3af5ff162783ec9bd40d07a82f09bf35071b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-06 23:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mentorships', '0001_initial'),
('activities', '0007_activity_enrollment'),
]
operations = [
migrations.RemoveField(
model_name='activity',
name='enrollment',
),
migrations.AddField(
model_name='activity',
name='mentorship',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='mentorships.mentorship', verbose_name='Mentorship'),
),
]
| 28.08 | 175 | 0.633903 | 71 | 702 | 6.15493 | 0.605634 | 0.05492 | 0.064073 | 0.100687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043315 | 0.24359 | 702 | 24 | 176 | 29.25 | 0.779661 | 0.064103 | 0 | 0.222222 | 1 | 0 | 0.206107 | 0.070229 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e1741137c9f22621cbb2d5cd7d5c872d48ea9402 | 45,528 | py | Python | grr/client/client_actions/file_finder_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/file_finder_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | grr/client/client_actions/file_finder_test.py | panhania/grr | fe16a7311a528e31fe0e315a880e98273b8df960 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests the client file finder action."""
import collections
import glob
import hashlib
import os
import platform
import shutil
import subprocess
import unittest
import mock
import psutil
from grr.client import comms
from grr.client.client_actions import file_finder as client_file_finder
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import standard as rdf_standard
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
def MyStat(path):
  stat_obj = MyStat.old_target(path)
  if path.endswith("auth.log"):
    res = list(stat_obj)
    # Sets atime, ctime, and mtime to some time in 2022.
    res[-1] = 1672466423
    res[-2] = 1672466423
    res[-3] = 1672466423
    return os.stat_result(res)
  return stat_obj
class FileFinderTest(client_test_lib.EmptyActionTest):
  def setUp(self):
    super(FileFinderTest, self).setUp()
    self.stat_action = rdf_file_finder.FileFinderAction.Stat()

  def _GetRelativeResults(self, raw_results, base_path=None):
    base_path = base_path or self.base_path
    return [
        result.stat_entry.pathspec.path[len(base_path) + 1:]
        for result in raw_results
    ]

  def _RunFileFinder(self,
                     paths,
                     action,
                     conditions=None,
                     follow_links=True,
                     **kw):
    return self.RunAction(
        client_file_finder.FileFinderOS,
        arg=rdf_file_finder.FileFinderArgs(
            paths=paths,
            action=action,
            conditions=conditions,
            process_non_regular_files=True,
            follow_links=follow_links,
            **kw))
  def testFileFinder(self):
    paths = [self.base_path + "/*"]
    results = self._RunFileFinder(paths, self.stat_action)
    self.assertEqual(
        self._GetRelativeResults(results), os.listdir(self.base_path))

    profiles_path = os.path.join(self.base_path, "profiles/v1.0")
    paths = [os.path.join(self.base_path, "profiles/v1.0") + "/*"]
    results = self._RunFileFinder(paths, self.stat_action)
    self.assertEqual(
        self._GetRelativeResults(results, base_path=profiles_path),
        os.listdir(profiles_path))

  def testRecursiveGlob(self):
    paths = [self.base_path + "/**3"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b", relative_results)
    self.assertIn("a/b/c", relative_results)
    self.assertIn("a/b/d", relative_results)
    self.assertNotIn("a/b/c/helloc.txt", relative_results)
    self.assertNotIn("a/b/d/hellod.txt", relative_results)

    paths = [self.base_path + "/**4"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b", relative_results)
    self.assertIn("a/b/c", relative_results)
    self.assertIn("a/b/d", relative_results)
    self.assertIn("a/b/c/helloc.txt", relative_results)
    self.assertIn("a/b/d/hellod.txt", relative_results)

  def testRegexGlob(self):
    paths = [self.base_path + "/rekall*.gz"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    for glob_result in glob.glob(self.base_path + "/rekall*gz"):
      self.assertIn(os.path.basename(glob_result), relative_results)

  def testRecursiveRegexGlob(self):
    paths = [self.base_path + "/**3/*.gz"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("profiles/v1.0/nt/index.gz", relative_results)
    self.assertIn("bigquery/ExportedFile.json.gz", relative_results)
    for r in relative_results:
      self.assertEqual(os.path.splitext(r)[1], ".gz")

    paths = [self.base_path + "/**2/*.gz"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertNotIn("profiles/v1.0/nt/index.gz", relative_results)
    self.assertIn("bigquery/ExportedFile.json.gz", relative_results)
    for r in relative_results:
      self.assertEqual(os.path.splitext(r)[1], ".gz")

  def testDoubleRecursionFails(self):
    paths = [self.base_path + "/**/**/test.exe"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)

  def testInvalidInput(self):
    paths = [self.base_path + "/r**z"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)

    paths = [self.base_path + "/**.exe"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)

    paths = [self.base_path + "/test**"]
    with self.assertRaises(ValueError):
      self._RunFileFinder(paths, self.stat_action)

  def testGroupings(self):
    paths = [self.base_path + "/a/b/{c,d}/hello*"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b/c/helloc.txt", relative_results)
    self.assertIn("a/b/d/hellod.txt", relative_results)

    paths = [self.base_path + "/a/b/*/hello{c,d}.txt"]
    results = self._RunFileFinder(paths, self.stat_action)
    relative_results = self._GetRelativeResults(results)
    self.assertIn("a/b/c/helloc.txt", relative_results)
    self.assertIn("a/b/d/hellod.txt", relative_results)
  def testFollowLinks(self):
    try:
      # This sets up a structure as follows:
      # tmp_dir/lnk_test/contains_lnk
      # tmp_dir/lnk_test/contains_lnk/lnk
      # tmp_dir/lnk_test/lnk_target
      # tmp_dir/lnk_test/lnk_target/target
      # lnk is a symbolic link to lnk_target. A recursive find in
      # contains_lnk will find the target iff follow_links is allowed.
      test_dir = os.path.join(self.temp_dir, "lnk_test")
      contains_lnk = os.path.join(test_dir, "contains_lnk")
      lnk = os.path.join(contains_lnk, "lnk")
      lnk_target = os.path.join(test_dir, "lnk_target")
      lnk_target_contents = os.path.join(lnk_target, "target")
      os.mkdir(test_dir)
      os.mkdir(contains_lnk)
      os.mkdir(lnk_target)
      os.symlink(lnk_target, lnk)
      with open(lnk_target_contents, "wb") as fd:
        fd.write("sometext")

      paths = [contains_lnk + "/**"]
      results = self._RunFileFinder(paths, self.stat_action)
      relative_results = self._GetRelativeResults(results, base_path=test_dir)
      self.assertIn("contains_lnk/lnk", relative_results)
      self.assertIn("contains_lnk/lnk/target", relative_results)

      results = self._RunFileFinder(paths, self.stat_action, follow_links=False)
      relative_results = self._GetRelativeResults(results, base_path=test_dir)
      self.assertIn("contains_lnk/lnk", relative_results)
      self.assertNotIn("contains_lnk/lnk/target", relative_results)
    finally:
      try:
        shutil.rmtree(test_dir)
      except OSError:
        pass

  def _PrepareTimestampedFiles(self):
    searching_path = os.path.join(self.base_path, "searching")
    test_dir = os.path.join(self.temp_dir, "times_test")
    os.mkdir(test_dir)
    for f in ["dpkg.log", "dpkg_false.log", "auth.log"]:
      src = os.path.join(searching_path, f)
      dst = os.path.join(test_dir, f)
      shutil.copy(src, dst)
    return test_dir
  def RunAndCheck(self,
                  paths,
                  action=None,
                  conditions=None,
                  expected=None,
                  unexpected=None,
                  base_path=None,
                  **kw):
    action = action or self.stat_action

    raw_results = self._RunFileFinder(
        paths, action, conditions=conditions, **kw)
    relative_results = self._GetRelativeResults(
        raw_results, base_path=base_path)

    for f in unexpected:
      self.assertNotIn(f, relative_results)
    for f in expected:
      self.assertIn(f, relative_results)

  def testLiteralMatchCondition(self):
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]

    literal = "pam_unix(ssh:session)"
    clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
    bytes_before = 10
    bytes_after = 20
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_LITERAL_MATCH",
        contents_literal_match=clmc(
            literal=literal, bytes_before=bytes_before,
            bytes_after=bytes_after))

    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    relative_results = self._GetRelativeResults(
        raw_results, base_path=searching_path)
    self.assertEqual(len(relative_results), 1)
    self.assertIn("auth.log", relative_results)
    self.assertEqual(len(raw_results[0].matches), 1)

    buffer_ref = raw_results[0].matches[0]
    orig_data = open(os.path.join(searching_path, "auth.log")).read()
    self.assertEqual(
        len(buffer_ref.data), bytes_before + len(literal) + bytes_after)
    self.assertEqual(
        orig_data[buffer_ref.offset:buffer_ref.offset + buffer_ref.length],
        buffer_ref.data)

  def testLiteralMatchConditionAllHits(self):
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]

    clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
    bytes_before = 10
    bytes_after = 20
    literal = "mydomain.com"
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_LITERAL_MATCH",
        contents_literal_match=clmc(
            literal=literal,
            mode="ALL_HITS",
            bytes_before=bytes_before,
            bytes_after=bytes_after))

    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    self.assertEqual(len(raw_results), 1)
    self.assertEqual(len(raw_results[0].matches), 6)

    for buffer_ref in raw_results[0].matches:
      self.assertEqual(
          buffer_ref.data[bytes_before:bytes_before + len(literal)], literal)

  def testLiteralMatchConditionLargeFile(self):
    paths = [os.path.join(self.base_path, "new_places.sqlite")]

    literal = "RecentlyBookmarked"
    clmc = rdf_file_finder.FileFinderContentsLiteralMatchCondition
    bytes_before = 10
    bytes_after = 20
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_LITERAL_MATCH",
        contents_literal_match=clmc(
            literal=literal,
            mode="ALL_HITS",
            bytes_before=bytes_before,
            bytes_after=bytes_after))

    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    self.assertEqual(len(raw_results), 1)
    self.assertEqual(len(raw_results[0].matches), 1)

    buffer_ref = raw_results[0].matches[0]
    with open(paths[0], "rb") as fd:
      fd.seek(buffer_ref.offset)
      self.assertEqual(buffer_ref.data, fd.read(buffer_ref.length))
    self.assertEqual(
        buffer_ref.data[bytes_before:bytes_before + len(literal)], literal)
  def testRegexMatchCondition(self):
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]

    regex = r"pa[nm]_o?unix\(s{2}h"
    bytes_before = 10
    bytes_after = 20
    crmc = rdf_file_finder.FileFinderContentsRegexMatchCondition
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_REGEX_MATCH",
        contents_regex_match=crmc(
            regex=regex,
            bytes_before=bytes_before,
            bytes_after=bytes_after,
        ))

    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    relative_results = self._GetRelativeResults(
        raw_results, base_path=searching_path)
    self.assertEqual(len(relative_results), 1)
    self.assertIn("auth.log", relative_results)
    self.assertEqual(len(raw_results[0].matches), 1)

    buffer_ref = raw_results[0].matches[0]
    orig_data = open(os.path.join(searching_path, "auth.log")).read()
    self.assertEqual(
        orig_data[buffer_ref.offset:buffer_ref.offset + buffer_ref.length],
        buffer_ref.data)

  def testRegexMatchConditionAllHits(self):
    searching_path = os.path.join(self.base_path, "searching")
    paths = [searching_path + "/{dpkg.log,dpkg_false.log,auth.log}"]

    bytes_before = 10
    bytes_after = 20
    crmc = rdf_file_finder.FileFinderContentsRegexMatchCondition
    regex = r"mydo....\.com"
    condition = rdf_file_finder.FileFinderCondition(
        condition_type="CONTENTS_REGEX_MATCH",
        contents_regex_match=crmc(
            regex=regex,
            mode="ALL_HITS",
            bytes_before=bytes_before,
            bytes_after=bytes_after,
        ))

    raw_results = self._RunFileFinder(
        paths, self.stat_action, conditions=[condition])
    self.assertEqual(len(raw_results), 1)
    self.assertEqual(len(raw_results[0].matches), 6)

    for buffer_ref in raw_results[0].matches:
      needle = "mydomain.com"
      self.assertEqual(buffer_ref.data[bytes_before:bytes_before + len(needle)],
                       needle)

  def testHashAction(self):
    paths = [os.path.join(self.base_path, "hello.exe")]

    hash_action = rdf_file_finder.FileFinderAction(
        action_type=rdf_file_finder.FileFinderAction.Action.HASH)
    results = self._RunFileFinder(paths, hash_action)
    self.assertEqual(len(results), 1)
    res = results[0]
    data = open(paths[0], "rb").read()
    self.assertEqual(res.hash_entry.num_bytes, len(data))
    self.assertEqual(res.hash_entry.md5.HexDigest(),
                     hashlib.md5(data).hexdigest())
    self.assertEqual(res.hash_entry.sha1.HexDigest(),
                     hashlib.sha1(data).hexdigest())
    self.assertEqual(res.hash_entry.sha256.HexDigest(),
                     hashlib.sha256(data).hexdigest())

    hash_action = rdf_file_finder.FileFinderAction(
        action_type=rdf_file_finder.FileFinderAction.Action.HASH,
        hash=rdf_file_finder.FileFinderHashActionOptions(
            max_size=100, oversized_file_policy="SKIP"))

    results = self._RunFileFinder(paths, hash_action)
    self.assertEqual(len(results), 1)
    res = results[0]
    self.assertFalse(res.HasField("hash"))

    hash_action = rdf_file_finder.FileFinderAction(
        action_type=rdf_file_finder.FileFinderAction.Action.HASH,
        hash=rdf_file_finder.FileFinderHashActionOptions(
            max_size=100, oversized_file_policy="HASH_TRUNCATED"))

    results = self._RunFileFinder(paths, hash_action)
    self.assertEqual(len(results), 1)
    res = results[0]
    data = open(paths[0], "rb").read()[:100]
    self.assertEqual(res.hash_entry.num_bytes, len(data))
    self.assertEqual(res.hash_entry.md5.HexDigest(),
                     hashlib.md5(data).hexdigest())
    self.assertEqual(res.hash_entry.sha1.HexDigest(),
                     hashlib.sha1(data).hexdigest())
    self.assertEqual(res.hash_entry.sha256.HexDigest(),
                     hashlib.sha256(data).hexdigest())
  def _RunFileFinderDownloadHello(self, upload, opts=None):
    action = rdf_file_finder.FileFinderAction.Download()
    action.download = opts

    upload.return_value = rdf_client.UploadedFile(
        bytes_uploaded=42, file_id="foo", hash=rdf_crypto.Hash())

    hello_path = os.path.join(self.base_path, "hello.exe")
    return self._RunFileFinder([hello_path], action)

  @mock.patch.object(comms.GRRClientWorker, "UploadFile")
  def testDownloadActionDefault(self, upload):
    results = self._RunFileFinderDownloadHello(upload)
    self.assertEquals(len(results), 1)
    self.assertTrue(upload.called_with(max_bytes=None))
    self.assertTrue(results[0].HasField("uploaded_file"))
    self.assertEquals(results[0].uploaded_file, upload.return_value)

  @mock.patch.object(comms.GRRClientWorker, "UploadFile")
  def testDownloadActionSkip(self, upload):
    opts = rdf_file_finder.FileFinderDownloadActionOptions(
        max_size=0, oversized_file_policy="SKIP")

    results = self._RunFileFinderDownloadHello(upload, opts=opts)
    self.assertEquals(len(results), 1)
    self.assertFalse(upload.called)
    self.assertFalse(results[0].HasField("uploaded_file"))

  @mock.patch.object(comms.GRRClientWorker, "UploadFile")
  def testDownloadActionTruncate(self, upload):
    opts = rdf_file_finder.FileFinderDownloadActionOptions(
        max_size=42, oversized_file_policy="DOWNLOAD_TRUNCATED")

    results = self._RunFileFinderDownloadHello(upload, opts=opts)
    self.assertEquals(len(results), 1)
    self.assertTrue(upload.called_with(max_bytes=42))
    self.assertTrue(results[0].HasField("uploaded_file"))
    self.assertEquals(results[0].uploaded_file, upload.return_value)
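
  # These values match the Linux inode flags FS_COMPR_FL (0x4) and
  # FS_IMMUTABLE_FL (0x10) from the kernel headers, toggled via chattr +c/+i.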
  EXT2_COMPR_FL = 0x00000004
  EXT2_IMMUTABLE_FL = 0x00000010

  # TODO(hanuszczak): Maybe it would make sense to refactor this to a helper
  # constructor of the `rdf_file_finder.FileFinderAction`.
  @staticmethod
  def _StatAction(**kwargs):
    action_type = rdf_file_finder.FileFinderAction.Action.STAT
    opts = rdf_file_finder.FileFinderStatActionOptions(**kwargs)
    return rdf_file_finder.FileFinderAction(action_type=action_type, stat=opts)

  @unittest.skipIf(platform.system() != "Linux", "requires Linux")
  def testStatExtFlags(self):
    with test_lib.AutoTempFilePath() as temp_filepath:
      if subprocess.call(["which", "chattr"]) != 0:
        raise unittest.SkipTest("`chattr` command is not available")
      if subprocess.call(["chattr", "+c", temp_filepath]) != 0:
        reason = "extended attributes not supported by filesystem"
        raise unittest.SkipTest(reason)

      action = self._StatAction()
      results = self._RunFileFinder([temp_filepath], action)
      self.assertEqual(len(results), 1)

      stat_entry = results[0].stat_entry
      self.assertTrue(stat_entry.st_flags_linux & self.EXT2_COMPR_FL)
      self.assertFalse(stat_entry.st_flags_linux & self.EXT2_IMMUTABLE_FL)

  def testStatExtAttrs(self):
    with test_lib.AutoTempFilePath() as temp_filepath:
      self._SetExtAttr(temp_filepath, "user.foo", "bar")
      self._SetExtAttr(temp_filepath, "user.quux", "norf")

      action = self._StatAction()
      results = self._RunFileFinder([temp_filepath], action)
      self.assertEqual(len(results), 1)

      ext_attrs = results[0].stat_entry.ext_attrs
      self.assertEqual(ext_attrs[0].name, "user.foo")
      self.assertEqual(ext_attrs[0].value, "bar")
      self.assertEqual(ext_attrs[1].name, "user.quux")
      self.assertEqual(ext_attrs[1].value, "norf")

      action = self._StatAction(ext_attrs=False)
      results = self._RunFileFinder([temp_filepath], action)
      self.assertEqual(len(results), 1)

      ext_attrs = results[0].stat_entry.ext_attrs
      self.assertFalse(ext_attrs)

  @classmethod
  def _SetExtAttr(cls, filepath, name, value):
    if platform.system() == "Linux":
      cls._SetExtAttrLinux(filepath, name, value)
    elif platform.system() == "Darwin":
      cls._SetExtAttrOsx(filepath, name, value)
    else:
      raise unittest.SkipTest("unsupported system")

  @classmethod
  def _SetExtAttrLinux(cls, filepath, name, value):
    if subprocess.call(["which", "setfattr"]) != 0:
      raise unittest.SkipTest("`setfattr` command is not available")
    if subprocess.call(["setfattr", filepath, "-n", name, "-v", value]) != 0:
      raise unittest.SkipTest("extended attributes not supported by filesystem")

  @classmethod
  def _SetExtAttrOsx(cls, filepath, name, value):
    if subprocess.call(["xattr", "-w", name, value, filepath]) != 0:
      raise unittest.SkipTest("extended attributes not supported")
  def testLinkStat(self):
    """Tests resolving symlinks when getting stat entries."""
    test_dir = os.path.join(self.temp_dir, "lnk_stat_test")
    lnk = os.path.join(test_dir, "lnk")
    lnk_target = os.path.join(test_dir, "lnk_target")

    os.mkdir(test_dir)
    with open(lnk_target, "wb") as fd:
      fd.write("sometext")
    os.symlink(lnk_target, lnk)

    paths = [lnk]
    link_size = os.lstat(lnk).st_size
    target_size = os.stat(lnk).st_size
    for expected_size, resolve_links in [(link_size, False), (target_size,
                                                              True)]:
      stat_action = rdf_file_finder.FileFinderAction.Stat(
          resolve_links=resolve_links)
      results = self._RunFileFinder(paths, stat_action)
      self.assertEqual(len(results), 1)
      res = results[0]
      self.assertEqual(res.stat_entry.st_size, expected_size)

  def testModificationTimeCondition(self):
    with utils.Stubber(os, "lstat", MyStat):
      test_dir = self._PrepareTimestampedFiles()

      # We have one "old" file, auth.log, and two "new" ones, dpkg*.
      paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]

      change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
      modification_time_condition = rdf_file_finder.FileFinderCondition(
          condition_type="MODIFICATION_TIME",
          modification_time=rdf_file_finder.FileFinderModificationTimeCondition(
              max_last_modified_time=change_time))

      self.RunAndCheck(
          paths,
          conditions=[modification_time_condition],
          expected=["dpkg.log", "dpkg_false.log"],
          unexpected=["auth.log"],
          base_path=test_dir)

      # Now just the file from 2022.
      modification_time_condition = rdf_file_finder.FileFinderCondition(
          condition_type="MODIFICATION_TIME",
          modification_time=rdf_file_finder.FileFinderModificationTimeCondition(
              min_last_modified_time=change_time))

      self.RunAndCheck(
          paths,
          conditions=[modification_time_condition],
          expected=["auth.log"],
          unexpected=["dpkg.log", "dpkg_false.log"],
          base_path=test_dir)

  def testAccessTimeCondition(self):
    with utils.Stubber(os, "lstat", MyStat):
      test_dir = self._PrepareTimestampedFiles()
      paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
      change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")

      # Check we can get the normal files.
      access_time_condition = rdf_file_finder.FileFinderCondition(
          condition_type="ACCESS_TIME",
          access_time=rdf_file_finder.FileFinderAccessTimeCondition(
              max_last_access_time=change_time))

      self.RunAndCheck(
          paths,
          conditions=[access_time_condition],
          expected=["dpkg.log", "dpkg_false.log"],
          unexpected=["auth.log"],
          base_path=test_dir)

      # Now just the file from 2022.
      access_time_condition = rdf_file_finder.FileFinderCondition(
          condition_type="ACCESS_TIME",
          access_time=rdf_file_finder.FileFinderAccessTimeCondition(
              min_last_access_time=change_time))

      self.RunAndCheck(
          paths,
          conditions=[access_time_condition],
          expected=["auth.log"],
          unexpected=["dpkg.log", "dpkg_false.log"],
          base_path=test_dir)
def testInodeChangeTimeCondition(self):
with utils.Stubber(os, "lstat", MyStat):
test_dir = self._PrepareTimestampedFiles()
      # We have two "old" files, dpkg*, and one "new" one, auth.log.
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
# Check we can get the auth log only (huge ctime).
change_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
ichange_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="INODE_CHANGE_TIME",
inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition(
min_last_inode_change_time=change_time))
self.RunAndCheck(
paths,
conditions=[ichange_time_condition],
expected=["auth.log"],
unexpected=["dpkg.log", "dpkg_false.log"],
base_path=test_dir)
# Now just the others.
ichange_time_condition = rdf_file_finder.FileFinderCondition(
condition_type="INODE_CHANGE_TIME",
inode_change_time=rdf_file_finder.FileFinderInodeChangeTimeCondition(
max_last_inode_change_time=change_time))
self.RunAndCheck(
paths,
conditions=[ichange_time_condition],
expected=["dpkg.log", "dpkg_false.log"],
unexpected=["auth.log"],
base_path=test_dir)
def testSizeCondition(self):
test_dir = self._PrepareTimestampedFiles()
paths = [test_dir + "/{dpkg.log,dpkg_false.log,auth.log}"]
# Auth.log is 770 bytes, the other two ~620 each.
size_condition = rdf_file_finder.FileFinderCondition(
condition_type="SIZE",
size=rdf_file_finder.FileFinderSizeCondition(min_file_size=700))
self.RunAndCheck(
paths,
conditions=[size_condition],
expected=["auth.log"],
unexpected=["dpkg.log", "dpkg_false.log"],
base_path=test_dir)
size_condition = rdf_file_finder.FileFinderCondition(
condition_type="SIZE",
size=rdf_file_finder.FileFinderSizeCondition(max_file_size=700))
self.RunAndCheck(
paths,
conditions=[size_condition],
expected=["dpkg.log", "dpkg_false.log"],
unexpected=["auth.log"],
base_path=test_dir)
def testXDEV(self):
test_dir = os.path.join(self.temp_dir, "xdev_test")
local_dev_dir = os.path.join(test_dir, "local_dev")
net_dev_dir = os.path.join(test_dir, "net_dev")
os.mkdir(test_dir)
os.mkdir(local_dev_dir)
os.mkdir(net_dev_dir)
local_file = os.path.join(local_dev_dir, "local_file")
net_file = os.path.join(net_dev_dir, "net_file")
with open(local_file, "wb") as fd:
fd.write("local_data")
with open(net_file, "wb") as fd:
fd.write("net_data")
all_mountpoints = [local_dev_dir, net_dev_dir, "/some/other/dir"]
local_mountpoints = [local_dev_dir]
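    # The psutil stub below makes net_dev_dir look like a non-local mount:
    # it shows up in disk_partitions(all=True) but not in
    # disk_partitions(all=False). The LOCAL mode must therefore stop at the
    # net_dev boundary, ALWAYS crosses it, and NEVER stays on the start device.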
def MyDiskPartitions(all=False): # pylint: disable=redefined-builtin
mp = collections.namedtuple("MountPoint", ["mountpoint"])
if all:
return [mp(mountpoint=m) for m in all_mountpoints]
else:
return [mp(mountpoint=m) for m in local_mountpoints]
with utils.Stubber(psutil, "disk_partitions", MyDiskPartitions):
paths = [test_dir + "/**5"]
self.RunAndCheck(
paths,
expected=[
"local_dev", "local_dev/local_file", "net_dev", "net_dev/net_file"
],
unexpected=[],
base_path=test_dir,
xdev="ALWAYS")
self.RunAndCheck(
paths,
expected=["local_dev", "local_dev/local_file", "net_dev"],
unexpected=["net_dev/net_file"],
base_path=test_dir,
xdev="LOCAL")
self.RunAndCheck(
paths,
expected=["local_dev", "net_dev"],
unexpected=["local_dev/local_file", "net_dev/net_file"],
base_path=test_dir,
xdev="NEVER")
class RegexMatcherTest(unittest.TestCase):
@staticmethod
def _RegexMatcher(string):
regex = rdf_standard.RegularExpression(string)
return client_file_finder.RegexMatcher(regex)
def testMatchLiteral(self):
matcher = self._RegexMatcher("foo")
span = matcher.Match("foobar", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 3)
span = matcher.Match("foobarfoobar", 2)
self.assertTrue(span)
self.assertEqual(span.begin, 6)
self.assertEqual(span.end, 9)
def testNoMatchLiteral(self):
matcher = self._RegexMatcher("baz")
span = matcher.Match("foobar", 0)
self.assertFalse(span)
span = matcher.Match("foobazbar", 5)
self.assertFalse(span)
def testMatchWildcard(self):
matcher = self._RegexMatcher("foo.*bar")
span = matcher.Match("foobar", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 6)
span = matcher.Match("quuxfoobazbarnorf", 2)
self.assertTrue(span)
self.assertEqual(span.begin, 4)
self.assertEqual(span.end, 13)
def testMatchRepeated(self):
matcher = self._RegexMatcher("qu+x")
span = matcher.Match("quuuux", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 6)
span = matcher.Match("qx", 0)
self.assertFalse(span)
span = matcher.Match("qvvvvx", 0)
self.assertFalse(span)
class LiteralMatcherTest(unittest.TestCase):
def testMatchLiteral(self):
matcher = client_file_finder.LiteralMatcher("bar")
span = matcher.Match("foobarbaz", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 3)
self.assertEqual(span.end, 6)
span = matcher.Match("barbarbar", 0)
self.assertTrue(span)
self.assertEqual(span.begin, 0)
self.assertEqual(span.end, 3)
span = matcher.Match("barbarbar", 4)
self.assertTrue(span)
self.assertEqual(span.begin, 6)
self.assertEqual(span.end, 9)
def testNoMatchLiteral(self):
matcher = client_file_finder.LiteralMatcher("norf")
span = matcher.Match("quux", 0)
self.assertFalse(span)
span = matcher.Match("norf", 2)
self.assertFalse(span)
span = matcher.Match("quuxnorf", 5)
self.assertFalse(span)
class ConditionTestMixin(object):
def setUp(self):
super(ConditionTestMixin, self).setUp()
self.temp_filepath = test_lib.TempFilePath()
def tearDown(self):
super(ConditionTestMixin, self).tearDown()
os.remove(self.temp_filepath)
@unittest.skipIf(platform.system() == "Windows", "requires Unix-like system")
class MetadataConditionTestMixin(ConditionTestMixin):
def Stat(self):
return utils.Stat(self.temp_filepath, follow_symlink=False)
def Touch(self, mode, date):
self.assertIn(mode, ["-m", "-a"])
result = subprocess.call(["touch", mode, "-t", date, self.temp_filepath])
# Sanity check in case something is wrong with the test.
self.assertEqual(result, 0)
class ModificationTimeConditionTest(MetadataConditionTestMixin,
unittest.TestCase):
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.ModificationTimeCondition(params)
self.Touch("-m", "198309121200") # 1983-09-12 12:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-m", "201710020815") # 2017-10-02 8:15
self.assertTrue(condition.Check(self.Stat()))
def testMinTime(self):
time = rdfvalue.RDFDatetime.FromHumanReadable("2017-12-24 19:00:00")
params = rdf_file_finder.FileFinderCondition()
params.modification_time.min_last_modified_time = time
condition = client_file_finder.ModificationTimeCondition(params)
self.Touch("-m", "201712240100") # 2017-12-24 1:30
self.assertFalse(condition.Check(self.Stat()))
self.Touch("-m", "201806141700") # 2018-06-14 17:00
self.assertTrue(condition.Check(self.Stat()))
def testMaxTime(self):
time = rdfvalue.RDFDatetime.FromHumanReadable("2125-12-28 18:45")
params = rdf_file_finder.FileFinderCondition()
params.modification_time.max_last_modified_time = time
condition = client_file_finder.ModificationTimeCondition(params)
self.Touch("-m", "211811111200") # 2118-11-11 12:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-m", "222510201500") # 2225-10-20 15:00
self.assertFalse(condition.Check(self.Stat()))
class AccessTimeConditionTest(MetadataConditionTestMixin, unittest.TestCase):
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.AccessTimeCondition(params)
self.Touch("-a", "241007151200") # 2410-07-15 12:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-a", "201005160745") # 2010-05-16 7:45
self.assertTrue(condition.Check(self.Stat()))
def testRange(self):
min_time = rdfvalue.RDFDatetime.FromHumanReadable("2156-01-27")
max_time = rdfvalue.RDFDatetime.FromHumanReadable("2191-12-05")
params = rdf_file_finder.FileFinderCondition()
params.access_time.min_last_access_time = min_time
params.access_time.max_last_access_time = max_time
condition = client_file_finder.AccessTimeCondition(params)
self.Touch("-a", "215007280000") # 2150-07-28 0:00
self.assertFalse(condition.Check(self.Stat()))
self.Touch("-a", "219101010000") # 2191-01-01 0:00
self.assertTrue(condition.Check(self.Stat()))
self.Touch("-a", "221003010000") # 2210-03-01 0:00
self.assertFalse(condition.Check(self.Stat()))
class SizeConditionTest(MetadataConditionTestMixin, unittest.TestCase):
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.SizeCondition(params)
with open(self.temp_filepath, "wb") as fd:
fd.write("1234567")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("")
self.assertTrue(condition.Check(self.Stat()))
def testRange(self):
params = rdf_file_finder.FileFinderCondition()
params.size.min_file_size = 2
params.size.max_file_size = 6
condition = client_file_finder.SizeCondition(params)
with open(self.temp_filepath, "wb") as fd:
fd.write("1")
self.assertFalse(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("12")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("1234")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("123456")
self.assertTrue(condition.Check(self.Stat()))
with open(self.temp_filepath, "wb") as fd:
fd.write("1234567")
self.assertFalse(condition.Check(self.Stat()))
class ExtFlagsConditionTest(MetadataConditionTestMixin, unittest.TestCase):
# https://github.com/apple/darwin-xnu/blob/master/bsd/sys/stat.h
UF_NODUMP = 0x00000001
UF_IMMUTABLE = 0x00000002
UF_HIDDEN = 0x00008000
# https://github.com/torvalds/linux/blob/master/include/uapi/linux/fs.h
FS_COMPR_FL = 0x00000004
FS_IMMUTABLE_FL = 0x00000010
FS_NODUMP_FL = 0x00000040
def testDefault(self):
params = rdf_file_finder.FileFinderCondition()
condition = client_file_finder.ExtFlagsCondition(params)
self.assertTrue(condition.Check(self.Stat()))
def testNoMatchOsxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_set = self.UF_IMMUTABLE | self.UF_NODUMP
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["nodump"])
self.assertFalse(condition.Check(self.Stat()))
def testNoMatchOsxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_unset = self.UF_NODUMP | self.UF_HIDDEN
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["hidden"])
self.assertFalse(condition.Check(self.Stat()))
def testNoMatchLinuxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_set = self.FS_IMMUTABLE_FL
condition = client_file_finder.ExtFlagsCondition(params)
self.assertFalse(condition.Check(self.Stat()))
def testNoMatchLinuxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_unset = self.FS_COMPR_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+c", "+d"])
self.assertFalse(condition.Check(self.Stat()))
def testMatchOsxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_set = self.UF_NODUMP | self.UF_HIDDEN
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["nodump", "hidden", "uappend"])
try:
self.assertTrue(condition.Check(self.Stat()))
finally:
# Make the test file deletable.
self._Chflags(["nouappend"])
def testMatchLinuxBitsSet(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_set = self.FS_COMPR_FL | self.FS_NODUMP_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+c", "+d"])
self.assertTrue(condition.Check(self.Stat()))
def testMatchOsxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_unset = self.UF_NODUMP | self.UF_IMMUTABLE
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["hidden", "uappend"])
try:
self.assertTrue(condition.Check(self.Stat()))
finally:
# Make the test file deletable.
self._Chflags(["nouappend"])
def testMatchLinuxBitsUnset(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_unset = self.FS_IMMUTABLE_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+c", "+d"])
self.assertTrue(condition.Check(self.Stat()))
def testMatchOsxBitsMixed(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.osx_bits_set = self.UF_NODUMP
params.ext_flags.osx_bits_unset = self.UF_HIDDEN
params.ext_flags.linux_bits_unset = self.FS_NODUMP_FL
condition = client_file_finder.ExtFlagsCondition(params)
self._Chflags(["nodump", "uappend"])
try:
self.assertTrue(condition.Check(self.Stat()))
finally:
# Make the test file deletable.
self._Chflags(["nouappend"])
def testMatchLinuxBitsMixed(self):
params = rdf_file_finder.FileFinderCondition()
params.ext_flags.linux_bits_set = self.FS_NODUMP_FL
params.ext_flags.linux_bits_unset = self.FS_COMPR_FL
params.ext_flags.osx_bits_unset = self.UF_IMMUTABLE
condition = client_file_finder.ExtFlagsCondition(params)
self._Chattr(["+d"])
self.assertTrue(condition.Check(self.Stat()))
def _Chattr(self, args):
if platform.system() != "Linux":
raise unittest.SkipTest("requires Linux")
if subprocess.call(["which", "chattr"]) != 0:
raise unittest.SkipTest("the `chattr` command is not available")
if subprocess.call(["chattr"] + args + [self.temp_filepath]) != 0:
reason = "extended attributes are not supported by filesystem"
raise unittest.SkipTest(reason)
def _Chflags(self, args):
if platform.system() != "Darwin":
raise unittest.SkipTest("requires macOS")
subprocess.check_call(["chflags", ",".join(args), self.temp_filepath])
# TODO(hanuszczak): Write tests for the metadata change condition.
class LiteralMatchConditionTest(ConditionTestMixin, unittest.TestCase):
def testNoHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo bar quux")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "baz"
params.contents_literal_match.mode = "ALL_HITS"
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertFalse(results)
def testSomeHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo bar foo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "foo"
params.contents_literal_match.mode = "ALL_HITS"
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].data, "foo")
self.assertEqual(results[0].offset, 0)
self.assertEqual(results[0].length, 3)
self.assertEqual(results[1].data, "foo")
self.assertEqual(results[1].offset, 8)
self.assertEqual(results[1].length, 3)
def testFirstHit(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("bar foo baz foo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "foo"
params.contents_literal_match.mode = "FIRST_HIT"
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].data, "foo")
self.assertEqual(results[0].offset, 4)
self.assertEqual(results[0].length, 3)
def testContext(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo foo foo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "foo"
params.contents_literal_match.mode = "ALL_HITS"
params.contents_literal_match.bytes_before = 3
params.contents_literal_match.bytes_after = 2
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 3)
self.assertEqual(results[0].data, "foo f")
self.assertEqual(results[0].offset, 0)
self.assertEqual(results[0].length, 5)
self.assertEqual(results[1].data, "oo foo f")
self.assertEqual(results[1].offset, 1)
self.assertEqual(results[1].length, 8)
self.assertEqual(results[2].data, "oo foo")
self.assertEqual(results[2].offset, 5)
self.assertEqual(results[2].length, 6)
def testStartOffset(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("oooooooo")
params = rdf_file_finder.FileFinderCondition()
params.contents_literal_match.literal = "ooo"
params.contents_literal_match.mode = "ALL_HITS"
params.contents_literal_match.start_offset = 2
condition = client_file_finder.LiteralMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].data, "ooo")
self.assertEqual(results[0].offset, 2)
self.assertEqual(results[0].length, 3)
self.assertEqual(results[1].data, "ooo")
self.assertEqual(results[1].offset, 5)
self.assertEqual(results[1].length, 3)
class RegexMatchCondition(ConditionTestMixin, unittest.TestCase):
def testNoHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo bar quux")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "\\d+"
params.contents_regex_match.mode = "FIRST_HIT"
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertFalse(results)
def testSomeHits(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foo 7 bar 49 baz343")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "\\d+"
params.contents_regex_match.mode = "ALL_HITS"
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 3)
self.assertEqual(results[0].data, "7")
self.assertEqual(results[0].offset, 4)
self.assertEqual(results[0].length, 1)
self.assertEqual(results[1].data, "49")
self.assertEqual(results[1].offset, 10)
self.assertEqual(results[1].length, 2)
self.assertEqual(results[2].data, "343")
self.assertEqual(results[2].offset, 16)
self.assertEqual(results[2].length, 3)
def testFirstHit(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("4 8 15 16 23 42 foo 108 bar")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "[a-z]+"
params.contents_regex_match.mode = "FIRST_HIT"
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].data, "foo")
self.assertEqual(results[0].offset, 16)
self.assertEqual(results[0].length, 3)
def testContext(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("foobarbazbaaarquux")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "ba+r"
params.contents_regex_match.mode = "ALL_HITS"
params.contents_regex_match.bytes_before = 3
params.contents_regex_match.bytes_after = 4
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 2)
self.assertEqual(results[0].data, "foobarbazb")
self.assertEqual(results[0].offset, 0)
self.assertEqual(results[0].length, 10)
self.assertEqual(results[1].data, "bazbaaarquux")
self.assertEqual(results[1].offset, 6)
self.assertEqual(results[1].length, 12)
def testStartOffset(self):
with open(self.temp_filepath, "wb") as fd:
fd.write("ooooooo")
params = rdf_file_finder.FileFinderCondition()
params.contents_regex_match.regex = "o+"
params.contents_regex_match.mode = "FIRST_HIT"
params.contents_regex_match.start_offset = 3
condition = client_file_finder.RegexMatchCondition(params)
results = list(condition.Search(self.temp_filepath))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].data, "oooo")
self.assertEqual(results[0].offset, 3)
self.assertEqual(results[0].length, 4)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| 35.708235 | 80 | 0.698405 | 5,556 | 45,528 | 5.519258 | 0.103852 | 0.054296 | 0.030947 | 0.042785 | 0.757313 | 0.703277 | 0.673634 | 0.6346 | 0.596152 | 0.568074 | 0 | 0.020659 | 0.181361 | 45,528 | 1,274 | 81 | 35.736264 | 0.802098 | 0.033496 | 0 | 0.565041 | 0 | 0 | 0.0779 | 0.012331 | 0 | 0 | 0.00182 | 0.000785 | 0.207317 | 1 | 0.080285 | false | 0.001016 | 0.022358 | 0.002033 | 0.13313 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e174818f6b393a98ed554aec714f2c139a01e0c8 | 1,624 | py | Python | lesson13/sunzhaohui/reboot/deploy/models.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson13/sunzhaohui/reboot/deploy/models.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson13/sunzhaohui/reboot/deploy/models.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
from users.models import UserProfile
class Deploy(models.Model):
    STATUS = (
        (0, 'requested'),
        (1, 'under review'),
        (2, 'deploying'),
        (3, 'cancelled'),
        (4, 'deployed'),
        (5, 'failed'),
    )
    name = models.CharField(max_length=40, verbose_name='project name')
    version = models.CharField(max_length=40, verbose_name='release version')
    version_desc = models.CharField(max_length=100, verbose_name='version description')
    applicant = models.ForeignKey(UserProfile, verbose_name='applicant', on_delete=models.CASCADE,
                                  related_name="applicant")
    reviewer = models.ForeignKey(UserProfile, verbose_name='reviewer', on_delete=models.CASCADE, blank=True, null=True,
                                 related_name="reviewer")
    handler = models.ForeignKey(UserProfile, verbose_name='final handler', blank=True, null=True,
                                on_delete=models.CASCADE, related_name='handler')
    update_detail = models.TextField(verbose_name='update details')
    status = models.IntegerField(default=0, choices=STATUS, verbose_name='deployment status')
    apply_time = models.DateTimeField(auto_now_add=True, verbose_name='request time')
    review_time = models.DateTimeField(auto_now=False, verbose_name='review time', null=True)
    deploy_time = models.DateTimeField(auto_now=False, verbose_name='deployment time', null=True)
    end_time = models.DateTimeField(auto_now=False, verbose_name='end time', null=True)
    build_serial = models.IntegerField(verbose_name='build number', default=0, null=True)
    build_url = models.CharField(max_length=100, verbose_name='build URL', null=True)
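# Illustrative usage (not part of the original file):
#     Deploy.objects.filter(status=1) lists all requests currently under review.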
| 40.6 | 113 | 0.67734 | 201 | 1,624 | 5.288557 | 0.38806 | 0.144873 | 0.067733 | 0.09031 | 0.511759 | 0.331138 | 0.270931 | 0.129821 | 0 | 0 | 0 | 0.013751 | 0.193966 | 1,624 | 39 | 114 | 41.641026 | 0.798319 | 0.014778 | 0 | 0.068966 | 0 | 0 | 0.058307 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.103448 | 0 | 0.655172 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
e17d6ab7a795e35c2eccfd187299cdaa6e5f367c | 60,009 | py | Python | pySPACE/resources/dataset_defs/stream.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 32 | 2015-02-20T09:03:09.000Z | 2022-02-25T22:32:52.000Z | pySPACE/resources/dataset_defs/stream.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 5 | 2015-05-18T15:08:40.000Z | 2020-03-05T19:18:01.000Z | pySPACE/resources/dataset_defs/stream.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 18 | 2015-09-28T07:16:38.000Z | 2021-01-20T13:52:19.000Z | """ Reader objects and main class for continuous data (time series)
Depending on the storage format, the fitting reader is loaded and takes care
of reading the files.
.. todo:: unify with analyzer collection!
eeg source and analyzer sink node should work together
this connection should be documented when tested
"""
import os
import glob
import re
import numpy
import scipy
from scipy.io import loadmat
import warnings
import csv
from pySPACE.missions.support.windower import MarkerWindower
import logging
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.missions.support.WindowerInterface import AbstractStreamReader
class StreamDataset(BaseDataset):
""" Wrapper for dealing with stream datasets like raw EEG datasets
For loading streaming data you need the
:class:`~pySPACE.missions.nodes.source.time_series_source.Stream2TimeSeriesSourceNode`
as described in :ref:`tutorial_node_chain_operation`.
If ``file_name`` is given in the :ref:`meta data <storage>`,
the corresponding file is loaded, otherwise ``storage_format`` is used
to search for the needed file.
Some formats are already supported, like EEG data in the .eeg/.vhdr/.vmrk
format and other streaming data in edf or csv format. It is also possible to
load EEGLAB format (.set/.fdt) which itself can import a variety of
different EEG formats (http://sccn.ucsd.edu/eeglab/).
**csv**
        Labels can be coded with the help of an extra channel, either as a
        column in the csv file or in an extra file.
        Normally the marker entry is transformed immediately into the label,
        but this can also be done later on with extra algorithms.
        The file suffix should be *csv*.
Special Parameters in the metadata:
:sampling_frequency:
Frequency of the input data (corresponds to
1/(number of samples of one second))
(*optional, default: 1*)
:marker:
Name of the marker channel. If it is not found,
no marker is forwarded.
(*optional, default: 'marker'*)
:marker_file:
If the marker is not a column in the data file,
an external csv file in the same folder can be specified
with one column with the heading named like the *marker*
parameter and one column named *time* with increasing
numbers, which correspond to the index in the data file.
(First sample corresponds index one.)
Here, the relative path is needed as for file_name.
(*optional, default: None*)
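
        Example (illustrative only; the file and channel names are made
        up): a ``data.csv`` with the columns ``C1,C2,marker`` can carry
        the events itself, or a separate ``markers.csv`` with the columns
        ``time,marker`` can be referenced via *marker_file*, where ``time``
        holds the one-based sample index of each event.
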
**BP_eeg**
Here the standard BrainProducts format is expected with the corresponding
*.vhdr* and *.vmrk* with the same base name as the *.eeg* file.
**set**
EEGLABs format with two files (extension .set and .fdt) is expected.
**edf**
When using the European Data Format there are two different specifications
that are supported:
Plain EDF (see `EDF Spec <http://www.edfplus.info/specs/edf.html>`_) and
EDF+ (see `EDF+ Spec <http://www.edfplus.info/specs/edfplus.html>`_).
When using EDF there is no annotation- or marker-channel inside the data-
segment. You can process the data originating from a EDF file but be sure,
that you don't have any marker-information at hand, to later cut
the continuous data into interesting segments.
EDF+ extended the original EDF-Format by an annotations-channel
(named 'EDF+C') and added a feature to combine non-continuous
data segments (named 'EDF+D') in one file.
The EDF+C Format is fully supported i.e. the annotations-channel is
parsed and is forwarded in combination with the corresponding data
so that the data can later be cut into meaningful segments (windowing).
        Files which make use of the EDF+D option can be streamed - BUT: the
        information about different segments in the file is completely ignored!
        The file is treated as if it contains EDF+C data. The full support for
        EDF+D files may be integrated in a future release.
In any case, the file suffix should be *edf*.
.. warning:: Currently only one streaming dataset can be loaded
as testing data.
.. todo:: Implement loading of training and testing data.
**Parameters**
:dataset_md:
A dictionary with all the meta data.
(*optional, default: None*)
:dataset_dir:
The (absolute) directory of the dataset.
(*obligatory, default: None*)
:Author: Johannes Teiwes (johannes.teiwes@dfki.de)
:Date: 2010/10/13
:refactored: 2013/06/10 Johannes Teiwes and Mario Michael Krell
"""
def __init__(self, dataset_md=None, dataset_dir=None, **kwargs):
super(StreamDataset, self).__init__(dataset_md=dataset_md)
self.dataset_dir = dataset_dir
if not self.meta_data.has_key('storage_format'):
warnings.warn(
str("Storage Format not set for current dataset in %s" %
dataset_dir))
if self.meta_data.has_key("file_name"):
data_files = [os.path.join(dataset_dir,self.meta_data["file_name"])]
if not "storage_format" in self.meta_data:
self.meta_data["storage_format"] = \
os.path.splitext(data_files[0])[1].lower()
elif self.meta_data.has_key('storage_format'):
self.meta_data["storage_format"] = \
self.meta_data['storage_format'].lower()
# mapping of storage format to file suffix
suffix = self.meta_data['storage_format']
if "eeg" in suffix:
suffix = "eeg"
# searching files
data_files = glob.glob(os.path.join(
dataset_dir, str("*.%s" % suffix)))
if len(data_files) == 0 and suffix == "eeg":
suffix = "dat"
data_files = glob.glob(os.path.join(
dataset_dir, str("*.%s" % suffix)))
            if len(data_files) == 0:
                raise IOError(str("Cannot find any .%s file in %s" %
                                  (suffix, dataset_dir)))
            if len(data_files) != 1:
                raise IOError(str("Found more than one *.%s file in %s" %
                                  (suffix, dataset_dir)))
else:
# assume .eeg files
data_files = glob.glob(dataset_dir + os.sep + "*.eeg")
if len(data_files) == 0:
data_files = glob.glob(dataset_dir + os.sep + "*.dat")
assert len(data_files) == 1, \
"Error locating eeg-data files (.eeg/.dat)"
self.data_file = data_files[0]
self.reader = None
ec_files = glob.glob(dataset_dir + os.sep + "*.elc")
assert len(ec_files) <= 1, "More than one electrode position file found!"
if len(ec_files)==1:
try:
ec = {}
ec_file = open(ec_files[0], 'r')
                while ec_file.readline().strip() != "Positions":
                    pass
                for line in ec_file:
                    if line.strip() == "Labels":
                        break
                    pair = line.split(":")
                    ec[pair[0]] = \
                        numpy.array([int(x) for x in pair[1].split()])
nas = ec["NAS"]
lpa = ec["LPA"]
rpa = ec["RPA"]
origin = (rpa + lpa) * 0.5
vx = nas - origin
vx = vx / numpy.linalg.norm(vx)
vz = numpy.cross(vx, lpa - rpa)
vz = vz / numpy.linalg.norm(vz)
vy = numpy.cross(vz, vx)
vy = vy / numpy.linalg.norm(vy)
rotMat = numpy.linalg.inv(numpy.matrix([vx, vy, vz]))
transMat = numpy.dot(-rotMat, origin)
                for k, v in ec.iteritems():
                    ec[k] = numpy.dot(transMat, numpy.dot(v, rotMat))
self.meta_data["electrode_coordinates"] = ec
self._log("Loaded dataset specific electrode position file", logging.INFO)
            except Exception, e:
                print e
                #self.meta_data["electrode_coordinates"] = StreamDataset.ec
            finally:
                ec_file.close()
# Spherical electrode coordinates (x-axis points to the right,
# y-axis to the front, z-axis runs through the vertex; 3 params: r (radius)
# set to 1 on standard caps, theta (angle between z-axis and line connecting
# point and coordinate origin, < 0 in left hemisphere, > 0 in right
# hemisphere) and phi (angle between x-axis and projection of the line
# connecting the point and coordinate origin on the xy plane, > 0 for front
# right and back left quadrants, < 0 for front left and back right)) are
# exported from analyzer2 (generic export; saved in header file) and
# converted to Cartesian coordinates via
# x = r * sin(rad(theta)) * cos(rad(phi))
# y = r * sin(rad(theta)) * sin(rad(phi))
# z = r * cos(rad(theta))
# electrodes FP1/Fp1 and FP2/Fp2 have same coordinates
ec = { 'CPP5h': (-0.72326832569043442, -0.50643793379675761, 0.46947156278589086),
'AFF1h': (-0.11672038362490393, 0.83050868362971098, 0.5446390350150272),
'O2': (0.30901699437494745, -0.95105651629515353, 6.123233995736766e-17),
'O1': (-0.30901699437494745, -0.95105651629515353, 6.123233995736766e-17),
'FCC6h': (0.82034360384187455, 0.1743694158206236, 0.5446390350150272),
'TPP8h': (0.86385168719631511, -0.47884080932566353, 0.15643446504023092),
'PPO10h': (0.69411523801289432, -0.69411523801289421, -0.1908089953765448),
'TP7': (-0.95105651629515353, -0.3090169943749474, 6.123233995736766e-17),
'CPz': (2.293803827831453e-17, -0.37460659341591201, 0.92718385456678742),
'CCP4h': (0.54232717509597328, -0.18673822182292288, 0.8191520442889918),
'TP9': (-0.87545213915725872, -0.28445164312142457, -0.3907311284892736),
'TP8': (0.95105651629515353, -0.3090169943749474, 6.123233995736766e-17),
'FCC5h': (-0.82034360384187455, 0.1743694158206236, 0.5446390350150272),
'CPP2h': (0.16769752048474765, -0.54851387399083462, 0.8191520442889918),
'FFC1h': (-0.16769752048474765, 0.54851387399083462, 0.8191520442889918),
'TPP7h': (-0.86385168719631511, -0.47884080932566353, 0.15643446504023092),
'PO10': (0.54105917752298882, -0.7447040698476447, -0.3907311284892736),
'FTT8h': (0.96671406082679645, 0.17045777155400837, 0.19080899537654492),
'Oz': (6.123233995736766e-17, -1.0, 6.123233995736766e-17),
'AFF2h': (0.11672038362490393, 0.83050868362971098, 0.5446390350150272),
'CCP3h': (-0.54232717509597328, -0.18673822182292288, 0.8191520442889918),
'CP1': (-0.35777550984135725, -0.37048738597260156, 0.85716730070211233),
'CP2': (0.35777550984135725, -0.37048738597260156, 0.85716730070211233),
'CP3': (-0.66008387202973706, -0.36589046498407451, 0.6560590289905075),
'CP4': (0.66008387202973706, -0.36589046498407451, 0.6560590289905075),
'CP5': (-0.87157241273869712, -0.33456530317942912, 0.35836794954530016),
'CP6': (0.87157241273869712, -0.33456530317942912, 0.35836794954530016),
'FFT7h': (-0.86385168719631511, 0.47884080932566353, 0.15643446504023092),
'FTT7h': (-0.96671406082679645, 0.17045777155400837, 0.19080899537654492),
'PPO5h': (-0.5455036073850148, -0.7790598895575418, 0.30901699437494745),
'AFp1': (-0.13661609910710645, 0.97207405517694545, 0.19080899537654492),
'AFp2': (0.13661609910710645, 0.97207405517694545, 0.19080899537654492),
'FT10': (0.87545213915725872, 0.28445164312142457, -0.3907311284892736),
'POO9h': (-0.44564941557132876, -0.87463622477252034, -0.1908089953765448),
'POO10h': (0.44564941557132876, -0.87463622477252034, -0.1908089953765448),
'T8': (1.0, -0.0, 6.123233995736766e-17),
'FT7': (-0.95105651629515353, 0.3090169943749474, 6.123233995736766e-17),
'FT9': (-0.87545213915725872, 0.28445164312142457, -0.3907311284892736),
'FT8': (0.95105651629515353, 0.3090169943749474, 6.123233995736766e-17),
'FFC3h': (-0.48133227677866169, 0.53457365038161042, 0.69465837045899737),
'P10': (0.74470406984764481, -0.54105917752298871, -0.3907311284892736),
'AF8': (0.58778525229247325, 0.80901699437494734, 6.123233995736766e-17),
'T7': (-1.0, -0.0, 6.123233995736766e-17),
'AF4': (0.36009496929665602, 0.89126632448749754, 0.27563735581699916),
'AF7': (-0.58778525229247325, 0.80901699437494734, 6.123233995736766e-17),
'AF3': (-0.36009496929665602, 0.89126632448749754, 0.27563735581699916),
'P2': (0.28271918486560565, -0.69975453766943163, 0.6560590289905075),
'P3': (-0.5450074457687164, -0.67302814507021891, 0.50000000000000011),
'CPP4h': (0.48133227677866169, -0.53457365038161042, 0.69465837045899737),
'P1': (-0.28271918486560565, -0.69975453766943163, 0.6560590289905075),
'P6': (0.72547341102583851, -0.63064441484306177, 0.27563735581699916),
'P7': (-0.80901699437494745, -0.58778525229247314, 6.123233995736766e-17),
'P4': (0.5450074457687164, -0.67302814507021891, 0.50000000000000011),
'P5': (-0.72547341102583851, -0.63064441484306177, 0.27563735581699916),
'P8': (0.80901699437494745, -0.58778525229247314, 6.123233995736766e-17),
'P9': (-0.74470406984764481, -0.54105917752298871, -0.3907311284892736),
'PPO2h': (0.11672038362490393, -0.83050868362971098, 0.5446390350150272),
'F10': (0.74470406984764481, 0.54105917752298871, -0.3907311284892736),
'TPP9h': (-0.87463622477252045, -0.4456494155713287, -0.1908089953765448),
'FTT9h': (-0.96954172390250215, 0.1535603233115839, -0.1908089953765448),
'CCP5h': (-0.82034360384187455, -0.1743694158206236, 0.5446390350150272),
'AFF6h': (0.5455036073850148, 0.7790598895575418, 0.30901699437494745),
'FFC2h': (0.16769752048474765, 0.54851387399083462, 0.8191520442889918),
'FCz': (2.293803827831453e-17, 0.37460659341591201, 0.92718385456678742),
'FCC2h': (0.1949050434465294, 0.19490504344652934, 0.96126169593831889),
'CPP1h': (-0.16769752048474765, -0.54851387399083462, 0.8191520442889918),
'FTT10h': (0.96954172390250215, 0.1535603233115839, -0.1908089953765448),
'Fz': (4.3297802811774658e-17, 0.70710678118654746, 0.70710678118654757),
'TTP8h': (0.96671406082679645, -0.17045777155400837, 0.19080899537654492),
'FFT9h': (-0.87463622477252045, 0.4456494155713287, -0.1908089953765448),
'Pz': (4.3297802811774658e-17, -0.70710678118654746, 0.70710678118654757),
'FFC4h': (0.48133227677866169, 0.53457365038161042, 0.69465837045899737),
'C3': (-0.70710678118654746, -0.0, 0.70710678118654757),
'C2': (0.39073112848927372, -0.0, 0.92050485345244037),
'C1': (-0.39073112848927372, -0.0, 0.92050485345244037),
'C6': (0.92718385456678731, -0.0, 0.37460659341591218),
'C5': (-0.92718385456678731, -0.0, 0.37460659341591218),
'C4': (0.70710678118654746, -0.0, 0.70710678118654757),
'TTP7h': (-0.96671406082679645, -0.17045777155400837, 0.19080899537654492),
'FC1': (-0.35777550984135725, 0.37048738597260156, 0.85716730070211233),
'FC2': (0.35777550984135725, 0.37048738597260156, 0.85716730070211233),
'FC3': (-0.66008387202973706, 0.36589046498407451, 0.6560590289905075),
'FC4': (0.66008387202973706, 0.36589046498407451, 0.6560590289905075),
'FC5': (-0.87157241273869712, 0.33456530317942912, 0.35836794954530016),
'FC6': (0.87157241273869712, 0.33456530317942912, 0.35836794954530016),
'FCC1h': (-0.1949050434465294, 0.19490504344652934, 0.96126169593831889),
'CPP6h': (0.72326832569043442, -0.50643793379675761, 0.46947156278589086),
'F1': (-0.28271918486560565, 0.69975453766943163, 0.6560590289905075),
'F2': (0.28271918486560565, 0.69975453766943163, 0.6560590289905075),
'F3': (-0.5450074457687164, 0.67302814507021891, 0.50000000000000011),
'F4': (0.5450074457687164, 0.67302814507021891, 0.50000000000000011),
'F5': (-0.72547341102583851, 0.63064441484306177, 0.27563735581699916),
'F6': (0.72547341102583851, 0.63064441484306177, 0.27563735581699916),
'F7': (-0.80901699437494745, 0.58778525229247314, 6.123233995736766e-17),
'F8': (0.80901699437494745, 0.58778525229247314, 6.123233995736766e-17),
'F9': (-0.74470406984764481, 0.54105917752298871, -0.3907311284892736),
'FFT8h': (0.86385168719631511, 0.47884080932566353, 0.15643446504023092),
'FFT10h': (0.87463622477252045, 0.4456494155713287, -0.1908089953765448),
'Cz': (0.0, 0.0, 1.0),
'FFC5h': (-0.72326832569043442, 0.50643793379675761, 0.46947156278589086),
'FCC4h': (0.54232717509597328, 0.18673822182292288, 0.8191520442889918),
'TP10': (0.87545213915725872, -0.28445164312142457, -0.3907311284892736),
'POz': (5.6364666119006729e-17, -0.92050485345244037, 0.39073112848927372),
'CPP3h': (-0.48133227677866169, -0.53457365038161042, 0.69465837045899737),
'FFC6h': (0.72326832569043442, 0.50643793379675761, 0.46947156278589086),
'PPO1h': (-0.11672038362490393, -0.83050868362971098, 0.5446390350150272),
'Fpz': (6.123233995736766e-17, 1.0, 6.123233995736766e-17),
'POO2': (0.13661609910710645, -0.97207405517694545, 0.19080899537654492),
'POO1': (-0.13661609910710645, -0.97207405517694545, 0.19080899537654492),
'I1': (-0.28651556797120703, -0.88180424668940116, -0.37460659341591207),
'I2': (0.28651556797120703, -0.88180424668940116, -0.37460659341591207),
'PPO9h': (-0.69411523801289432, -0.69411523801289421, -0.1908089953765448),
'FP1': (-0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'OI2h': (0.15356032331158395, -0.96954172390250215, -0.1908089953765448),
'FP2': (0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'CCP6h': (0.82034360384187455, -0.1743694158206236, 0.5446390350150272),
'FCC3h': (-0.54232717509597328, 0.18673822182292288, 0.8191520442889918),
'PO8': (0.58778525229247325, -0.80901699437494734, 6.123233995736766e-17),
'PO9': (-0.54105917752298882, -0.7447040698476447, -0.3907311284892736),
'PO7': (-0.58778525229247325, -0.80901699437494734, 6.123233995736766e-17),
'PO4': (0.36009496929665602, -0.89126632448749754, 0.27563735581699916),
'PO3': (-0.36009496929665602, -0.89126632448749754, 0.27563735581699916),
'Fp1': (-0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'Fp2': (0.30901699437494745, 0.95105651629515353, 6.123233995736766e-17),
'PPO6h': (0.5455036073850148, -0.7790598895575418, 0.30901699437494745),
'CCP2h': (0.1949050434465294, -0.19490504344652934, 0.96126169593831889),
'Iz': (5.6773636985816068e-17, -0.92718385456678742, -0.37460659341591207),
'AFF5h': (-0.5455036073850148, 0.7790598895575418, 0.30901699437494745),
'TPP10h': (0.87463622477252045, -0.4456494155713287, -0.1908089953765448),
'OI1h': (-0.15356032331158395, -0.96954172390250215, -0.1908089953765448),
'CCP1h': (-0.1949050434465294, -0.19490504344652934, 0.96126169593831889)
}
def store(self, result_dir, s_format="multiplexed"):
""" Not yet implemented! """
raise NotImplementedError("Storing of StreamDataset is currently not supported!")
@staticmethod
def project2d(ec_3d):
"""
Take a dictionary of 3d Cartesian electrode coordinates and return a
dictionary of their 2d projection in Cartesian coordinates.
"""
keys = []
x = []
y = []
z = []
for k, v in ec_3d.iteritems():
keys.append(k)
x.append(v[0])
y.append(v[1])
z.append(v[2])
x = numpy.array(x)
y = numpy.array(y)
z = numpy.array(z)
z = z - numpy.max(z)
# get spherical coordinates: normally this can be done via:
# phi = deg(atan2(y,x)); if < -90 -> + 180, if > 90 -> - 180
# theta = deg(arccos(z/r)); if x < 0 -> * (-1)
hypotxy = numpy.hypot(x, y)
r = numpy.hypot(hypotxy, z)
phi = numpy.arctan2(z, hypotxy)
theta = numpy.arctan2(y, x)
phi = numpy.maximum(phi, 0.001)
r2 = r / numpy.power(numpy.cos(phi), 0.2)
x = r2 * numpy.cos(theta) * 60
y = r2 * numpy.sin(theta) * 60
ec_2d = {}
for i in xrange(0, len(keys)):
ec_2d[keys[i]] = (x[i], y[i])
return ec_2d
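    # e.g. StreamDataset.project2d(StreamDataset.ec) flattens the built-in
    # cap positions for 2d plotting; Cz at (0, 0, 1) ends up at the origin
    # because the z - max(z) shift above places the vertex at z = 0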
def set_window_defs(self, window_definition, nullmarker_stride_ms=1000,
no_overlap=False, data_consistency_check=False):
""" Takes the window definition dictionary for later reading
The parameters are later on mainly forwarded to the
:class:`~pySPACE.missions.support.windower.MarkerWindower`.
To find more about these parameters, check out its documentation.
"""
self.window_definition = window_definition
self.nullmarker_stride_ms = nullmarker_stride_ms
self.no_overlap = no_overlap
self.data_consistency_check = data_consistency_check
def get_data(self, run_nr, split_nr, train_test):
if not (run_nr, split_nr, train_test) == (0, 0, "test"):
return self.data[(run_nr, split_nr, train_test)]
if self.meta_data.has_key('storage_format'):
if "bp_eeg" in self.meta_data['storage_format']:
# remove ".eeg" suffix
self.reader = EEGReader(self.data_file[:-4],
blocksize=100)
elif "set" in self.meta_data['storage_format']:
self.reader = SETReader(self.data_file[:-4])
elif "edf" in self.meta_data['storage_format']:
self.reader = EDFReader(self.data_file)
elif "csv" in self.meta_data['storage_format']:
                sf = self.meta_data.get("sampling_frequency", 1)
                delimiter = self.meta_data.get("delimiter", None)
                mf = self.meta_data.get("marker_file", None)
                if mf is not None:
                    mf = os.path.join(self.dataset_dir, mf)
                marker = self.meta_data.get("marker", "marker")
self.reader = CsvReader(self.data_file, sampling_frequency=sf,
marker=marker, marker_file=mf,
delimiter=delimiter)
else:
self.reader = EEGReader(self.data_file, blocksize=100)
# Creates a windower that splits the training data into windows
# based in the window definitions provided
# and assigns correct labels to these windows
self.marker_windower = MarkerWindower(
self.reader, self.window_definition,
nullmarker_stride_ms=self.nullmarker_stride_ms,
no_overlap=self.no_overlap,
data_consistency_check=self.data_consistency_check)
return self.marker_windower
def parse_float(param):
""" Work around to catch colon instead of floating point """
try:
return float(param)
except ValueError, e:
warnings.warn("Failed float conversion from csv file.")
try:
return float(param.replace(".", "").replace(",", "."))
except:
warnings.warn("Secondary attempt at conversion also failed. " +
"Treating the value as string and return a 0 as " +
"placeholder.")
return float(0)
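# Illustrative behaviour (not part of the original module):
#     parse_float("3,14")    -> 3.14 (decimal comma)
#     parse_float("1.234,5") -> 1234.5 (dots treated as thousands marks)
#     parse_float("abc")     -> 0.0, after emitting two warnings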
def get_csv_handler(file_handler):
"""Helper function to get a DictReader from csv"""
try:
dialect = csv.Sniffer().sniff(file_handler.read(2048))
file_handler.seek(0)
return csv.DictReader(file_handler, dialect=dialect)
except csv.Error, e:
class excel_space(csv.excel):
delimiter = ' '
warnings.warn(str(e))
csv.register_dialect("excel_space", excel_space)
file_handler.seek(0)
return csv.DictReader(file_handler, dialect=excel_space)
class CsvReader(AbstractStreamReader):
""" Load time series data from csv file
**Parameters**
:file_path:
Path of the file to be loaded.
(*optional, default: 'data.csv'*)
:sampling_frequency:
Underlying sampling frequency of the data in Hz
(*optional, default: 1*)
:marker:
Name of the marker channel. If it is not found,
no marker is forwarded.
(*optional, default: 'marker'*)
:marker_file:
If the marker is not a column in the data file,
an external csv file in the same folder can be specified
with one column with the heading named like the *marker*
parameter and one column named *time* with increasing
numbers, which correspond to the index in the data file.
(first time point gets zero.)
Here the absolute path is needed.
(*optional, default: None*)
:delimiter:
Delimiter used in the csv file.
(*optional, default: None*)
"""
def __init__(self, file_path, sampling_frequency=1, marker="marker",
marker_file=None, delimiter=None):
try:
self.file = open(file_path, "r")
except IOError as io:
warnings.warn("Failed to open file at [%s]" % file_path)
raise io
self._dSamplingInterval = sampling_frequency
self.marker = marker
self._markerids = dict()
self._markerNames = dict()
self.callbacks = list()
self.new_marker_id = 1
self.time_index = 1
try:
            if marker_file is not None:
marker_file = open(marker_file, "r")
except IOError:
warnings.warn("Failed to open marker file at [%s]. Now ignored."
% marker_file)
self._markerids["null"] = 0
self._markerNames[0] = "null"
if delimiter is None:
self.DictReader = get_csv_handler(self.file)
else:
self.DictReader = csv.DictReader(self.file, delimiter=delimiter)
self.first_entry = self.DictReader.next()
self._channelNames = self.first_entry.keys()
self.MarkerReader = None
        if marker_file is not None:
            self.MarkerReader = get_csv_handler(marker_file)
        if self.MarkerReader is not None:
self.update_marker()
if self.next_marker[0] == self.time_index:
self.first_marker = self.next_marker[1]
self.update_marker()
else:
self.first_marker = ""
elif self.marker in self._channelNames:
self._channelNames.remove(self.marker)
self.first_marker = self.first_entry.pop(self.marker)
else:
self.first_marker = ""
@property
def dSamplingInterval(self):
""" actually the sampling frequency """
return self._dSamplingInterval
@property
def stdblocksize(self):
""" standard block size (int) """
return 1
@property
def markerids(self):
""" mapping of markers/events in stream and unique integer (dict)
The dict has to contain the mapping 'null' -> 0 to use the
nullmarkerstride option in the windower.
"""
return self._markerids
@property
def channelNames(self):
""" list of channel/sensor names """
return self._channelNames
@property
def markerNames(self):
""" inverse mapping of markerids (dict) """
return self._markerNames
def regcallback(self, func):
""" register a function as consumer of the stream """
self.callbacks.append(func)
def read(self, nblocks=1):
""" Read *nblocks* of the stream and pass it to registers functions """
n = 0
while nblocks == -1 or n < nblocks:
            if self.first_entry is not None:
samples, marker = self.first_entry, self.first_marker
self.first_entry = None
else:
                try:
                    samples = self.DictReader.next()
                except StopIteration:
                    break
            if self.MarkerReader is not None:
                if self.next_marker[0] == self.time_index:
marker = self.next_marker[1]
self.update_marker()
else:
marker = ""
elif self.marker in samples.keys():
marker = samples.pop(self.marker)
else:
marker = ""
# add marker to dict
if not marker == "" and not marker in self._markerids:
self._markerids[marker] = self.new_marker_id
self._markerNames[self.new_marker_id] = marker
self.new_marker_id += 1
# convert marker to array
markers = numpy.ones(1)*(-1)
if not marker == "":
markers[0] = self._markerids[marker]
# convert samples to array
# special handling of marker in channel names
# if the marker is in channelNames,
if self.marker in self.channelNames:
array_samples = numpy.zeros((len(self.channelNames)-1, 1))
else:
array_samples = numpy.zeros((len(self.channelNames), 1))
offset = 0
for index, channel in enumerate(self.channelNames):
if self.marker == channel:
offset -= 1
else:
array_samples[index + offset] = parse_float(samples[channel])
n += 1
for c in self.callbacks:
c(array_samples, markers)
self.time_index += 1
return n
def update_marker(self):
"""Update `next_marker` from `MarkerReader` information"""
        try:
            next_row = self.MarkerReader.next()
            # the csv module yields strings; cast the time stamp to int so
            # that the comparison with the integer time_index works
            self.next_marker = (int(next_row["time"]), next_row[self.marker])
        except StopIteration:
            pass
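# A minimal usage sketch for CsvReader (hypothetical file name, kept as a
# comment so that importing this module has no side effects):
#
#     reader = CsvReader("data.csv", sampling_frequency=25)
#     reader.regcallback(lambda samples, markers: None)
#     reader.read(nblocks=-1)  # stream the whole file
#
# Each callback receives an array of shape (n_channels, 1) and a one-element
# marker array holding -1 or the integer marker id.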
class EDFReader(AbstractStreamReader):
""" Read EDF-Data
On Instantiation it will automatically assign the value
for the blocksize coded in the edf-file to its own
attribute 'stdblocksize'.
The Feature, that different signals can have different
sampling rates is eliminated in a way, that every value
of a lower sampled signal is repeated so that it fits
the highest sampling rate present in the dataset. This
is needed to have the same length for every signal
in the returned array.
"""
def __init__(self, abs_edffile_path):
"""Initializes module and opens specified file."""
try:
self.edffile = open(abs_edffile_path, "r")
except IOError as io:
warnings.warn(str("failed to open file at [%s]" % abs_edffile_path))
raise io
# variables to later overwrite
# the properties from AbstractStreamReader
self.callbacks = list()
self._dSamplingInterval = 0
self._stdblocksize = 0
self._markerids = dict()
self._channelNames = dict()
self._markerNames = dict()
# gains, frequency for each channel
self.gains = []
self.phy_min = []
self.dig_min = []
self.frequency = []
self.num_channels = 0
self.num_samples = []
self.edf_plus = False
self.edf_header_length = 0
self.annotations = None
self.num_samples_anno = None
self.timepoint = 0.0
self.generate_meta_data()
def __str__(self):
return ("EDFReader Object (%d@%s)\n" + \
"\tEDF File:\t %s\n" + \
"\tFile Format:\t %s\n" + \
"\tBlocksize:\t %d\n" + \
"\tnChannels:\t %d\n"
"\tfrequency:\t %d [Hz] (interval: %d [ns])\n") % (
os.getpid(), os.uname()[1],
os.path.realpath(self.edffile.name),
"EDF+" if self.edf_plus else "EDF",
self.stdblocksize, len(self.channelNames),
self.dSamplingInterval, 1000000/self.dSamplingInterval)
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames[:-1] if self.edf_plus else self._channelNames
@property
def markerNames(self):
return self._markerNames
def read_edf_header(self):
"""Read edf-header information"""
m = dict()
m["version"] = self.edffile.read(8)
m["subject_id"] = self.edffile.read(80).strip()
m["recording_id"] = self.edffile.read(80).strip()
m["start_date"] = self.edffile.read(8)
m["start_time"] = self.edffile.read(8)
m["num_bytes_header"] = int(self.edffile.read(8).strip())
m["edf_c_d"] = self.edffile.read(44).strip()
m["num_data_records"] = self.edffile.read(8)
m["single_record_duration"] = float(self.edffile.read(8))
m["num_channels"] = int(self.edffile.read(4))
m["channel_names"] = list()
for i in range(m["num_channels"]):
m["channel_names"].append(self.edffile.read(16).strip())
m["electrode_type"] = list()
for i in range(m["num_channels"]):
m["electrode_type"].append(self.edffile.read(80).strip())
m["phy_dims"] = list()
for i in range(m["num_channels"]):
m["phy_dims"].append(self.edffile.read(8).strip())
m["phy_min"] = list()
for i in range(m["num_channels"]):
m["phy_min"].append(float(self.edffile.read(8).strip()))
m["phy_max"] = list()
for i in range(m["num_channels"]):
m["phy_max"].append(float(self.edffile.read(8).strip()))
m["dig_min"] = list()
for i in range(m["num_channels"]):
m["dig_min"].append(float(self.edffile.read(8).strip()))
m["dig_max"] = list()
for i in range(m["num_channels"]):
m["dig_max"].append(float(self.edffile.read(8).strip()))
m["prefilter"] = list()
for i in range(m["num_channels"]):
m["prefilter"].append(self.edffile.read(80).strip())
m["single_record_num_samples"] = list()
for i in range(m["num_channels"]):
m["single_record_num_samples"].append(int(self.edffile.read(8).strip()))
m["reserved"] = self.edffile.read(32*m["num_channels"])
# check position in file!
assert self.edffile.tell() == m["num_bytes_header"], "EDF Header corrupt!"
self.edf_header_length = self.edffile.tell()
return m
def read_edf_data(self):
"""read one record inside the data section of the edf-file"""
edfsignal = []
edfmarkers = numpy.ones(max(self.num_samples))*(-1)
# get markers from self.annotations
        if self.annotations is not None:
            # snapshot the due time stamps first: popping from the dict
            # while re-evaluating .keys() would skip or mix up annotations
            due = [t for t in self.annotations.keys()
                   if t < self.timepoint + self.delta]
            for t in due:
                tmarker = t - self.timepoint
                pmarker = int((tmarker / self.delta) * max(self.num_samples))
                edfmarkers[pmarker] = self.markerids[self.annotations.pop(t)]
self.timepoint += self.delta
        # read every signal channel of the current record; in EDF+ the
        # annotations channel has already been removed from num_samples
        for i, n in enumerate(self.num_samples):
            data = self.edffile.read(n * 2)
            if len(data) != n * 2:
                raise IOError
            channel = numpy.fromstring(data, dtype=numpy.int16).astype(numpy.float32)
            signal = (channel - self.dig_min[i]) * self.gains[i] + self.phy_min[i]
            # simple upsampling for integer factors
            # TODO: may use scipy.resample ..
            if signal.shape[0] != max(self.num_samples):
                assert max(self.num_samples) % signal.shape[0] == 0, \
                    "Signal cannot be upsampled by a non-integer factor!"
                factor = max(self.num_samples) // signal.shape[0]
                signal = signal.repeat(factor, axis=0)
            edfsignal.append(signal)
        if self.edf_plus:
            # consume the annotation samples of this record as well,
            # otherwise the next record would be read misaligned
            self.edffile.read(self.num_samples_anno * 2)
        return edfsignal, edfmarkers
def parse_annotations(self):
""" Parses times and names of the annotations
This is done beforehand - annotations are later
added to the streamed data. """
self.edffile.seek(self.edf_header_length, os.SEEK_SET)
self.annotations = dict()
data_bytes_to_skip = sum(self.num_samples)*2
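        # each record ends with "time-stamped annotation lists" (TALs):
        # an onset, an optional duration introduced by chr(21), and the
        # annotation texts are separated by chr(20); chr(0) terminates a
        # TAL (see the EDF+ specification for the exact layout)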
while True:
self.edffile.read(data_bytes_to_skip)
anno = self.edffile.read(self.num_samples_anno*2)
if len(anno) != self.num_samples_anno*2:
break
anno = anno.strip()
marker = anno.split(chr(20))
if marker[2][1:].startswith(chr(0)):
continue
base = float(marker[0])
offset = float(marker[2][1:])
name = str(marker[3])
self.annotations[base+offset] = name.strip()
def generate_meta_data(self):
""" Generate the necessary meta data for the windower """
m = self.read_edf_header()
# calculate gain for each channel
self.gains = [(px-pn)/(dx-dn) for px,pn,dx,dn in zip(m["phy_max"], m["phy_min"], m["dig_max"], m["dig_min"])]
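        # read_edf_data later converts digital to physical values via
        #     physical = (digital - dig_min) * gain + phy_min
        # e.g. a physical range of [-500, 500] uV over a digital range of
        # [-2048, 2047] yields a gain of 1000/4095, roughly 0.244 uV/digit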
self.dig_min = m["dig_min"]
self.phy_min = m["phy_min"]
self._channelNames = m["channel_names"]
self.num_channels = m["num_channels"]
self.num_samples = m["single_record_num_samples"]
# separate data from annotation channel
if m["edf_c_d"] in ["EDF+D", "EDF+C"]:
self.edf_plus = True
# the annotation channel is called "EDF Annotations" and is the last channel
assert "EDF Annotations" == m["channel_names"][-1], "Cannot determine Annotations Channel!"
if m["edf_c_d"] in ["EDF+D"]:
warnings.warn(str("The file %s contains non-continuous data-segments.\n"
"This feature is not supported and may lead to unwanted results!") % self.edffile.name)
self.num_samples_anno = self.num_samples.pop() # ignore sampling rate of the annotations channel
        else:
self.edf_plus = False
# calculate sampling interval for each channel
self.frequency = [ns/m["single_record_duration"] for ns in self.num_samples]
self._dSamplingInterval = max(self.frequency)
self._stdblocksize = max(self.num_samples)
self.delta = self.stdblocksize / max(self.frequency)
# generate all marker names and ids
self._markerids['null'] = 0
# in edf+ case we can parse them from annotations
        if self.edf_plus:
self.parse_annotations()
for i,(t,name) in enumerate(self.annotations.iteritems()):
self._markerids[name] = i+1
else:
warnings.warn("no marker channel is set - no markers will be streamed!")
            for s in range(1, 256):
                self._markerids['S%3d' % s] = s
            for r in range(1, 256):
                self._markerids['R%3d' % r] = r + 256
# generate reverse mapping
        for k, v in self._markerids.iteritems():
            self._markerNames[v] = k
# reset file position to begin of data section
self.edffile.seek(self.edf_header_length, os.SEEK_SET)
# Register callback function
def regcallback(self, func):
self.callbacks.append(func)
    # Forward blocks of data until all data is sent
def read(self, nblocks=1, verbose=False):
"""read data and call registered callbacks """
n = 0
while nblocks == -1 or n < nblocks:
try:
samples, markers = self.read_edf_data()
except IOError:
break
n += 1
for c in self.callbacks:
c(samples, markers)
return n
class SETReader(AbstractStreamReader):
""" Load eeglab .set format
    Read the eeglab format when the data has not been segmented yet. It is
    further assumed that the data is stored in binary form in a separate file
    with extension .fdt. Storing the data in .dat format or embedding it in
    the .set file itself is currently not supported.
"""
def __init__(self, abs_setfile_path, blocksize=100, verbose=False):
self.abs_setfile_path = abs_setfile_path
self._stdblocksize = blocksize
self.callbacks = list()
self._dSamplingInterval = 0
self._markerids = {"null": 0}
self._channelNames = dict()
self._markerNames = {0: "null"}
self.read_set_file()
self.fdt_handle = open(self.abs_data_path,'rb')
self.latency = 0
self.current_marker_index = 0
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames
@property
def markerNames(self):
return self._markerNames
def read_set_file(self):
setdata = loadmat(self.abs_setfile_path + '.set', appendmat=False)
# check if stream data
ntrials = setdata['EEG']['trials'][0][0][0][0]
assert(ntrials == 1), "Data consists of more than one trial. This is not supported!"
# check if data is stored in fdt format
datafilename = setdata['EEG']['data'][0][0][0]
assert(datafilename.split('.')[-1] == 'fdt'), "Data is not in fdt format!"
# collect meta information
self._dSamplingInterval = setdata['EEG']['srate'][0][0][0][0]
self._channelNames = numpy.hstack(setdata['EEG']['chanlocs'][0][0][ \
'labels'][0]).astype(numpy.str_).tolist()
self.nChannels = setdata['EEG']['nbchan'][0][0][0][0]
self.marker_data = numpy.hstack(setdata['EEG']['event'][0][0][ \
'type'][0]).astype(numpy.str_)
for marker in numpy.unique(self.marker_data):
marker_number = len(self._markerNames)
self._markerNames[marker_number] = marker
self._markerids[marker] = marker_number
self.marker_times = numpy.hstack(setdata['EEG']['event'][0][0][ \
'latency'][0]).flatten()
self.abs_data_path = os.path.join(os.path.dirname(self.abs_setfile_path),
datafilename)
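        # The chains of [0][0] indexing above unwrap scipy.io.loadmat's
        # representation of MATLAB structs, which come back as nested object
        # arrays. A sketch with a hypothetical file:
        #   setdata['EEG']['srate'][0][0][0][0]   ->  500.0  (sampling rate)
        #   setdata['EEG']['trials'][0][0][0][0]  ->  1      (continuous data)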
def regcallback(self, func):
self.callbacks.append(func)
def read(self, nblocks=1, verbose=False):
readblocks = 0
while (readblocks < nblocks or nblocks == -1):
ret, samples, markers = self.read_fdt_data()
if ret:
for f in self.callbacks:
f(samples, markers)
else:
break
readblocks += 1
return readblocks
def read_fdt_data(self):
        if self.fdt_handle is None:
return False, None, None
num_samples = self.nChannels * self._stdblocksize
markers = numpy.zeros(self._stdblocksize)
markers.fill(-1)
###### READ DATA FROM FILE ######
try:
samples = numpy.fromfile(self.fdt_handle, dtype=numpy.float32,
count=num_samples)
        except MemoryError:
            # assume a MemoryError only occurs once the file is exhausted
self.fdt_handle.close()
self.fdt_handle = None
return False, None, None
# True when EOF reached in last or current block
if samples.size < num_samples:
self.fdt_handle.close()
self.fdt_handle = None
if samples.size == 0:
return False, None, None
temp = samples
samples = numpy.zeros(num_samples)
numpy.put(samples, range(temp.size), temp)
# need channel x time matrix
samples = samples.reshape((self.stdblocksize, self.nChannels)).T
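        # e.g. a block arrives sample-major as [s0_ch0, s0_ch1, ..., s1_ch0, ...],
        # so reshaping to (stdblocksize, nChannels) and transposing yields the
        # desired (nChannels x stdblocksize) matrix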
###### READ MARKERS FROM FILE ######
        for l in range(self.current_marker_index, len(self.marker_times)):
            if self.marker_times[l] > self.latency + self._stdblocksize:
                # first marker beyond this block - resume here next time
                self.current_marker_index = l
                break
            rel_marker_pos = int(self.marker_times[l] - 1) % self._stdblocksize
            markers[rel_marker_pos] = self._markerids[self.marker_data[l]]
        else:
            # all markers consumed - without this, stale markers would be
            # re-placed at wrong positions in subsequent blocks
            self.current_marker_index = len(self.marker_times)
        # the stream position advances by one block either way
        self.latency += self._stdblocksize
        return True, samples, markers
class EEGReader(AbstractStreamReader):
""" Load raw EEG data in the .eeg brain products format
    This class does the task of parsing the
    .vhdr, .vmrk and .eeg/.dat files and then hands the data
    over to the corresponding windower, which
    iterates over the aggregated data.
"""
def __init__(self, abs_eegfile_path, blocksize=100, verbose=False):
self.abs_eegfile_path = abs_eegfile_path
self._stdblocksize = blocksize
self.eeg_handle = None
self.mrk_handle = None
self.eeg_dtype = numpy.int16
self.callbacks = list()
# variable names with capitalization correspond to
# structures members defined in RecorderRDA.h
self.nChannels, \
self._dSamplingInterval, \
self.resolutions, \
self._channelNames, \
self.channelids, \
self._markerids, \
self._markerNames, \
self.nmarkertypes = self.bp_meta()
if verbose:
print "channelNames:", self.channelNames, "\n"
print "channelids:", self.channelids, "\n"
print "markerNames:", self.markerNames, "\n"
print "markerids:", self.markerids, "\n"
print "resolutions:", self.resolutions, "\n"
# open the eeg-file
        if self.eeg_handle is None:
try:
self.eeg_handle = open(self.abs_eegfile_path + '.eeg', 'rb')
except IOError:
try:
self.eeg_handle = open(self.abs_eegfile_path + '.dat', 'rb')
except IOError:
raise IOError, "EEG-file [%s.{dat,eeg}] could not be opened!" % os.path.realpath(self.abs_eegfile_path)
self.callbacks = list()
self.ndsamples = None # last sample block read
self.ndmarkers = None # last marker block read
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames
@property
def markerNames(self):
return self._markerNames
    # This function gathers meta information from the .vhdr and .vmrk files.
    # Only the relevant information is then stored in variables that the
    # windower accesses during its initialisation phase.
def bp_meta(self):
nChannels = 0
dSamplingInterval = 0
resolutions = list()
channelNames = list()
channelids = dict()
markerids = dict()
markerNames = dict()
nmarkertypes = 0
        prefix = ''
        mrk_file = ''  # fallback if the .vhdr contains no MarkerFile entry
# helper function to convert resolutions
# 0 = 100 nV, 1 = 500 nV, 2 = 10 {mu}V, 3 = 152.6 {mu}V
def res_conv(num, res):
            # convert num to nV: 194 (0xC2) is the first byte of a UTF-8
            # encoded micro sign, i.e. the unit is microvolts
            if ord(res[0]) == 194:
                num = num*1000
if num <= 100: return 0
if num <= 500: return 1
if num <= 10000: return 2
return 3
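        # e.g. (hypothetical header entries): res_conv(0.5, '\xc2\xb5V') gives
        # 500 nV -> code 1, while res_conv(100, 'nV') stays at 100 nV -> code 0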
# Start with vhdr file
file_path = self.abs_eegfile_path + '.vhdr'
hdr = open(file_path)
for line in hdr:
if line.startswith(";"): continue
# Read the words between brackets like "[Common Infos]"
if line.startswith('['):
prefix = line.partition("[")[2].partition("]")[0].lower()
continue
if line.find("=") == -1: continue
# Common Infos and Binary Infos
if(prefix == 'common infos' or prefix == 'binary infos'):
key, value = line.split('=')
key = key.lower()
value = value.lower()
if(key == 'datafile'):
pass # something like filename.eeg
elif(key == 'markerfile'):
mrk_file = value
elif(key == 'dataformat'):
pass # usually BINARY
elif(key == 'dataorientation'):
eeg_data_or = value
elif(key == 'datatype'):
pass # something like TIMEDOMAIN
elif(key == 'numberofchannels'):
nChannels = int(value)
elif(key == 'datapoints'):
pass # the number of datapoints in the whole set
                elif(key == 'samplinginterval'):
                    # the .vhdr stores the interval in microseconds;
                    # convert it to a sampling rate in Hz
                    dSamplingInterval = int(1000000/float(value))
elif(key == 'binaryformat'):
if re.match("int_16", value, flags=re.IGNORECASE) == None:
self.eeg_dtype = numpy.float32
else:
self.eeg_dtype = numpy.int16
elif(key == 'usebigendianorder'):
bin_byteorder = value
# Channel Infos
# ; Each entry: Ch<Channel number>=<Name>,<Reference channel name>,
# ; <Resolution in "Unit">,<Unit>,
elif(prefix == 'channel infos'):
key, value = line.split('=')
if re.match("^[a-z]{2}[0-9]{1,3}", key, flags=re.IGNORECASE) == None:
continue
ch_id = int(re.findall(r'\d+', key)[0])
ch_name = value.split(',')[0]
ch_ref = value.split(',')[1]
if len(re.findall(r'\d+', value.split(',')[2])) == 0:
ch_res_f = 0
else:
ch_res_f = float(re.findall(r'\d+', value.split(',')[2])[0])
ch_res_unit = value.split(',')[3]
channelNames.append(ch_name)
channelids[ch_name] = ch_id
resolutions.append(res_conv(ch_res_f, ch_res_unit))
            # Everything that's left..
else:
#print "parsing finished!"
break
hdr.close()
# Continue with marker file
# Priority:
# 1: Path from .vhdr
# 2: Path constructed from eegfile path
prefix = ''
markerNames[0] = 'null'
        try:
            # the marker file path from the .vhdr is taken relative to the
            # directory of the header/eeg files
            self.mrk_handle = open(os.path.join(os.path.dirname(self.abs_eegfile_path), mrk_file.strip()))
except IOError:
try:
self.mrk_handle = open(self.abs_eegfile_path + '.vmrk')
except IOError:
raise IOError, str("Could not open [%s.vmrk]!" % os.path.realpath(self.abs_eegfile_path))
# Parse file
for line in self.mrk_handle:
if line.startswith(";"): continue
# Read the words between brackets like "[Common Infos]"
if line.startswith('['):
prefix = line.partition("[")[2].partition("]")[0].lower()
continue
if line.find("=") == -1: continue
if prefix == "marker infos":
mrk_name = line.split(',')[1]
if mrk_name != "" and mrk_name not in markerNames.values():
markerNames[len(markerNames)] = mrk_name
# rewinds the marker file
self.mrk_handle.seek(0, os.SEEK_SET)
# helper struct for finding markers
self.mrk_info = dict()
self.mrk_info['line'] = ""
self.mrk_info['position'] = 0
# advance to first marker line
        while re.match("^Mk1=", self.mrk_info['line'], re.IGNORECASE) is None:
try:
self.mrk_info['line'] = self.mrk_handle.next()
except StopIteration:
self.mrk_handle.close()
raise StopIteration, str("Reached EOF while searching for first Marker in [%s]" % os.path.realpath(self.mrk_handle.name))
        # TODO: Sort markerNames?
        def compare(x, y):
            return cmp(int(re.findall(r'\d+', x)[0]), int(re.findall(r'\d+', y)[0]))
for key in markerNames:
markerids[markerNames[key]] = key
        nmarkertypes = len(markerids)
return nChannels, \
dSamplingInterval, \
resolutions, \
channelNames, \
channelids, \
markerids, \
markerNames, \
               nmarkertypes
# This function reads the eeg-file and the marker-file for every
# block of data which is processed.
def bp_read(self, verbose=False):
        if self.eeg_handle is None:
return False, None, None
num_samples = self.nChannels*self.stdblocksize
markers = numpy.zeros(self.stdblocksize)
markers.fill(-1)
samples = numpy.zeros(num_samples)
###### READ EEG-DATA FROM FILE ######
try:
samples = numpy.fromfile(self.eeg_handle, dtype=self.eeg_dtype, count=num_samples)
        except MemoryError:
            # assume a MemoryError only occurs once the file is exhausted
self.eeg_handle.close()
self.eeg_handle = None
return False, None, None
# True when EEG-File's EOF reached in last or current block
if samples.size < num_samples:
self.eeg_handle.close()
self.eeg_handle = None
if samples.size == 0:
return False, None, None
temp = samples
samples = numpy.zeros(num_samples)
numpy.put(samples, range(temp.size), temp)
samples = samples.reshape((self.stdblocksize, self.nChannels))
samples = scipy.transpose(samples)
###### READ MARKERS FROM FILE ######
self.mrk_info['position'] += self.stdblocksize
mk_posi = 0
mk_desc = ""
while True:
mk = self.mrk_info['line'].split(',')
if len(mk) < 2 or mk[1] == "":
try:
self.mrk_info['line'] = self.mrk_handle.next()
                except StopIteration:
self.mrk_handle.close()
#self._log("WARNING: EOF[%s]\n" % os.path.realpath(self.mrk_handle.name))
break
continue
mk_desc = mk[1]
mk_posi = int(mk[2])
if mk_posi > self.mrk_info['position']:
break
# special treatment for 'malformed' markerfiles
mk_rel_position = (mk_posi-1) % self.stdblocksize
if markers[mk_rel_position] != -1 :
# store marker for next point
mk[2] = str(mk_posi+1)
self.mrk_info['line'] = ",".join(["%s" % (m) for m in mk])
#self._log(str("WARNING: shifted position of marker \"%s\" from %d to %d!\n" % (mk_desc, mk_posi, mk_posi+1)))
if mk_rel_position+1 > self.stdblocksize-1:
return True, samples, markers
else:
continue
else :
markers[mk_rel_position] = self.markerids[mk_desc]
self.mrk_info['line'] = ""
# try to read next line from markerfile
try:
self.mrk_info['line'] = self.mrk_handle.next()
            except StopIteration:
self.mrk_handle.close()
break
return True, samples, markers
# string representation with interesting information
def __str__(self):
return ("EEGReader Object (%d@%s)\n" + \
"\tEEG File:\t %s\n" + \
"\tMRK File:\t %s\n" + \
"\tFile Format:\t %s\n" + \
"\tBlocksize:\t %d\n" + \
"\tnChannels:\t %d\n") % (os.getpid(), os.uname()[1], os.path.realpath(self.eeg_handle.name),
os.path.realpath(self.mrk_handle.name), self.eeg_dtype,
self.stdblocksize, self.nChannels)
# Register callback function
def regcallback(self, func):
self.callbacks.append(func)
# Reads data from .eeg/.dat file until EOF
def read(self, nblocks=1, verbose=False):
self.stop = False
readblocks = 0
while (readblocks < nblocks or nblocks == -1):
ret, self.ndsamples, self.ndmarkers = self.bp_read()
if ret:
for f in self.callbacks:
f(self.ndsamples, self.ndmarkers)
else:
break
readblocks += 1
return readblocks
# --- katsdpscripts/rts_session.py (ska-sa/katsdpscripts, BSD-3-Clause) ---
###############################################################################
# SKA South Africa (http://ska.ac.za/) #
# Author: cam@ska.ac.za #
# Copyright @ 2013 SKA SA. All rights reserved. #
# #
# THIS SOFTWARE MAY NOT BE COPIED OR DISTRIBUTED IN ANY FORM WITHOUT THE #
# WRITTEN PERMISSION OF SKA SA. #
###############################################################################
"""CaptureSession encompassing data capturing and standard observations with RTS.
This defines the :class:`CaptureSession` class, which encompasses the capturing
of data and the performance of standard scans with the RTS system. It also
provides a fake :class:`TimeSession` class, which goes through the motions in
order to time them, but without performing any real actions.
"""
import time
import logging
import sys
import os.path
import numpy as np
import katpoint
# This is used to document available spherical projections (and set them in case of TimeSession)
from katcorelib.targets import Offset
from .array import Array
from .katcp_client import KATClient
from .defaults import user_logger, activity_logger
from katmisc.utils.utils import dynamic_doc
# Obtain list of spherical projections and the default projection from antenna proxy
projections, default_proj = Offset.PROJECTIONS.keys(), Offset.DEFAULT_PROJECTION
# Move default projection to front of list
projections.remove(default_proj)
projections.insert(0, default_proj)
def ant_array(kat, ants, name='ants'):
"""Create sub-array of antennas from flexible specification.
Parameters
----------
kat : :class:`utility.KATCoreConn` object
KAT connection object
ants : :class:`Array` or :class:`KATClient` object, or list, or string
Antennas specified by an Array object containing antenna devices, or
a single antenna device or a list of antenna devices, or a string of
comma-separated antenna names, or the string 'all' for all antennas
controlled via the KAT connection associated with this session
Returns
-------
array : :class:`Array` object
Array object containing selected antenna devices
Raises
------
ValueError
If antenna with a specified name is not found on KAT connection object
"""
if isinstance(ants, Array):
return ants
elif isinstance(ants, KATClient):
return Array(name, [ants])
elif isinstance(ants, basestring):
if ants.strip() == 'all':
return kat.ants
else:
try:
return Array(name, [getattr(kat, ant.strip()) for ant in ants.split(',')])
except AttributeError:
raise ValueError("Antenna '%s' not found (i.e. no kat.%s exists)" % (ant, ant))
else:
# The default assumes that *ants* is a list of antenna devices
return Array(name, ants)
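# Sketch of the accepted antenna specifications (antenna names hypothetical):
#   ant_array(kat, 'all')        -> every antenna on the KAT connection
#   ant_array(kat, 'm001,m002')  -> Array('ants', [kat.m001, kat.m002])
#   ant_array(kat, kat.m001)     -> a single device wrapped in an Array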
def report_compact_traceback(tb):
"""Produce a compact traceback report."""
print '--------------------------------------------------------'
print 'Session interrupted while doing (most recent call last):'
print '--------------------------------------------------------'
while tb:
f = tb.tb_frame
print '%s %s(), line %d' % (f.f_code.co_filename, f.f_code.co_name, f.f_lineno)
tb = tb.tb_next
print '--------------------------------------------------------'
class ScriptLogHandler(logging.Handler):
"""Logging handler that writes logging records to HDF5 file via ingest.
Parameters
----------
data : :class:`KATClient` object
Data proxy device for the session
"""
def __init__(self, data):
logging.Handler.__init__(self)
self.data = data
def emit(self, record):
"""Emit a logging record."""
try:
msg = self.format(record)
# XXX This probably has to go to cam2spead as a req/sensor combo [YES]
# self.data.req.k7w_script_log(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class ObsParams(dict):
"""Dictionary-ish that writes observation parameters to CAM SPEAD stream.
Parameters
----------
data : :class:`KATClient` object
Data proxy device for the session
product : string
Name of data product
"""
def __init__(self, data, product):
dict.__init__(self)
self.data = data
self.product = product
def __setitem__(self, key, value):
# XXX Changing data product name -> ID in a hard-coded fashion
self.data.req.set_obs_param(self.product, key, repr(value))
dict.__setitem__(self, key, value)
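# Usage sketch (product name and value hypothetical): every assignment is
# mirrored to the CAM SPEAD stream via data.req.set_obs_param, e.g.
#   obs_params = ObsParams(data, 'c856M32k')
#   obs_params['observer'] = 'jane'   # also sends ('observer', "'jane'")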
class RequestSensorError(Exception):
"""Critical request failed or critical sensor could not be read."""
pass
class CaptureSessionBase(object):
def get_ant_names(self):
return ','.join(co for co in self.kat.controlled_objects
if co in self.kat.katconfig.arrays['ants'])
class CaptureSession(CaptureSessionBase):
"""Context manager that encapsulates a single data capturing session.
A data capturing *session* results in a single data file, potentially
containing multiple scans and compound scans. An *experiment* may consist of
multiple sessions. This object ensures that the capturing process is
started and completed cleanly, even if exceptions occur during the session.
It also provides canned routines for simple observations such as tracks,
single scans and raster scans on a specific source.
The initialisation of the session object does basic preparation of the data
capturing subsystem (ingest) and logging. It tries to do the minimum to
enable data capturing. The experimental setup is usually completed by
calling :meth:`standard_setup` on the instantiated session object.
The actual data capturing only starts once :meth:`capture_start` is called.
Parameters
----------
kat : :class:`utility.KATCoreConn` object
KAT connection object associated with this experiment
product : string, optional
Data product (unchanged by default)
dump_rate : float, optional
Correlator dump rate, in Hz (will be set by default)
kwargs : dict, optional
Ignore any other keyword arguments (simplifies passing options as dict)
Raises
------
ValueError
If data proxy is not connected
RequestSensorError
If ingest system did not initialise or data product could not be selected
"""
def __init__(self, kat, product=None, dump_rate=1.0, **kwargs):
try:
self.kat = kat
# Hard-code the RTS data proxy for now
data, katsys = kat.data_rts, kat.sys
if not data.is_connected():
raise ValueError("Data proxy '%s' is not connected "
"(is the KAT system running?)" % (data.name,))
self.data = self.dbe = data
# Default settings for session parameters (in case standard_setup is not called)
self.ants = None
self.experiment_id = 'interactive'
self.stow_when_done = False
self.nd_params = {'diode': 'default', 'on': 0., 'off': 0., 'period': -1.}
self.last_nd_firing = 0.
self.output_file = ''
self.horizon = 3.0
# Requested dump period, replaced by actual value after capture started
self.dump_period = self._requested_dump_period = 1.0 / dump_rate
# # XXX last dump timestamp?
# self._end_of_previous_session = data.sensor.k7w_last_dump_timestamp.get_value()
# XXX Hard-code product name for now
self.product = 'c856M32k' if product is None else product
data.req.product_configure(self.product, dump_rate, timeout=330)
# Enable logging to the new HDF5 file via the usual logger (using same formatting and filtering)
self._script_log_handler = ScriptLogHandler(data)
if len(user_logger.handlers) > 0:
self._script_log_handler.setLevel(user_logger.handlers[0].level)
self._script_log_handler.setFormatter(user_logger.handlers[0].formatter)
user_logger.addHandler(self._script_log_handler)
user_logger.info('==========================')
user_logger.info('New data capturing session')
user_logger.info('--------------------------')
user_logger.info('Data proxy used = %s' % (data.name,))
user_logger.info('Data product = %s' % (self.product,))
# XXX file name? SB ID? Program block ID? -> [file via capture_done]
# # Obtain the name of the file currently being written to
# reply = data.req.k7w_get_current_file()
# outfile = reply[1] if reply.succeeded else '<unknown file>'
outfile = '<unknown file>'
user_logger.info('Opened output file = %s' % (outfile,))
user_logger.info('')
activity_logger.info("----- Script starting %s (%s). Output file %s" % (sys.argv[0], ' '.join(sys.argv[1:]), outfile))
# Log details of the script to the back-end
self.obs_params = ObsParams(data, self.product)
katsys.req.set_script_param('script-starttime', time.time())
katsys.req.set_script_param('script-endtime', '')
katsys.req.set_script_param('script-name', sys.argv[0])
katsys.req.set_script_param('script-arguments', ' '.join(sys.argv[1:]))
katsys.req.set_script_param('script-status', 'busy')
except Exception, e:
msg = 'CaptureSession failed to initialise (%s)' % (e,)
user_logger.error(msg)
activity_logger.info(msg)
if hasattr(self, '_script_log_handler'):
user_logger.removeHandler(self._script_log_handler)
raise
def __enter__(self):
"""Enter the data capturing session."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the data capturing session, closing the data file."""
if exc_value is not None:
exc_msg = str(exc_value)
msg = "Session interrupted by exception (%s%s)" % \
(exc_value.__class__.__name__,
(": '%s'" % (exc_msg,)) if exc_msg else '')
if exc_type is KeyboardInterrupt:
user_logger.warning(msg)
activity_logger.warning(msg)
else:
user_logger.error(msg, exc_info=True)
activity_logger.error(msg, exc_info=True)
self.end(interrupted=True)
else:
self.end(interrupted=False)
# Suppress KeyboardInterrupt so as not to scare the lay user,
# but allow other exceptions that occurred in the body of with-statement
if exc_type is KeyboardInterrupt:
report_compact_traceback(traceback)
return True
else:
return False
def get_centre_freq(self):
"""Get RF (sky) frequency associated with middle CBF channel.
Returns
-------
centre_freq : float
Actual centre frequency in MHz (or NaN if something went wrong)
"""
# XXX Something like this? [YES]
# return self.data.sensor.cbf_${product}_centerfrequency.get_value()
return 1284.0
def set_centre_freq(self, centre_freq):
"""Set RF (sky) frequency associated with middle CBF channel.
Parameters
----------
centre_freq : float
Desired centre frequency in MHz
"""
# XXX This will be a data product change instead...
pass
def standard_setup(self, observer, description, experiment_id=None,
nd_params=None, stow_when_done=None, horizon=None, **kwargs):
"""Perform basic experimental setup including antennas, LO and dump rate.
This performs the basic high-level setup that most experiments require.
It should usually be called as the first step in a new session
(unless the experiment has special requirements, such as holography).
        The antennas taking part in the experiment are derived from the
        controlled objects of the KAT connection (via :meth:`get_ant_names`)
        rather than passed in explicitly. The user identifies him/herself and
        describes the experiment. Optionally, the user may also set the noise
        diode firing strategy, stow behaviour and session horizon, amongst
        others. All optional settings are left unchanged if unspecified. The
        *observer* and *description* have no default, to force the user to
        document the observation to some extent.
Parameters
----------
observer : string
Name of person doing the observation
description : string
Short description of the purpose of the capturing session
experiment_id : string, optional
Experiment ID, a unique string used to link the data files of an
experiment together with blog entries, etc. (unchanged by default)
nd_params : dict, optional
Dictionary containing parameters that control firing of the noise
diode during canned commands. These parameters are in the form of
keyword-value pairs, and matches the parameters of the
:meth:`fire_noise_diode` method. This is unchanged by default
(typically disabling automatic firing).
stow_when_done : {False, True}, optional
If True, stow the antennas when the capture session completes
(unchanged by default)
horizon : float, optional
Elevation limit serving as horizon for session, in degrees
kwargs : dict, optional
Ignore any other keyword arguments (simplifies passing options as dict)
Raises
------
ValueError
If antenna with a specified name is not found on KAT connection object
RequestSensorError
            If the data centre frequency could not be set
"""
# Create references to allow easy copy-and-pasting from this function
session, kat, data, katsys = self, self.kat, self.data, self.kat.sys
session.ants = ants = ant_array(kat, self.get_ant_names())
ant_names = [ant.name for ant in ants]
# Override provided session parameters (or initialize them from existing parameters if not provided)
session.experiment_id = experiment_id = session.experiment_id if experiment_id is None else experiment_id
session.nd_params = nd_params = session.nd_params if nd_params is None else nd_params
session.stow_when_done = stow_when_done = session.stow_when_done if stow_when_done is None else stow_when_done
session.horizon = session.horizon if horizon is None else horizon
# Prep capturing system
data.req.capture_init(self.product)
# Setup strategies for the sensors we might be wait()ing on
ants.req.sensor_sampling('lock', 'event')
ants.req.sensor_sampling('scan.status', 'event')
ants.req.sensor_sampling('mode', 'event')
# XXX can we still get these sensors somewhere?
# data.req.sensor_sampling('k7w.spead_dump_period', 'event')
# data.req.sensor_sampling('k7w.last_dump_timestamp', 'event')
centre_freq = self.get_centre_freq()
# Check this...
# # The data proxy needs to know the dump period (in s) as well as the RF centre frequency
# # of 400-MHz downconverted band (in Hz), which is used for fringe stopping / delay tracking
# data.req.capture_setup(1. / dump_rate, session.get_centre_freq(200.0) * 1e6)
user_logger.info('Antennas used = %s' % (' '.join(ant_names),))
user_logger.info('Observer = %s' % (observer,))
user_logger.info("Description ='%s'" % (description,))
user_logger.info('Experiment ID = %s' % (experiment_id,))
user_logger.info('Data product = %s' % (self.product,))
user_logger.info("RF centre frequency = %g MHz, dump rate = %g Hz" % (centre_freq, 1.0 / self.dump_period))
if nd_params['period'] > 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s, every %g s if possible" % \
(nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period'])
elif nd_params['period'] == 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s at every opportunity" % \
(nd_params['diode'], nd_params['on'], nd_params['off'])
else:
nd_info = "Noise diode will not fire automatically"
user_logger.info(nd_info + " while performing canned commands")
# Send script options to SPEAD stream
self.obs_params['observer'] = observer
self.obs_params['description'] = description
self.obs_params['experiment_id'] = experiment_id
self.obs_params['nd_params'] = nd_params
self.obs_params['stow_when_done'] = stow_when_done
self.obs_params['horizon'] = session.horizon
self.obs_params['centre_freq'] = centre_freq
self.obs_params['product'] = self.product
self.obs_params.update(kwargs)
# Send script options to CAM system
katsys.req.set_script_param('script-ants', ','.join(ant_names))
katsys.req.set_script_param('script-observer', observer)
katsys.req.set_script_param('script-description', description)
katsys.req.set_script_param('script-experiment-id', experiment_id)
katsys.req.set_script_param('script-rf-params',
'Centre freq=%g MHz, Dump rate=%g Hz' % (centre_freq, 1.0 / self.dump_period))
katsys.req.set_script_param('script-nd-params', 'Diode=%s, On=%g s, Off=%g s, Period=%g s' %
(nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period']))
# If the CBF is simulated, it will have position update commands
if hasattr(data.req, 'cbf_pointing_az') and hasattr(data.req, 'cbf_pointing_el'):
def listener_actual_azim(update_seconds, value_seconds, status, value):
#Listener callback now includes status, use it here
if status == 'nominal':
data.req.cbf_pointing_az(value)
def listener_actual_elev(update_seconds, value_seconds, status, value):
#Listener callback now includes status, use it here
if status == 'nominal':
data.req.cbf_pointing_el(value)
first_ant = ants[0]
# The minimum time between position updates is fraction of dump period to ensure fresh data at every dump
update_period_seconds = 0.4 * self.dump_period
# Tell the position sensors to report their values periodically at this rate
first_ant.sensor.pos_actual_scan_azim.set_strategy('period', str(float(update_period_seconds)))
first_ant.sensor.pos_actual_scan_elev.set_strategy('period', str(float(update_period_seconds)))
# Tell the Data simulator where the first antenna is so that it can generate target flux at the right time
first_ant.sensor.pos_actual_scan_azim.register_listener(listener_actual_azim, update_period_seconds)
first_ant.sensor.pos_actual_scan_elev.register_listener(listener_actual_elev, update_period_seconds)
user_logger.info("CBF simulator receives position updates from antenna '%s'" % (first_ant.name,))
user_logger.info("--------------------------")
def capture_start(self):
"""Start capturing data to HDF5 file."""
# This starts the data product stream
self.data.req.capture_start(self.product)
def label(self, label):
"""Add timestamped label to HDF5 file.
The label is typically a single word used to indicate the start of a
new compound scan.
"""
if label:
# XXX Changing data product name -> ID in a hard-coded fashion
self.data.req.set_obs_label(self.product, label)
user_logger.info("New compound scan: '%s'" % (label,))
def on_target(self, target):
"""Determine whether antennas are tracking a given target.
If all connected antennas in the sub-array participating in the session
have the given *target* as target and are locked in mode 'POINT', we
conclude that the array is on target.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to check, as an object or description string
Returns
-------
on_target : {True, False}
True if antennas are tracking the given target
"""
if self.ants is None:
return False
# Turn target object into description string (or use string as is)
target = getattr(target, 'description', target)
for ant in self.ants:
# Ignore disconnected antennas or ones with missing sensors
if not ant.is_connected() or any([s not in ant.sensor for s in ('target', 'mode', 'lock')]):
continue
if (ant.sensor.target.get_value() != target) or (ant.sensor.mode.get_value() != 'POINT') or \
(ant.sensor.lock.get_value() != '1'):
return False
return True
def target_visible(self, target, duration=0., timeout=300.):
"""Check whether target is visible for given duration.
This checks whether the *target* is currently above the session horizon
and also above the horizon for the next *duration* seconds, taking into
account the *timeout* on slewing to the target. If the target is not
visible, an appropriate message is logged. The target location is not
very accurate, as it does not include refraction, and this is therefore
intended as a rough check only.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to check, as an object or description string
duration : float, optional
Duration of observation of target, in seconds
timeout : float, optional
Timeout involved when antenna cannot reach the target
Returns
-------
visible : {True, False}
True if target is visible from all antennas for entire duration
"""
if self.ants is None:
return False
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
horizon = katpoint.deg2rad(self.horizon)
# Include an average time to slew to the target (worst case about 90 seconds, so half that)
now = time.time() + 45.
average_el, visible_before, visible_after = [], [], []
# Ignore disconnected antennas or ones with missing sensors
ant_descriptions = [ant.sensor.observer.get_value() for ant in self.ants
if ant.is_connected() and 'observer' in ant.sensor]
# Also ignore antennas with empty or missing observer strings
antennas = [katpoint.Antenna(descr) for descr in ant_descriptions if descr]
if not antennas:
user_logger.warning("No usable antennas found - target '%s' assumed to be down" % (target.name,))
return False
for antenna in antennas:
az, el = target.azel(now, antenna)
average_el.append(katpoint.rad2deg(el))
# If not up yet, see if the target will pop out before the timeout
if el < horizon:
now += timeout
az, el = target.azel(now, antenna)
visible_before.append(el >= horizon)
# Check what happens at end of observation
az, el = target.azel(now + duration, antenna)
visible_after.append(el >= horizon)
if all(visible_before) and all(visible_after):
return True
always_invisible = any(~np.array(visible_before) & ~np.array(visible_after))
if always_invisible:
user_logger.warning("Target '%s' is never up during requested period (average elevation is %g degrees)" %
(target.name, np.mean(average_el)))
else:
user_logger.warning("Target '%s' will rise or set during requested period" % (target.name,))
return False
def fire_noise_diode(self, diode='coupler', on=10.0, off=10.0, period=0.0, align=True, announce=True):
"""Switch noise diode on and off.
This switches the selected noise diode on and off for all the antennas
doing the observation.
The on and off durations can be specified. Additionally, setting the
*period* allows the noise diode to be fired on a semi-regular basis. The
diode will only be fired if more than *period* seconds have elapsed since
the last firing. If *period* is 0, the diode is fired unconditionally.
On the other hand, if *period* is negative it is not fired at all.
Parameters
----------
diode : {'coupler', 'pin'}
Noise diode source to use (pin diode is situated in feed horn and
produces high-level signal, while coupler diode couples into
electronics after the feed at a much lower level)
on : float, optional
Minimum duration for which diode is switched on, in seconds
off : float, optional
Minimum duration for which diode is switched off, in seconds
period : float, optional
Minimum time between noise diode firings, in seconds. (The maximum
time is determined by the duration of individual slews and scans,
which are considered atomic and won't be interrupted.) If 0, fire
diode unconditionally. If negative, don't fire diode at all.
align : {True, False}, optional
True if noise diode transitions should be aligned with correlator
dump boundaries, or False if they should happen as soon as possible
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
fired : {True, False}
True if noise diode fired
Notes
-----
When the function returns, data will still be recorded to the HDF5 file.
The specified *off* duration is therefore a minimum value. Remember to
run :meth:`end` to close the file and finally stop the observation
(automatically done when this object is used in a with-statement)!
"""
# XXX This needs a rethink...
return False
#
# if self.ants is None:
# raise ValueError('No antennas specified for session - please run session.standard_setup first')
# # Create references to allow easy copy-and-pasting from this function
# session, kat, ants, data, dump_period = self, self.kat, self.ants, self.data, self.dump_period
#
# # Wait for the dump period to become known, as it is needed to set a good timeout for the first dump
# if dump_period == 0.0:
# if not data.wait('k7w_spead_dump_period', lambda sensor: sensor.value > 0, timeout=1.5 * session._requested_dump_period, poll_period=0.2 * session._requested_dump_period):
# dump_period = session.dump_period = session._requested_dump_period
# user_logger.warning('SPEAD metadata header is overdue at ingest - noise diode will be out of sync')
# else:
# # Get actual dump period in seconds (as opposed to the requested period)
# dump_period = session.dump_period = data.sensor.k7w_spead_dump_period.get_value()
# # This can still go wrong if the sensor times out - again fall back to requested period
# if dump_period is None:
# dump_period = session.dump_period = session._requested_dump_period
# user_logger.warning('Could not read actual dump period - noise diode will be out of sync')
# # Wait for the first correlator dump to appear, both as a check that capturing works and to align noise diode
# last_dump = data.sensor.k7w_last_dump_timestamp.get_value()
# if last_dump == session._end_of_previous_session or last_dump is None:
# user_logger.info('waiting for correlator dump to arrive')
# # Wait for the first correlator dump to appear
# if not data.wait('k7w_last_dump_timestamp', lambda sensor: sensor.value > session._end_of_previous_session,
# timeout=2.2 * dump_period, poll_period=0.2 * dump_period):
# last_dump = time.time()
# user_logger.warning('Correlator dump is overdue at k7_capture - noise diode will be out of sync')
# else:
# last_dump = data.sensor.k7w_last_dump_timestamp.get_value()
# if last_dump is None:
# last_dump = time.time()
# user_logger.warning('Could not read last dump timestamp - noise diode will be out of sync')
# else:
# user_logger.info('correlator dump arrived')
#
# # If period is non-negative, quit if it is not yet time to fire the noise diode
# if period < 0.0 or (time.time() - session.last_nd_firing) < period:
# return False
#
# if align:
# # Round "on" duration up to the nearest integer multiple of dump period
# on = np.ceil(float(on) / dump_period) * dump_period
# # The last fully complete dump is more than 1 dump period in the past
# next_dump = last_dump + 2 * dump_period
# # The delay in setting up noise diode firing - next dump should be at least this far in future
# lead_time = 0.25
# # Find next suitable dump boundary
# now = time.time()
# while next_dump < now + lead_time:
# next_dump += dump_period
#
# if announce:
# user_logger.info("Firing '%s' noise diode (%g seconds on, %g seconds off)" % (diode, on, off))
# else:
# user_logger.info('firing noise diode')
#
# if align:
# # Schedule noise diode switch-on on all antennas at the next suitable dump boundary
# ants.req.rfe3_rfe15_noise_source_on(diode, 1, 1000 * next_dump, 0)
# # If using Data simulator, fire the simulated noise diode for desired period to toggle power levels in output
# if hasattr(data.req, 'data_fire_nd') and dump_period > 0:
# time.sleep(max(next_dump - time.time(), 0))
# data.req.data_fire_nd(np.ceil(float(on) / dump_period))
# # Wait until the noise diode is on
# time.sleep(max(next_dump + 0.5 * on - time.time(), 0))
# # Schedule noise diode switch-off on all antennas a duration of "on" seconds later
# ants.req.rfe3_rfe15_noise_source_on(diode, 0, 1000 * (next_dump + on), 0)
# time.sleep(max(next_dump + on + off - time.time(), 0))
# # Mark on -> off transition as last firing
# session.last_nd_firing = next_dump + on
# else:
# # Switch noise diode on on all antennas
# ants.req.rfe3_rfe15_noise_source_on(diode, 1, 'now', 0)
# # If using Data simulator, fire the simulated noise diode for desired period to toggle power levels in output
# if hasattr(data.req, 'data_fire_nd'):
# data.req.data_fire_nd(np.ceil(float(on) / dump_period))
# time.sleep(on)
# # Mark on -> off transition as last firing
# session.last_nd_firing = time.time()
# # Switch noise diode off on all antennas
# ants.req.rfe3_rfe15_noise_source_on(diode, 0, 'now', 0)
# time.sleep(off)
#
# user_logger.info('noise diode fired')
# return True
def set_target(self, target):
"""Set target to use for tracking or scanning.
This sets the target on all antennas involved in the session, as well as
on the CBF (where it serves as delay-tracking centre). It also moves the
test target in the Data simulator to match the requested target (if it is
a stationary 'azel' type).
Parameters
----------
target : :class:`katpoint.Target` object or string
Target as an object or description string
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
ants, data = self.ants, self.data
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
# Set the antenna target (antennas will already move there if in mode 'POINT')
ants.req.target(target)
# Provide target to the data proxy, which will use it as delay-tracking center
# XXX No fringe stopping support in data_rts yet
# data.req.target(target)
# If using Data simulator and target is azel type, move test target here (allows changes in correlation power)
if hasattr(data.req, 'cbf_test_target') and target.body_type == 'azel':
azel = katpoint.rad2deg(np.array(target.azel()))
data.req.cbf_test_target(azel[0], azel[1], 100.)
def track(self, target, duration=20.0, announce=True):
"""Track a target.
This tracks the specified target with all antennas involved in the
session.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to track, as an object or description string
duration : float, optional
Minimum duration of track, in seconds
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
success : {True, False}
True if track was successfully completed
Notes
-----
When the function returns, the antennas will still track the target and
data will still be recorded to the HDF5 file. The specified *duration*
is therefore a minimum value. Remember to run :meth:`end` to close the
file and finally stop the observation (automatically done when this
object is used in a with-statement)!
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
session, ants = self, self.ants
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating %g-second track on target '%s'" % (duration, target.name))
if not session.target_visible(target, duration):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
session.set_target(target)
session.fire_noise_diode(announce=False, **session.nd_params)
# Avoid slewing if we are already on target
if not session.on_target(target):
user_logger.info('slewing to target')
# Start moving each antenna to the target
ants.req.mode('POINT')
# Wait until they are all in position (with 5 minute timeout)
ants.wait('lock', True, 300)
user_logger.info('target reached')
session.fire_noise_diode(announce=False, **session.nd_params)
user_logger.info('tracking target')
# Do nothing else for the duration of the track
time.sleep(duration)
user_logger.info('target tracked for %g seconds' % (duration,))
session.fire_noise_diode(announce=False, **session.nd_params)
return True
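    # Usage sketch inside a capture session (target string hypothetical):
    #   with CaptureSession(kat, dump_rate=1.0) as session:
    #       session.standard_setup('jane', 'gain calibration')
    #       session.capture_start()
    #       session.track('PKS 1934-63, radec, 19:39:25.03, -63:42:45.7',
    #                     duration=60.0)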
@dynamic_doc("', '".join(projections), default_proj)
def scan(self, target, duration=30.0, start=(-3.0, 0.0), end=(3.0, 0.0),
index=-1, projection=default_proj, announce=True):
"""Scan across a target.
This scans across a target with all antennas involved in the session.
The scan starts at an offset of *start* degrees from the target and ends
at an offset of *end* degrees. These offsets are calculated in a projected
coordinate system (see *Notes* below). The scan lasts for *duration*
seconds.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to scan across, as an object or description string
duration : float, optional
Minimum duration of scan across target, in seconds
start : sequence of 2 floats, optional
Initial scan position as (x, y) offset in degrees (see *Notes* below)
end : sequence of 2 floats, optional
Final scan position as (x, y) offset in degrees (see *Notes* below)
index : integer, optional
Scan index, used for display purposes when this is part of a raster
projection : {'%s'}, optional
Name of projection in which to perform scan relative to target
(default = '%s')
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
success : {True, False}
True if scan was successfully completed
Notes
-----
Take note that scanning is done in a projection on the celestial sphere,
and the scan start and end are in the projected coordinates. The azimuth
coordinate of a scan in azimuth will therefore change more than the
*start* and *end* parameters suggest, especially at high elevations
(unless the 'plate-carree' projection is used). This ensures that the
same scan parameters will lead to the same qualitative scan for any
position on the celestial sphere.
When the function returns, the antennas will still track the end-point of
the scan and data will still be recorded to the HDF5 file. The specified
*duration* is therefore a minimum value. Remember to run :meth:`end` to
close the file and finally stop the observation (automatically done when
this object is used in a with-statement)!
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
session, ants = self, self.ants
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
scan_name = 'scan' if index < 0 else 'scan %d' % (index,)
if announce:
user_logger.info("Initiating %g-second scan across target '%s'" % (duration, target.name))
if not session.target_visible(target, duration):
user_logger.warning("Skipping scan, as target '%s' will be below horizon" % (target.name,))
return False
session.set_target(target)
session.fire_noise_diode(announce=False, **session.nd_params)
user_logger.info('slewing to start of %s' % (scan_name,))
# Move each antenna to the start position of the scan
ants.req.scan_asym(start[0], start[1], end[0], end[1], duration, projection)
ants.req.mode('POINT')
# Wait until they are all in position (with 5 minute timeout)
ants.wait('lock', True, 300)
user_logger.info('start of %s reached' % (scan_name,))
session.fire_noise_diode(announce=False, **session.nd_params)
user_logger.info('performing %s' % (scan_name,))
# Start scanning the antennas
ants.req.mode('SCAN')
# Wait until they are all finished scanning (with 5 minute timeout)
ants.wait('scan_status', 'after', 300)
user_logger.info('%s complete' % (scan_name,))
session.fire_noise_diode(announce=False, **session.nd_params)
return True
@dynamic_doc("', '".join(projections), default_proj)
def raster_scan(self, target, num_scans=3, scan_duration=30.0, scan_extent=6.0, scan_spacing=0.5,
scan_in_azimuth=True, projection=default_proj, announce=True):
"""Perform raster scan on target.
A *raster scan* is a series of scans across a target performed by all
antennas involved in the session, scanning in either azimuth or
elevation while the other coordinate is changed in steps for each scan.
Each scan is offset by the same amount on both sides of the target along
the scanning coordinate (and therefore has the same extent), and the
scans are arranged symmetrically around the target in the non-scanning
(stepping) direction. If an odd number of scans are done, the middle
scan will therefore pass directly over the target. The default is to
scan in azimuth and step in elevation, leading to a series of horizontal
scans. Each scan is scanned in the opposite direction to the previous
scan to save time. Additionally, the first scan always starts at the top
left of the target, regardless of scan direction.
Parameters
----------
target : :class:`katpoint.Target` object or string
Target to scan across, as an object or description string
num_scans : integer, optional
Number of scans across target (an odd number is better, as this will
scan directly over the source during the middle scan)
scan_duration : float, optional
Minimum duration of each scan across target, in seconds
scan_extent : float, optional
Extent (angular length) of scan along scanning coordinate, in degrees
(see *Notes* below)
scan_spacing : float, optional
Separation between each consecutive scan along the coordinate that is
not scanned but stepped, in degrees
scan_in_azimuth : {True, False}
True if azimuth changes during scan while elevation remains fixed;
False if scanning in elevation and stepping in azimuth instead
projection : {'%s'}, optional
Name of projection in which to perform scan relative to target
(default = '%s')
announce : {True, False}, optional
True if start of action should be announced, with details of settings
Returns
-------
success : {True, False}
True if raster scan was successfully completed
Notes
-----
Take note that scanning is done in a projection on the celestial sphere,
and the scan extent and spacing apply to the projected coordinates.
The azimuth coordinate of a scan in azimuth will therefore change more
than the *scan_extent* parameter suggests, especially at high elevations.
This ensures that the same scan parameters will lead to the same
qualitative scan for any position on the celestial sphere.
When the function returns, the antennas will still track the end-point of
the last scan and data will still be recorded to the HDF5 file. The
specified *scan_duration* is therefore a minimum value. Remember to run
:meth:`end` to close the files and finally stop the observation
(automatically done when this object is used in a with-statement)!
"""
if self.ants is None:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
# Create references to allow easy copy-and-pasting from this function
session = self
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating raster scan (%d %g-second scans extending %g degrees) on target '%s'" %
(num_scans, scan_duration, scan_extent, target.name))
# Calculate average time that noise diode is operated per scan, to add to scan duration in check below
nd_time = session.nd_params['on'] + session.nd_params['off']
nd_time *= scan_duration / max(session.nd_params['period'], scan_duration)
nd_time = nd_time if session.nd_params['period'] >= 0 else 0.
# Check whether the target will be visible for entire duration of raster scan
if not session.target_visible(target, (scan_duration + nd_time) * num_scans):
user_logger.warning("Skipping raster scan, as target '%s' will be below horizon" % (target.name,))
return False
# Create start and end positions of each scan, based on scan parameters
        scan_levels = np.arange(-(num_scans // 2), num_scans // 2 + 1)
        # use a float base, as numpy disallows negative integer powers
        scanning_coord = (scan_extent / 2.0) * (-1.0) ** scan_levels
        stepping_coord = scan_spacing * scan_levels
# Flip sign of elevation offsets to ensure that the first scan always starts at the top left of target
scan_starts = zip(scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, -scanning_coord)
scan_ends = zip(-scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, scanning_coord)
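        # Worked example with the defaults (num_scans=3, scan_extent=6.0,
        # scan_spacing=0.5, scanning in azimuth): scan_levels = [-1, 0, 1], so
        #   scan_starts = [(-3.0, 0.5), (3.0, 0.0), (-3.0, -0.5)]
        #   scan_ends   = [(3.0, 0.5), (-3.0, 0.0), (3.0, -0.5)]
        # i.e. the first scan starts top left and directions alternate.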
# Perform multiple scans across the target
for scan_index, (start, end) in enumerate(zip(scan_starts, scan_ends)):
session.scan(target, duration=scan_duration, start=start, end=end,
index=scan_index, projection=projection, announce=False)
return True
def end(self, interrupted=False):
"""End the session, which stops data capturing and closes the data file.
This does not affect the antennas, which continue to perform their
last action (unless explicitly asked to stow).
Parameters
----------
interrupted : {False, True}, optional
True if session got interrupted via an exception
"""
try:
# Create references to allow easy copy-and-pasting from this function
session, ants, data, katsys = self, self.ants, self.data, self.kat.sys
# XXX still relevant? -> via [capture_done]
# # Obtain the name of the file currently being written to
# reply = data.req.k7w_get_current_file()
# outfile = reply[1].replace('writing', 'unaugmented') if reply.succeeded else '<unknown file>'
# user_logger.info('Scans complete, data captured to %s' % (outfile,))
# # The final output file name after augmentation
# session.output_file = os.path.basename(outfile).replace('.unaugmented', '')
# Stop the data flow
data.req.capture_stop(self.product)
# Stop streaming KATCP sensor updates to the capture thread
# data.req.katcp2spead_stop_stream()
user_logger.info('Ended data capturing session with experiment ID %s' % (session.experiment_id,))
katsys.req.set_script_param('script-endtime', time.time())
katsys.req.set_script_param('script-status', 'interrupted' if interrupted else 'completed')
activity_logger.info('Ended data capturing session (%s) with experiment ID %s' %
('interrupted' if interrupted else 'completed', session.experiment_id,))
if session.stow_when_done and self.ants is not None:
user_logger.info('stowing dishes')
activity_logger.info('Stowing dishes')
ants.req.mode('STOW')
user_logger.info('==========================')
finally:
# Disable logging to HDF5 file
user_logger.removeHandler(self._script_log_handler)
# Finally close the HDF5 file and prepare for augmentation after all logging and parameter settings are done
data.req.capture_done(self.product)
activity_logger.info("----- Script ended %s (%s)" % (sys.argv[0], ' '.join(sys.argv[1:])))
class TimeSession(CaptureSessionBase):
"""Fake CaptureSession object used to estimate the duration of an experiment."""
def __init__(self, kat, product=None, dump_rate=1.0, **kwargs):
self.kat = kat
self.data = kat.data_rts
# Default settings for session parameters (in case standard_setup is not called)
self.ants = None
self.experiment_id = 'interactive'
self.stow_when_done = False
self.nd_params = {'diode': 'coupler', 'on': 0., 'off': 0., 'period': -1.}
self.last_nd_firing = 0.
self.output_file = ''
self.dump_period = self._requested_dump_period = 1.0 / dump_rate
self.horizon = 3.0
self.start_time = self._end_of_previous_session = time.time()
self.time = self.start_time
self.projection = ('ARC', 0., 0.)
# Actual antenna elevation limit (as opposed to user-requested session horizon)
self.el_limit = 2.5
# Usurp time module functions that deal with the passage of real time, and connect them to session time instead
self._realtime, self._realsleep = time.time, time.sleep
time.time = lambda: self.time
def simsleep(seconds):
self.time += seconds
time.sleep = simsleep
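        # Added note: after this monkey-patch, time.sleep(10) merely advances
        # the simulated clock, e.g.
        #   t0 = time.time(); time.sleep(10); assert time.time() - t0 == 10
        # returns instantly while reporting 10 simulated seconds.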
self._fake_ants = []
        # Modify logging so that only stream handlers are active and timestamps are prefixed with a 'DRY-RUN' marker
for handler in user_logger.handlers:
if isinstance(handler, logging.StreamHandler):
form = handler.formatter
form.old_datefmt = form.datefmt
form.datefmt = 'DRY-RUN: ' + (form.datefmt if form.datefmt else '%Y-%m-%d %H:%M:%S')
else:
handler.old_level = handler.level
handler.setLevel(100)
user_logger.info('Estimating duration of experiment starting now (nothing real will happen!)')
user_logger.info('==========================')
user_logger.info('New data capturing session')
user_logger.info('--------------------------')
user_logger.info("Data proxy used = %s" % (self.data.name,))
if product is None:
user_logger.info('Data product = unknown to simulator')
else:
user_logger.info('Data product = %s' % (product,))
activity_logger.info("Timing simulation. ----- Script starting %s (%s). Output file None" % (sys.argv[0], ' '.join(sys.argv[1:])))
def __enter__(self):
"""Start time estimate, overriding the time module."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Finish time estimate, restoring the time module."""
self.end()
# Do not suppress any exceptions that occurred in the body of with-statement
return False
def _azel(self, target, timestamp, antenna):
"""Target (az, el) position in degrees (including offsets in degrees)."""
projection_type, x, y = self.projection
az, el = target.plane_to_sphere(katpoint.deg2rad(x), katpoint.deg2rad(y), timestamp, antenna, projection_type)
return katpoint.rad2deg(az), katpoint.rad2deg(el)
def _teleport_to(self, target, mode='POINT'):
"""Move antennas instantaneously onto target (or nearest point on horizon)."""
for m in range(len(self._fake_ants)):
antenna = self._fake_ants[m][0]
az, el = self._azel(target, self.time, antenna)
self._fake_ants[m] = (antenna, mode, az, max(el, self.el_limit))
def _slew_to(self, target, mode='POINT', timeout=300.):
"""Slew antennas to target (or nearest point on horizon), with timeout."""
slew_times = []
for ant, ant_mode, ant_az, ant_el in self._fake_ants:
def estimate_slew(timestamp):
"""Obtain instantaneous target position and estimate time to slew there."""
# Target position right now
az, el = self._azel(target, timestamp, ant)
# If target is below horizon, aim at closest point on horizon
az_dist, el_dist = np.abs(az - ant_az), np.abs(max(el, self.el_limit) - ant_el)
# Ignore azimuth wraps and drive strategies
az_dist = az_dist if az_dist < 180. else 360. - az_dist
# Assume az speed of 2 deg/s, el speed of 1 deg/s and overhead of 1 second
slew_time = max(0.5 * az_dist, 1.0 * el_dist) + 1.0
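                # Added worked example: az_dist=90 deg and el_dist=20 deg give
                # max(45.0, 20.0) + 1.0 = 46 seconds of simulated slewing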
return az, el, slew_time
# Initial estimate of slew time, based on a stationary target
az1, el1, slew_time = estimate_slew(self.time)
# Crude adjustment for target motion: chase target position for 2 iterations
az2, el2, slew_time = estimate_slew(self.time + slew_time)
az2, el2, slew_time = estimate_slew(self.time + slew_time)
# Ensure slew does not take longer than timeout
slew_time = min(slew_time, timeout)
# If source is below horizon, handle timeout and potential rise in that interval
if el2 < self.el_limit:
# Position after timeout
az_after_timeout, el_after_timeout = self._azel(target, self.time + timeout, ant)
# If source is still down, slew time == timeout, else estimate rise time through linear interpolation
slew_time = (self.el_limit - el1) / (el_after_timeout - el1) * timeout \
if el_after_timeout > self.el_limit else timeout
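                # Added worked example: with el1 = -1 deg, el after the timeout
                # = 5 deg, el_limit = 2.5 deg and timeout = 300 s, the target is
                # estimated to rise after (2.5 + 1) / (5 + 1) * 300 = 175 s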
az2, el2 = self._azel(target, self.time + slew_time, ant)
el2 = max(el2, self.el_limit)
slew_times.append(slew_time)
# print "%s slewing from (%.1f, %.1f) to (%.1f, %.1f) in %.1f seconds" % \
# (ant.name, ant_az, ant_el, az2, el2, slew_time)
# The overall slew time is the max for all antennas - adjust current time to reflect the slew
self.time += (np.max(slew_times) if len(slew_times) > 0 else 0.)
# Blindly assume all antennas are on target (or on horizon) after this interval
self._teleport_to(target, mode)
def get_centre_freq(self):
"""Get RF (sky) frequency associated with middle CBF channel.
Returns
-------
centre_freq : float
Actual centre frequency in MHz
"""
return 1284.0
def set_centre_freq(self, centre_freq):
"""Set RF (sky) frequency associated with middle CBF channel.
Parameters
----------
centre_freq : float
Desired centre frequency in MHz
"""
pass
def standard_setup(self, observer, description, experiment_id=None,
centre_freq=None, nd_params=None,
stow_when_done=None, horizon=None, no_mask=False, **kwargs):
"""Perform basic experimental setup including antennas, LO and dump rate."""
self.ants = ant_array(self.kat, self.get_ant_names())
for ant in self.ants:
try:
self._fake_ants.append((katpoint.Antenna(ant.sensor.observer.get_value()),
ant.sensor.mode.get_value(),
ant.sensor.pos_actual_scan_azim.get_value(),
ant.sensor.pos_actual_scan_elev.get_value()))
except AttributeError:
pass
# Override provided session parameters (or initialize them from existing parameters if not provided)
self.experiment_id = experiment_id = self.experiment_id if experiment_id is None else experiment_id
self.nd_params = nd_params = self.nd_params if nd_params is None else nd_params
self.stow_when_done = stow_when_done = self.stow_when_done if stow_when_done is None else stow_when_done
self.horizon = self.horizon if horizon is None else horizon
user_logger.info('Antennas used = %s' % (' '.join([ant[0].name for ant in self._fake_ants]),))
user_logger.info('Observer = %s' % (observer,))
user_logger.info("Description ='%s'" % (description,))
user_logger.info('Experiment ID = %s' % (experiment_id,))
        # There is no way to query the centre frequency in this fake session, so fall back to the hard-coded default
centre_freq = self.get_centre_freq()
if centre_freq is None:
user_logger.info('RF centre frequency = unknown to simulator, dump rate = %g Hz' % (1.0 / self.dump_period,))
else:
user_logger.info('RF centre frequency = %g MHz, dump rate = %g Hz' % (centre_freq, 1.0 / self.dump_period))
if nd_params['period'] > 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s, every %g s if possible" % \
(nd_params['diode'], nd_params['on'], nd_params['off'], nd_params['period'])
elif nd_params['period'] == 0:
nd_info = "Will switch '%s' noise diode on for %g s and off for %g s at every opportunity" % \
(nd_params['diode'], nd_params['on'], nd_params['off'])
else:
nd_info = "Noise diode will not fire automatically"
user_logger.info(nd_info + " while performing canned commands")
user_logger.info('--------------------------')
def capture_start(self):
"""Starting capture has no timing effect."""
pass
def label(self, label):
"""Adding label has no timing effect."""
if label:
user_logger.info("New compound scan: '%s'" % (label,))
def on_target(self, target):
"""Determine whether antennas are tracking a given target."""
if not self._fake_ants:
return False
for antenna, mode, ant_az, ant_el in self._fake_ants:
az, el = self._azel(target, self.time, antenna)
            # Checking for lock and checking for target identity are treated as one and the same check here
if (az != ant_az) or (el != ant_el) or (mode != 'POINT'):
return False
return True
def target_visible(self, target, duration=0., timeout=300., operation='scan'):
"""Check whether target is visible for given duration."""
if not self._fake_ants:
return False
# Convert description string to target object, or keep object as is
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
horizon = katpoint.deg2rad(self.horizon)
# Include an average time to slew to the target (worst case about 90 seconds, so half that)
now = self.time + 45.
average_el, visible_before, visible_after = [], [], []
for antenna, mode, ant_az, ant_el in self._fake_ants:
az, el = target.azel(now, antenna)
average_el.append(katpoint.rad2deg(el))
# If not up yet, see if the target will pop out before the timeout
if el < horizon:
now += timeout
az, el = target.azel(now, antenna)
visible_before.append(el >= horizon)
# Check what happens at end of observation
az, el = target.azel(now + duration, antenna)
visible_after.append(el >= horizon)
if all(visible_before) and all(visible_after):
return True
always_invisible = any(~np.array(visible_before) & ~np.array(visible_after))
if always_invisible:
user_logger.warning("Target '%s' is never up during requested period (average elevation is %g degrees)" %
(target.name, np.mean(average_el)))
else:
user_logger.warning("Target '%s' will rise or set during requested period" % (target.name,))
return False
def fire_noise_diode(self, diode='coupler', on=10.0, off=10.0, period=0.0, align=True, announce=True):
"""Estimate time taken to fire noise diode."""
return False
# XXX needs a rethink
# if not self._fake_ants:
# raise ValueError('No antennas specified for session - please run session.standard_setup first')
# if self.dump_period == 0.0:
# # Wait for the first correlator dump to appear
# user_logger.info('waiting for correlator dump to arrive')
# self.dump_period = self._requested_dump_period
# time.sleep(self.dump_period)
# user_logger.info('correlator dump arrived')
# if period < 0.0 or (self.time - self.last_nd_firing) < period:
# return False
# if announce:
# user_logger.info("Firing '%s' noise diode (%g seconds on, %g seconds off)" % (diode, on, off))
# else:
# user_logger.info('firing noise diode')
# self.time += on
# self.last_nd_firing = self.time + 0.
# self.time += off
# user_logger.info('fired noise diode')
# return True
def set_target(self, target):
"""Setting target has no timing effect."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
def track(self, target, duration=20.0, announce=True):
"""Estimate time taken to perform track."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating %g-second track on target '%s'" % (duration, target.name))
if not self.target_visible(target, duration):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
self.fire_noise_diode(announce=False, **self.nd_params)
if not self.on_target(target):
user_logger.info('slewing to target')
self._slew_to(target)
user_logger.info('target reached')
self.fire_noise_diode(announce=False, **self.nd_params)
user_logger.info('tracking target')
self.time += duration + 1.0
user_logger.info('target tracked for %g seconds' % (duration,))
self.fire_noise_diode(announce=False, **self.nd_params)
self._teleport_to(target)
return True
def scan(self, target, duration=30.0, start=(-3.0, 0.0), end=(3.0, 0.0),
index=-1, projection=default_proj, announce=True):
"""Estimate time taken to perform single linear scan."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
scan_name = 'scan' if index < 0 else 'scan %d' % (index,)
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
if announce:
user_logger.info("Initiating %g-second scan across target '%s'" % (duration, target.name))
if not self.target_visible(target, duration):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
self.fire_noise_diode(announce=False, **self.nd_params)
projection = Offset.PROJECTIONS[projection]
self.projection = (projection, start[0], start[1])
user_logger.info('slewing to start of %s' % (scan_name,))
self._slew_to(target, mode='SCAN')
user_logger.info('start of %s reached' % (scan_name,))
self.fire_noise_diode(announce=False, **self.nd_params)
        # Assume the antennas can keep up with the target (and that the scan is not too fast either)
user_logger.info('performing %s' % (scan_name,))
self.time += duration + 1.0
user_logger.info('%s complete' % (scan_name,))
self.fire_noise_diode(announce=False, **self.nd_params)
self.projection = (projection, end[0], end[1])
self._teleport_to(target)
return True
def raster_scan(self, target, num_scans=3, scan_duration=30.0, scan_extent=6.0, scan_spacing=0.5,
scan_in_azimuth=True, projection=default_proj, announce=True):
"""Estimate time taken to perform raster scan."""
if not self._fake_ants:
raise ValueError('No antennas specified for session - please run session.standard_setup first')
target = target if isinstance(target, katpoint.Target) else katpoint.Target(target)
projection = Offset.PROJECTIONS[projection]
if announce:
user_logger.info("Initiating raster scan (%d %g-second scans extending %g degrees) on target '%s'" %
(num_scans, scan_duration, scan_extent, target.name))
nd_time = self.nd_params['on'] + self.nd_params['off']
nd_time *= scan_duration / max(self.nd_params['period'], scan_duration)
nd_time = nd_time if self.nd_params['period'] >= 0 else 0.
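        # Added worked example: nd_params on=off=10 s, period=180 s and
        # scan_duration=30 s give nd_time = 20 * 30 / 180 = 3.33 s per scan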
if not self.target_visible(target, (scan_duration + nd_time) * num_scans):
user_logger.warning("Skipping track, as target '%s' will be below horizon" % (target.name,))
return False
# Create start and end positions of each scan, based on scan parameters
scan_levels = np.arange(-(num_scans // 2), num_scans // 2 + 1)
scanning_coord = (scan_extent / 2.0) * (-1) ** scan_levels
stepping_coord = scan_spacing * scan_levels
# Flip sign of elevation offsets to ensure that the first scan always starts at the top left of target
scan_starts = zip(scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, -scanning_coord)
scan_ends = zip(-scanning_coord, -stepping_coord) if scan_in_azimuth else zip(stepping_coord, scanning_coord)
self.fire_noise_diode(announce=False, **self.nd_params)
# Perform multiple scans across the target
for scan_index, (start, end) in enumerate(zip(scan_starts, scan_ends)):
self.projection = (projection, start[0], start[1])
user_logger.info('slewing to start of scan %d' % (scan_index,))
self._slew_to(target, mode='SCAN')
user_logger.info('start of scan %d reached' % (scan_index,))
self.fire_noise_diode(announce=False, **self.nd_params)
            # Assume the antennas can keep up with the target (and that the scan is not too fast either)
user_logger.info('performing scan %d' % (scan_index,))
self.time += scan_duration + 1.0
user_logger.info('scan %d complete' % (scan_index,))
self.fire_noise_diode(announce=False, **self.nd_params)
self.projection = (projection, end[0], end[1])
self._teleport_to(target)
return True
def end(self):
"""Stop data capturing to shut down the session and close the data file."""
user_logger.info('Scans complete, no data captured as this is a timing simulation...')
user_logger.info('Ended data capturing session with experiment ID %s' % (self.experiment_id,))
activity_logger.info('Timing simulation. Ended data capturing session with experiment ID %s' % (self.experiment_id,))
if self.stow_when_done and self._fake_ants:
user_logger.info("Stowing dishes.")
activity_logger.info('Timing simulation. Stowing dishes.')
self._teleport_to(katpoint.Target("azel, 0.0, 90.0"), mode="STOW")
user_logger.info('==========================')
duration = self.time - self.start_time
# Let KATCoreConn know how long the estimated observation time was.
self.kat.set_estimated_duration(duration)
if duration <= 100:
duration = '%d seconds' % (np.ceil(duration),)
elif duration <= 100 * 60:
duration = '%d minutes' % (np.ceil(duration / 60.),)
else:
duration = '%.1f hours' % (duration / 3600.,)
msg = "Experiment estimated to last %s until this time" % (duration,)
user_logger.info(msg + "\n")
activity_logger.info("Timing simulation. %s" % (msg,))
# Restore time module functions
time.time, time.sleep = self._realtime, self._realsleep
# Restore logging
for handler in user_logger.handlers:
if isinstance(handler, logging.StreamHandler):
handler.formatter.datefmt = handler.formatter.old_datefmt
del handler.formatter.old_datefmt
else:
handler.setLevel(handler.old_level)
del handler.old_level
activity_logger.info("Timing simulation. ----- Script ended %s (%s). Output file None" % (sys.argv[0], ' '.join(sys.argv[1:])))
| 50.14862 | 185 | 0.62869 | 9,177 | 70,860 | 4.738259 | 0.106789 | 0.024147 | 0.025113 | 0.007589 | 0.554332 | 0.51473 | 0.485408 | 0.463721 | 0.43279 | 0.410873 | 0 | 0.007403 | 0.277533 | 70,860 | 1,412 | 186 | 50.184136 | 0.841974 | 0.203274 | 0 | 0.525203 | 0 | 0.006504 | 0.145587 | 0.015092 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.00813 | 0.017886 | null | null | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e17fa16cc2830c70bdb6cc63e17b12437230ec42 | 499 | py | Python | projects/webptspy/apps/account/admin.py | codelieche/testing | 1f4a3393f761654d98588c9ba90596a307fa59db | [
"MIT"
] | 2 | 2017-08-10T03:40:22.000Z | 2017-08-17T13:20:16.000Z | projects/webptspy/apps/account/admin.py | codelieche/webpts | 1f4a3393f761654d98588c9ba90596a307fa59db | [
"MIT"
] | null | null | null | projects/webptspy/apps/account/admin.py | codelieche/webpts | 1f4a3393f761654d98588c9ba90596a307fa59db | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from django.contrib import admin
from .models import UserProfile
# Register your models here.
class UserProfileModelAdmin(admin.ModelAdmin):
"""
    User management model.
"""
list_display = ('id', 'username', 'nike_name', 'mobile',
'email', 'is_active')
list_filter = ('is_active',)
list_display_links = ('id', 'username')
search_fields = ('username', 'email', 'mobile', 'nike_name')
admin.site.register(UserProfile, UserProfileModelAdmin)
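# Added note: with this registration the admin change list shows the six
# list_display columns, links 'id' and 'username' to the change form, offers
# an is_active sidebar filter and searches username/email/mobile/nike_name.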
| 26.263158 | 64 | 0.653307 | 52 | 499 | 6.096154 | 0.615385 | 0.069401 | 0.07571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002488 | 0.194389 | 499 | 18 | 65 | 27.722222 | 0.78607 | 0.116232 | 0 | 0 | 0 | 0 | 0.20283 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.777778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
e183076e1912547a48c02bb69c7456b82ec312ba | 852 | py | Python | qualification_round_2017/C.py | asukakenji/codejam2018 | a519f522337d7faf3d07a84f6e24f0161f95c880 | [
"MIT"
] | null | null | null | qualification_round_2017/C.py | asukakenji/codejam2018 | a519f522337d7faf3d07a84f6e24f0161f95c880 | [
"MIT"
] | null | null | null | qualification_round_2017/C.py | asukakenji/codejam2018 | a519f522337d7faf3d07a84f6e24f0161f95c880 | [
"MIT"
] | null | null | null | # code jam: Qualification Round 2017: Problem C. Bathroom Stalls
def read_int():
return int(raw_input())
def read_int_n():
return map(int, raw_input().split())
def get_y_z(n, k):
if k == 1:
if n & 1 == 0:
# Even Number
return n >> 1, (n >> 1) - 1
else:
# Odd Number
return n >> 1, n >> 1
else:
if n & 1 == 0:
# Even Number
if k & 1 == 0:
# Even Number
return get_y_z(n >> 1, k >> 1)
else:
# Odd Number
return get_y_z((n >> 1) - 1, k >> 1)
else:
# Odd Number
return get_y_z(n >> 1, k >> 1)
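# Sanity check against the published sample cases (added comment):
#   get_y_z(4, 2) == (1, 0), get_y_z(5, 2) == (1, 0), get_y_z(6, 2) == (1, 1),
#   get_y_z(1000, 1000) == (0, 0) and get_y_z(1000, 1) == (500, 499)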
T = read_int()
x = 1
while x <= T:
N, K = read_int_n()
y, z = get_y_z(N, K)
print 'Case #{}: {} {}'.format(x, y, z)
x += 1
| 23.027027 | 64 | 0.419014 | 127 | 852 | 2.669291 | 0.275591 | 0.053097 | 0.073746 | 0.088496 | 0.477876 | 0.39528 | 0.230089 | 0.230089 | 0.230089 | 0.171091 | 0 | 0.052301 | 0.438967 | 852 | 36 | 65 | 23.666667 | 0.656904 | 0.153756 | 0 | 0.32 | 0 | 0 | 0.021038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e1842583bfd3115c7825344cdde05a9fbfaf3644 | 1,143 | py | Python | tests/integration/modules/test_vmc_vm_stats.py | kdsalvy/salt-ext-modules-vmware-1 | 9fdc941692e4c526f575f33b2ce23c1470582934 | [
"Apache-2.0"
] | 10 | 2021-11-02T20:24:44.000Z | 2022-03-11T05:54:27.000Z | tests/integration/modules/test_vmc_vm_stats.py | waynew/salt-ext-modules-vmware | 9f693382772061676c846c850df6ff508b7f3a91 | [
"Apache-2.0"
] | 83 | 2021-10-01T15:13:02.000Z | 2022-03-31T16:22:40.000Z | tests/integration/modules/test_vmc_vm_stats.py | waynew/salt-ext-modules-vmware | 9f693382772061676c846c850df6ff508b7f3a91 | [
"Apache-2.0"
] | 15 | 2021-09-30T23:17:27.000Z | 2022-03-23T06:54:22.000Z | """
Integration Tests for vmc_vm_stats execution module
"""
import pytest
@pytest.fixture
def vm_id(salt_call_cli, vmc_vcenter_connect):
ret = salt_call_cli.run("vmc_sddc.get_vms", **vmc_vcenter_connect)
vm_obj = ret.json[0]
return vm_obj["vm"]
def test_get_cpu_stats_for_vm_smoke_test(salt_call_cli, vmc_vcenter_connect, vm_id):
ret = salt_call_cli.run(
"vmc_vm_stats.get", vm_id=vm_id, stats_type="cpu", **vmc_vcenter_connect
)
assert ret is not None
assert "error" not in ret.json
def test_get_memory_stats_for_vm_smoke_test(salt_call_cli, vmc_vcenter_connect, vm_id):
ret = salt_call_cli.run(
"vmc_vm_stats.get", vm_id=vm_id, stats_type="memory", **vmc_vcenter_connect
)
assert ret is not None
assert "error" not in ret.json
def test_get_memory_stats_when_vm_does_not_exist(salt_call_cli, vmc_vcenter_connect):
ret = salt_call_cli.run(
"vmc_vm_stats.get", vm_id="vm-abc", stats_type="memory", **vmc_vcenter_connect
)
assert ret is not None
result = ret.json
assert "error" in result
assert result["error"]["error_type"] == "NOT_FOUND"
| 30.078947 | 87 | 0.724409 | 190 | 1,143 | 3.942105 | 0.231579 | 0.042724 | 0.11749 | 0.074766 | 0.690254 | 0.690254 | 0.690254 | 0.690254 | 0.690254 | 0.690254 | 0 | 0.001058 | 0.173228 | 1,143 | 37 | 88 | 30.891892 | 0.791534 | 0.044619 | 0 | 0.307692 | 0 | 0 | 0.116667 | 0 | 0 | 0 | 0 | 0 | 0.269231 | 1 | 0.153846 | false | 0 | 0.038462 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e1927fcb892725b69d50542f139eaa6330088fdc | 14,716 | py | Python | tracing/plugins/ath10k_pktlog.py | lumag/qca-swiss-army-knife | 5ede3cc07e9a52f115101c28f833242b772eeaab | [
"ISC"
] | 47 | 2016-05-20T02:33:26.000Z | 2022-03-02T01:48:57.000Z | tracing/plugins/ath10k_pktlog.py | lumag/qca-swiss-army-knife | 5ede3cc07e9a52f115101c28f833242b772eeaab | [
"ISC"
] | 7 | 2020-04-09T13:40:56.000Z | 2022-01-24T19:18:50.000Z | tracing/plugins/ath10k_pktlog.py | lumag/qca-swiss-army-knife | 5ede3cc07e9a52f115101c28f833242b772eeaab | [
"ISC"
] | 41 | 2016-04-19T06:31:14.000Z | 2022-03-30T06:25:09.000Z | #
# Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# trace-cmd pktlog plugin for ath10k, QCA Linux wireless driver
#
# TODO:
#
# o create a class for struct ieee80211_hdr and the other per-packet headers,
#   with pack() and unpack() methods
import struct
import binascii
DEBUG = 1
CUR_PKTLOG_VER = 10010
PKTLOG_MAGIC_NUM = 7735225
IEEE80211_FCTL_TODS = 0x0100
IEEE80211_FCTL_FROMDS = 0x0200
TARGET_NUM_MSDU_DESC = (1024 + 400)
MAX_PKT_INFO_MSDU_ID = 192
MAX_10_4_PKT_INFO_MSDU_ID = 1
PKTLOG_MAX_TXCTL_WORDS = 57
# must match with enum ath10k_hw_rev from ath10k and existing values
# should not change
ATH10K_PKTLOG_HW_QCA988X = 0
ATH10K_PKTLOG_HW_QCA6174 = 1
ATH10K_PKTLOG_HW_QCA99X0 = 2
ATH10K_PKTLOG_HW_QCA9888 = 3
ATH10K_PKTLOG_HW_QCA9984 = 4
ATH10K_PKTLOG_HW_QCA9377 = 5
ATH10K_PKTLOG_HW_QCA40XX = 6
ATH10K_PKTLOG_HW_QCA9887 = 7
ATH10K_PKTLOG_TYPE_TX_CTRL = 1
ATH10K_PKTLOG_TYPE_TX_STAT = 2
ATH10K_PKTLOG_TYPE_TX_MSDU_ID = 3
ATH10K_PKTLOG_TYPE_TX_FRM_HDR = 4
ATH10K_PKTLOG_TYPE_RX_STAT = 5
ATH10K_PKTLOG_TYPE_RC_FIND = 6
ATH10K_PKTLOG_TYPE_RC_UPDATE = 7
ATH10K_PKTLOG_TYPE_TX_VIRT_ADDR = 8
ATH10K_PKTLOG_TYPE_DBG_PRINT = 9
ATH10K_PKTLOG_FLG_TYPE_LOCAL_S = 0
ATH10K_PKTLOG_FLG_TYPE_REMOTE_S = 1
ATH10K_PKTLOG_FLG_TYPE_CLONE_S = 2
ATH10K_PKTLOG_FLG_TYPE_UNKNOWN_S = 3
# sizeof(ath10k_pktlog_txctl) = 12 + 4 * 57
ATH10K_PKTLOG_TXCTL_LEN = 240
ATH10K_PKTLOG_MAX_TXCTL_WORDS = 57
# sizeof(ath10k_pktlog_10_4_txctl) = 12 + 4 * 153
ATH10K_PKTLOG_10_4_TXCTL_LEN = 624
ATH10K_PKTLOG_10_4_MAX_TXCTL_WORDS = 153
msdu_len_tbl = {}
output_file = None
frm_hdr = None
def dbg(msg):
if DEBUG == 0:
return
print msg
def hexdump(buf, prefix=None):
s = binascii.b2a_hex(buf)
s_len = len(s)
result = ""
if prefix is None:
prefix = ""
for i in range(s_len / 2):
if i % 16 == 0:
result = result + ("%s%04x: " % (prefix, i))
result = result + (s[2 * i] + s[2 * i + 1] + " ")
        # Only break the line between groups of 16; this avoids the stray
        # trailing newline when len(buf) is a multiple of 16
        if (i + 1) % 16 == 0 and (i + 1) != s_len / 2:
            result = result + "\n"

    return result
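# Example (added): hexdump('\x00\x01\x02\x03', prefix='  ') returns
# "  0000: 00 01 02 03 " -- a new offset-prefixed line starts every 16 bytes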
# struct ath10k_pktlog_hdr {
# unsigned short flags;
# unsigned short missed_cnt;
# unsigned short log_type;
# unsigned short size;
# unsigned int timestamp;
# unsigned char payload[0];
# } __attribute__((__packed__));
class Ath10kPktlogHdr:
# 2 + 2 + 2 + 2 + 4 = 12
hdr_len = 12
struct_fmt = '<HHHHI'
def unpack(self, buf, offset=0):
(self.flags, self.missed_cnt, self.log_type,
self.size, self.timestamp) = struct.unpack_from(self.struct_fmt, buf, 0)
payload_len = len(buf) - self.hdr_len
if payload_len < self.size:
raise Exception('Payload length invalid: %d != %d' %
(payload_len, self.size))
self.payload = buf[self.hdr_len:]
# excludes payload, you have to write that separately!
def pack(self):
return struct.pack(self.struct_fmt,
self.flags,
self.missed_cnt,
self.log_type,
self.size,
self.timestamp)
def __str__(self):
return 'flags %04x miss %d log_type %d size %d timestamp %d\n' % \
(self.flags, self.missed_cnt,
self.log_type, self.size, self.timestamp)
def __init__(self):
self.flags = 0
self.missed_cnt = 0
self.log_type = 0
self.size = 0
self.timestamp = 0
self.payload = []
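# Illustrative round trip for the header class above (added comment; the
# field values are made up):
#   hdr = Ath10kPktlogHdr()
#   hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
#   hdr.timestamp = 12345
#   buf = hdr.pack()          # 12 little-endian bytes per struct_fmt
#   hdr2 = Ath10kPktlogHdr()
#   hdr2.unpack(buf)          # size is 0, so the empty payload is accepted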
# struct ath10k_pktlog_10_4_hdr {
# unsigned short flags;
# unsigned short missed_cnt;
# unsigned short log_type;
# unsigned short size;
# unsigned int timestamp;
# unsigned int type_specific_data;
# unsigned char payload[0];
# } __attribute__((__packed__));
class Ath10kPktlog_10_4_Hdr:
# 2 + 2 + 2 + 2 + 4 + 4 = 16
hdr_len = 16
struct_fmt = '<HHHHII'
def unpack(self, buf, offset=0):
(self.flags, self.missed_cnt, self.log_type,
self.size, self.timestamp, self.type_specific_data) = struct.unpack_from(self.struct_fmt, buf, 0)
payload_len = len(buf) - self.hdr_len
if payload_len != self.size:
raise Exception('Payload length invalid: %d != %d' %
(payload_len, self.size))
self.payload = buf[self.hdr_len:]
# excludes payload, you have to write that separately!
def pack(self):
return struct.pack(self.struct_fmt,
self.flags,
self.missed_cnt,
self.log_type,
self.size,
self.timestamp,
self.type_specific_data)
def __str__(self):
return 'flags %04x miss %d log_type %d size %d timestamp %d type_specific_data %d\n' % \
(self.flags, self.missed_cnt, self.log_type,
self.size, self.timestamp, self.type_specific_data)
def __init__(self):
self.flags = 0
self.missed_cnt = 0
self.log_type = 0
self.size = 0
self.timestamp = 0
self.type_specific_data = 0
self.payload = []
def output_open():
global output_file
# apparently no way to close the file as the python plugin doesn't
# have unregister() callback
output_file = open('pktlog.dat', 'wb')
buf = struct.pack('II', PKTLOG_MAGIC_NUM, CUR_PKTLOG_VER)
output_write(buf)
def output_write(buf):
global output_file
output_file.write(buf)
def pktlog_tx_frm_hdr(frame):
global frm_hdr
try:
# struct ieee80211_hdr
(frame_control, duration_id, addr1a, addr1b, addr1c, addr2a, addr2b, addr2c,
addr3a, addr3b, addr3c, seq_ctrl) = struct.unpack_from('<HHI2BI2BI2BH', frame, 0)
except struct.error as e:
dbg('failed to parse struct ieee80211_hdr: %s' % (e))
return
if frame_control & IEEE80211_FCTL_TODS:
bssid_tail = (addr1b << 8) | addr1c
sa_tail = (addr2b << 8) | addr2c
da_tail = (addr3b << 8) | addr3c
elif frame_control & IEEE80211_FCTL_FROMDS:
bssid_tail = (addr2b << 8) | addr2c
sa_tail = (addr3b << 8) | addr3c
da_tail = (addr1b << 8) | addr1c
else:
bssid_tail = (addr3b << 8) | addr3c
sa_tail = (addr2b << 8) | addr2c
da_tail = (addr1b << 8) | addr1c
resvd = 0
frm_hdr = struct.pack('HHHHHH', frame_control, seq_ctrl, bssid_tail,
sa_tail, da_tail, resvd)
dbg('frm_hdr %d B' % len(frm_hdr))
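# Added note on the '<HHI2BI2BI2BH' format above: each 6-byte 802.11 MAC
# address is read as a 4-byte word plus two single bytes, so the addrXb/addrXc
# pairs hold the last two octets that feed the bssid/sa/da *_tail fields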
def pktlog_tx_ctrl(buf, hw_type):
global frm_hdr
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
hdr.unpack(buf)
hdr.size = ATH10K_PKTLOG_TXCTL_LEN
num_txctls = ATH10K_PKTLOG_MAX_TXCTL_WORDS
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.unpack(buf)
hdr.size = ATH10K_PKTLOG_10_4_TXCTL_LEN
num_txctls = ATH10K_PKTLOG_10_4_MAX_TXCTL_WORDS
output_write(hdr.pack())
# write struct ath10k_pktlog_frame
if frm_hdr:
output_write(frm_hdr)
else:
tmp = struct.pack('HHHHHH', 0, 0, 0, 0, 0, 0)
output_write(tmp)
txdesc_ctl = hdr.payload[0:]
for i in range(num_txctls):
if len(txdesc_ctl) >= 4:
txctl, = struct.unpack_from('<I', txdesc_ctl)
txdesc_ctl = txdesc_ctl[4:]
else:
txctl = 0
output_write(struct.pack('I', txctl))
def pktlog_tx_msdu_id(buf, hw_type):
global msdu_len_tbl
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
hdr.unpack(buf)
hdr.size = 4 + (192 / 8) + 2 * 192
# write struct ath10k_pktlog_hdr
output_write(hdr.pack())
# parse struct msdu_id_info
# hdr (12) + num_msdu (4) + bound_bmap (24) = 40
msdu_info = hdr.payload[0:28]
id = hdr.payload[28:]
num_msdu, = struct.unpack_from('I', msdu_info)
output_write(msdu_info)
max_pkt_info_msdu_id = MAX_PKT_INFO_MSDU_ID
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.unpack(buf)
# write struct ath10k_pktlog_10_4_hdr
output_write(hdr.pack())
# parse struct msdu_id_info
# hdr (16) + num_msdu (4) + bound_bmap (1) = 21
msdu_info = hdr.payload[0:5]
id = hdr.payload[5:]
num_msdu, = struct.unpack_from('I', msdu_info)
output_write(msdu_info)
max_pkt_info_msdu_id = MAX_10_4_PKT_INFO_MSDU_ID
for i in range(max_pkt_info_msdu_id):
if num_msdu > 0:
num_msdu = num_msdu - 1
msdu_id, = struct.unpack_from('<H', id)
id = id[2:]
if msdu_id not in msdu_len_tbl:
dbg('msdu_id %d not found from msdu_len_tbl' % (msdu_id))
msdu_len = 0
else:
msdu_len = msdu_len_tbl[msdu_id]
else:
msdu_len = 0
output_write(struct.pack('H', msdu_len))
def ath10k_htt_pktlog_handler(pevent, trace_seq, event):
hw_type = int(event.get('hw_type', ATH10K_PKTLOG_HW_QCA988X))
buf = event['pktlog'].data
offset = 0
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX,
ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.unpack(buf, offset)
offset = offset + hdr.hdr_len
trace_seq.puts('%s\n' % (hdr))
if hdr.log_type == ATH10K_PKTLOG_TYPE_TX_FRM_HDR:
pktlog_tx_frm_hdr(buf[hdr.hdr_len:])
elif hdr.log_type == ATH10K_PKTLOG_TYPE_TX_CTRL:
pktlog_tx_ctrl(buf, hw_type)
elif hdr.log_type == ATH10K_PKTLOG_TYPE_TX_MSDU_ID:
pktlog_tx_msdu_id(buf, hw_type)
elif hdr.log_type == ATH10K_PKTLOG_TYPE_TX_STAT or \
hdr.log_type == ATH10K_PKTLOG_TYPE_RX_STAT or \
hdr.log_type == ATH10K_PKTLOG_TYPE_RC_FIND or \
hdr.log_type == ATH10K_PKTLOG_TYPE_RC_UPDATE:
output_write(buf[0: offset + hdr.size])
else:
pass
def ath10k_htt_rx_desc_handler(pevent, trace_seq, event):
hw_type = int(event.get('hw_type', ATH10K_PKTLOG_HW_QCA988X))
rxdesc = event['rxdesc'].data
trace_seq.puts('len %d\n' % (len(rxdesc)))
if hw_type == ATH10K_PKTLOG_HW_QCA988X:
hdr = Ath10kPktlogHdr()
hdr.flags = (1 << ATH10K_PKTLOG_FLG_TYPE_REMOTE_S)
hdr.missed_cnt = 0
hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
# rx_desc size for QCA988x chipsets is 248
hdr.size = 248
output_write(hdr.pack())
output_write(rxdesc[0: 32])
output_write(rxdesc[36: 56])
output_write(rxdesc[76: 208])
output_write(rxdesc[228:])
elif hw_type in [ATH10K_PKTLOG_HW_QCA99X0, ATH10K_PKTLOG_HW_QCA40XX]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.flags = (1 << ATH10K_PKTLOG_FLG_TYPE_REMOTE_S)
hdr.missed_cnt = 0
hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
hdr.type_specific_data = 0
hdr.size = len(rxdesc)
output_write(hdr.pack())
output_write(rxdesc)
elif hw_type in [ATH10K_PKTLOG_HW_QCA9888, ATH10K_PKTLOG_HW_QCA9984]:
hdr = Ath10kPktlog_10_4_Hdr()
hdr.flags = (1 << ATH10K_PKTLOG_FLG_TYPE_REMOTE_S)
hdr.missed_cnt = 0
hdr.log_type = ATH10K_PKTLOG_TYPE_RX_STAT
hdr.type_specific_data = 0
        # rx_desc size for QCA9984 and QCA9888 chipsets is 296
hdr.size = 296
output_write(hdr.pack())
output_write(rxdesc[0: 4])
output_write(rxdesc[4: 8])
output_write(rxdesc[12: 24])
output_write(rxdesc[24: 40])
output_write(rxdesc[44: 84])
output_write(rxdesc[100: 104])
output_write(rxdesc[104: 144])
output_write(rxdesc[144: 256])
output_write(rxdesc[292:])
def ath10k_htt_tx_handler(pevent, trace_seq, event):
global msdu_len_tbl
msdu_id = long(event['msdu_id'])
msdu_len = long(event['msdu_len'])
trace_seq.puts('msdu_id %d msdu_len %d\n' % (msdu_id, msdu_len))
if msdu_id > TARGET_NUM_MSDU_DESC:
dbg('Invalid msdu_id in tx: %d' % (msdu_id))
return
msdu_len_tbl[msdu_id] = msdu_len
def ath10k_txrx_tx_unref_handler(pevent, trace_seq, event):
global msdu_len_tbl
msdu_id = long(event['msdu_id'])
trace_seq.puts('msdu_id %d\n' % (msdu_id))
if msdu_id > TARGET_NUM_MSDU_DESC:
dbg('Invalid msdu_id from unref: %d' % (msdu_id))
return
msdu_len_tbl[msdu_id] = 0
def ath10k_tx_hdr_handler(pevent, trace_seq, event):
buf = event['data'].data
pktlog_tx_frm_hdr(buf[0:])
def register(pevent):
output_open()
pevent.register_event_handler("ath10k", "ath10k_htt_pktlog",
lambda *args:
ath10k_htt_pktlog_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_htt_rx_desc",
lambda *args:
ath10k_htt_rx_desc_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_htt_tx",
lambda *args:
ath10k_htt_tx_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_txrx_tx_unref",
lambda *args:
ath10k_txrx_tx_unref_handler(pevent, *args))
pevent.register_event_handler("ath10k", "ath10k_tx_hdr",
lambda *args:
ath10k_tx_hdr_handler(pevent, *args))
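# Usage (assumed): drop this file into the trace-cmd python plugin directory
# (typically ~/.trace-cmd/plugins/) and run 'trace-cmd report' on a trace
# containing ath10k events; register() is invoked on load and the handlers
# stream a pktlog.dat file alongside the textual report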
| 31.177966 | 106 | 0.632509 | 2,068 | 14,716 | 4.162959 | 0.152805 | 0.098966 | 0.048786 | 0.018585 | 0.573702 | 0.5018 | 0.462888 | 0.413288 | 0.389825 | 0.364851 | 0 | 0.066348 | 0.27589 | 14,716 | 471 | 107 | 31.244161 | 0.741554 | 0.149769 | 0 | 0.380192 | 0 | 0 | 0.049711 | 0 | 0 | 0 | 0.000964 | 0.002123 | 0 | 0 | null | null | 0.003195 | 0.00639 | null | null | 0.003195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e195525885c756f6c1eaa22f28ac15deda8bb369 | 2,886 | py | Python | bob/blitz/extension.py | bioidiap/bob.blitz | 348d7cf3866b549cac576efc3c6f3df24245d9fd | [
"BSD-3-Clause"
] | null | null | null | bob/blitz/extension.py | bioidiap/bob.blitz | 348d7cf3866b549cac576efc3c6f3df24245d9fd | [
"BSD-3-Clause"
] | 6 | 2015-01-01T09:15:28.000Z | 2016-10-20T08:09:26.000Z | bob/blitz/extension.py | bioidiap/bob.blitz | 348d7cf3866b549cac576efc3c6f3df24245d9fd | [
"BSD-3-Clause"
] | 3 | 2015-08-05T12:16:45.000Z | 2018-02-01T19:55:40.000Z | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Mon 18 Nov 21:38:19 2013
"""Extension building for using this package
"""
import numpy
from pkg_resources import resource_filename
from bob.extension import Extension as BobExtension
# forward the build_ext command from bob.extension
from bob.extension import build_ext, Library as BobLibrary
from distutils.version import LooseVersion
class Extension(BobExtension):
"""Extension building with pkg-config packages and blitz.array.
See the documentation for :py:class:`distutils.extension.Extension` for more
details on input parameters.
"""
def __init__(self, *args, **kwargs):
"""Initialize the extension with parameters.
    This extension adds ``blitz>=0.10`` and ``boost`` as requirements for
    extensions derived from this class.
See the help for :py:class:`bob.extension.Extension` for more details on
options.
"""
require = ['blitz>=0.10', 'boost']
kwargs.setdefault('packages', []).extend(require)
self_include_dir = resource_filename(__name__, 'include')
kwargs.setdefault('system_include_dirs', []).append(numpy.get_include())
kwargs.setdefault('include_dirs', []).append(self_include_dir)
macros = [
("PY_ARRAY_UNIQUE_SYMBOL", "BOB_NUMPY_C_API"),
("NO_IMPORT_ARRAY", "1"),
]
if LooseVersion(numpy.__version__) >= LooseVersion('1.7'):
macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
kwargs.setdefault('define_macros', []).extend(macros)
# Run the constructor for the base class
BobExtension.__init__(self, *args, **kwargs)
class Library (BobLibrary):
"""Pure C++ library building with blitz array.
See the documentation for :py:class:`bob.extension.Extension` for more
details on input parameters.
"""
def __init__(self, *args, **kwargs):
"""Initialize the library with parameters.
    This library adds ``blitz>=0.10`` and ``boost`` as requirements for
    libraries derived from this class.
See the help for :py:class:`bob.extension.Library` for more details on
options.
"""
require = ['blitz>=0.10', 'boost']
kwargs.setdefault('packages', []).extend(require)
self_include_dir = resource_filename(__name__, 'include')
kwargs.setdefault('system_include_dirs', []).append(numpy.get_include())
kwargs.setdefault('include_dirs', []).append(self_include_dir)
# TODO: are these macros required for pure C++ builds?
macros = [
("PY_ARRAY_UNIQUE_SYMBOL", "BOB_NUMPY_C_API"),
("NO_IMPORT_ARRAY", "1"),
]
if LooseVersion(numpy.__version__) >= LooseVersion('1.7'):
macros.append(("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"))
kwargs.setdefault('define_macros', []).extend(macros)
# Run the constructor for the base class
BobLibrary.__init__(self, *args, **kwargs)
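# Hypothetical setup.py sketch (added for illustration; package and module
# names are assumed):
#
#   from bob.blitz.extension import Extension, build_ext
#   setup(
#       ...,
#       ext_modules=[Extension('mypkg._library', ['mypkg/main.cpp'],
#                              packages=['lapack'])],
#       cmdclass={'build_ext': build_ext},
#   )
#
# The constructor then appends 'blitz>=0.10' and 'boost' to the pkg-config
# requirements and wires in the numpy and package include directories.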
| 31.032258 | 79 | 0.698545 | 370 | 2,886 | 5.216216 | 0.291892 | 0.066321 | 0.020725 | 0.033161 | 0.662176 | 0.662176 | 0.662176 | 0.662176 | 0.596891 | 0.596891 | 0 | 0.014706 | 0.175329 | 2,886 | 92 | 80 | 31.369565 | 0.796218 | 0.372141 | 0 | 0.685714 | 0 | 0 | 0.198606 | 0.049942 | 0 | 0 | 0 | 0.01087 | 0 | 1 | 0.057143 | false | 0 | 0.2 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |