Columns (name: dtype):
hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64 | qsc_code_num_chars_quality_signal: float64 | qsc_code_mean_word_length_quality_signal: float64 | qsc_code_frac_words_unique_quality_signal: float64 | qsc_code_frac_chars_top_2grams_quality_signal: float64 | qsc_code_frac_chars_top_3grams_quality_signal: float64 | qsc_code_frac_chars_top_4grams_quality_signal: float64 | qsc_code_frac_chars_dupe_5grams_quality_signal: float64 | qsc_code_frac_chars_dupe_6grams_quality_signal: float64 | qsc_code_frac_chars_dupe_7grams_quality_signal: float64 | qsc_code_frac_chars_dupe_8grams_quality_signal: float64 | qsc_code_frac_chars_dupe_9grams_quality_signal: float64 | qsc_code_frac_chars_dupe_10grams_quality_signal: float64 | qsc_code_frac_chars_replacement_symbols_quality_signal: float64 | qsc_code_frac_chars_digital_quality_signal: float64 | qsc_code_frac_chars_whitespace_quality_signal: float64 | qsc_code_size_file_byte_quality_signal: float64 | qsc_code_num_lines_quality_signal: float64 | qsc_code_num_chars_line_max_quality_signal: float64 | qsc_code_num_chars_line_mean_quality_signal: float64 | qsc_code_frac_chars_alphabet_quality_signal: float64 | qsc_code_frac_chars_comments_quality_signal: float64 | qsc_code_cate_xml_start_quality_signal: float64 | qsc_code_frac_lines_dupe_lines_quality_signal: float64 | qsc_code_cate_autogen_quality_signal: float64 | qsc_code_frac_lines_long_string_quality_signal: float64 | qsc_code_frac_chars_string_length_quality_signal: float64 | qsc_code_frac_chars_long_word_length_quality_signal: float64 | qsc_code_frac_lines_string_concat_quality_signal: float64 | qsc_code_cate_encoded_data_quality_signal: float64 | qsc_code_frac_chars_hex_words_quality_signal: float64 | qsc_code_frac_lines_prompt_comments_quality_signal: float64 | qsc_code_frac_lines_assert_quality_signal: float64 | qsc_codepython_cate_ast_quality_signal: float64 | qsc_codepython_frac_lines_func_ratio_quality_signal: float64 | qsc_codepython_cate_var_zero_quality_signal: bool | qsc_codepython_frac_lines_pass_quality_signal: float64 | qsc_codepython_frac_lines_import_quality_signal: float64 | qsc_codepython_frac_lines_simplefunc_quality_signal: float64 | qsc_codepython_score_lines_no_logic_quality_signal: float64 | qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64 | qsc_code_num_chars: int64 | qsc_code_mean_word_length: int64 | qsc_code_frac_words_unique: null | qsc_code_frac_chars_top_2grams: int64 | qsc_code_frac_chars_top_3grams: int64 | qsc_code_frac_chars_top_4grams: int64 | qsc_code_frac_chars_dupe_5grams: int64 | qsc_code_frac_chars_dupe_6grams: int64 | qsc_code_frac_chars_dupe_7grams: int64 | qsc_code_frac_chars_dupe_8grams: int64 | qsc_code_frac_chars_dupe_9grams: int64 | qsc_code_frac_chars_dupe_10grams: int64 | qsc_code_frac_chars_replacement_symbols: int64 | qsc_code_frac_chars_digital: int64 | qsc_code_frac_chars_whitespace: int64 | qsc_code_size_file_byte: int64 | qsc_code_num_lines: int64 | qsc_code_num_chars_line_max: int64 | qsc_code_num_chars_line_mean: int64 | qsc_code_frac_chars_alphabet: int64 | qsc_code_frac_chars_comments: int64 | qsc_code_cate_xml_start: int64 | qsc_code_frac_lines_dupe_lines: int64 | qsc_code_cate_autogen: int64 | qsc_code_frac_lines_long_string: int64 | qsc_code_frac_chars_string_length: int64 | qsc_code_frac_chars_long_word_length: int64 | qsc_code_frac_lines_string_concat: null | qsc_code_cate_encoded_data: int64 | qsc_code_frac_chars_hex_words: int64 | qsc_code_frac_lines_prompt_comments: int64 | qsc_code_frac_lines_assert: int64 | qsc_codepython_cate_ast: int64 | qsc_codepython_frac_lines_func_ratio: int64 | qsc_codepython_cate_var_zero: int64 | qsc_codepython_frac_lines_pass: int64 | qsc_codepython_frac_lines_import: int64 | qsc_codepython_frac_lines_simplefunc: int64 | qsc_codepython_score_lines_no_logic: int64 | qsc_codepython_frac_lines_print: int64
effective: string | hits: int64
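As a quick orientation, the sketch below shows one way records with this schema could be inspected once they are available locally. The file name code_sample.parquet and the filtering thresholds are hypothetical; the column names come from the schema above, and the reading of cate_autogen == 0 as "not auto-generated" is an assumption.

# Minimal sketch (assumptions noted above): load rows with this schema from a
# local Parquet file and keep Python files with few duplicated 10-grams.
import pandas as pd

df = pd.read_parquet("code_sample.parquet")  # hypothetical file name

mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)  # assumed: 0 = not auto-generated
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
)
for _, row in df[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])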
hexsha: 6b188f11fc196c24fb2215879e63681e45f8138c | size: 5,585 | ext: py | lang: Python
max_stars: repo_path amazon/goods_review_thread.py | repo_name JoanLee0826/amazon | head_hexsha 13fcbcb0e9e396af6d4b2287c2a1a06fd602ce98 | licenses ["MIT"] | count 5 | stars_event_min 2019-09-26T02:39:20.000Z | stars_event_max 2021-04-05T13:19:49.000Z
max_issues: repo_path amazon/goods_review_thread.py | repo_name JoanLee0826/amazon | head_hexsha 13fcbcb0e9e396af6d4b2287c2a1a06fd602ce98 | licenses ["MIT"] | count null | issues_event_min null | issues_event_max null
max_forks: repo_path amazon/goods_review_thread.py | repo_name JoanLee0826/amazon | head_hexsha 13fcbcb0e9e396af6d4b2287c2a1a06fd602ce98 | licenses ["MIT"] | count 3 | forks_event_min 2020-01-08T08:53:32.000Z | forks_event_max 2021-06-04T17:06:34.000Z
content:
import pandas as pd
import requests
from lxml import etree
import re, time, random, datetime
from queue import Queue
import threading
class Review:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"
}
proxies = {
"http": "http://117.91.131.74:9999",
}
def __init__(self, domain):
self.view_list = []
self.page_list = []
self.url_queue = Queue()
if domain.strip().lower() == 'jp':
self.row_url = "https://www.amazon.co.jp"
        elif domain.strip().lower() == 'com':
self.row_url = "https://www.amazon.com"
self.s = requests.Session()
self.s.get(url=self.row_url, headers=self.headers, proxies=self.proxies)
def get_review(self, url):
res = self.s.get(url, headers=self.headers, proxies=self.proxies)
if res.status_code != 200:
print("请求出错,状态码为:%s" % res.status_code)
print(res.text)
return
res_html = etree.HTML(res.text)
        # Name of the product being reviewed
view_goods = res_html.xpath('//span[@class="a-list-item"]/a/text()')[0]
        # Review container elements
view_con = res_html.xpath('//div[@class="a-section review aok-relative"]')
for each_view in view_con:
            # Reviewer name
view_name = each_view.xpath('.//span[@class="a-profile-name"]/text()')[0]
view_star_raw = each_view.xpath('.//div[@class="a-row"]/a[@class="a-link-normal"]/@title')[0]
            # Star rating
view_star = view_star_raw.split(' ')[0]
            # Review title
view_title = each_view.xpath('.//a[@data-hook="review-title"]/span/text()')[0]
            # Review date
view_date = each_view.xpath('.//span[@data-hook="review-date"]/text()')[0]
view_format = each_view.xpath('.//a[@data-hook="format-strip"]/text()')
view_colour = None
view_size = None
try:
for each in view_format:
if re.search("color|colour|色", each, re.I):
view_colour = each.split(':')[1].strip()
if re.search("size|style|サイズ", each, re.I):
view_size = each.split(":")[1].strip()
except:
pass
            # Review body
view_body = each_view.xpath('string(.//span[@data-hook="review-body"]/span)')
            # Helpful vote count
try:
view_useful_raw = each_view.xpath('.//span[@data-hook="helpful-vote-statement"]/text()')[0]
view_useful = view_useful_raw.split(' ')[0]
if view_useful == 'one':
view_useful = 1
try:
view_useful = int(view_useful)
except:
pass
except:
view_useful = 0
            # Assemble the review record for this product
each_view_list = [view_goods, view_name, view_star, view_title, view_date, view_colour, view_size,
view_body, view_useful]
self.view_list.append(each_view_list)
# print(self.view_list[-1])
def run(self, data):
goods_data = pd.read_excel(data, encoding='utf-8')
base_url = self.row_url + "/product-reviews/"
# goods_data.drop_duplicates(subset=['r','评价数量'],inplace=True)
for each_asin, each_count in zip(goods_data['ASIN'][5:50], goods_data['goods_review_count'][5:50]):
if each_asin and int(each_count) > 0:
if int(each_count) % 10 == 0:
end_page = int(each_count) // 10 + 1
else:
end_page = int(each_count) // 10 + 2
for page in range(1, end_page):
if page == 1:
url = base_url + each_asin
else:
url = base_url + each_asin + '?pageNumber=' + str(page)
self.url_queue.put(url)
print("review_page_%d" % page, url)
time.sleep(1.5)
while True:
try:
review_threads = [threading.Thread(target=self.get_review, args=(self.url_queue.get(),))
for m in range(30) if not self.url_queue.empty()]
for each in review_threads:
each.start()
print("队列剩余数量", self.url_queue.qsize())
for each in review_threads:
each.join()
except:
print("请求链接出错,重试中...")
pass
time.sleep(random.uniform(0.5,2.1))
if self.url_queue.empty():
break
view_goods_pd = pd.DataFrame(self.view_list,
columns=['review_goods', 'review_name', 'review_star', 'review_title',
'review_date', 'review_colour', 'review_size', 'review_body',
'review_useful'])
view_goods_pd.drop_duplicates(subset=['review_name', 'review_date','review_body'], inplace=True)
aft = datetime.datetime.now().strftime('%m%d%H%M')
file_name = r'../data/goods_review/' + "reviews_" + aft + ".xlsx"
view_goods_pd.to_excel(file_name, encoding='utf-8', engine='xlsxwriter')
print("共获取评论数量:", len(self.view_list))
if __name__ == '__main__':
data = r"../data/category/Kid's Weighted Blankets_08_28_13_22.xlsx"
review = Review(domain='com')
review.run(data=data)
avg_line_length: 39.055944 | max_line_length: 110 | alphanum_fraction: 0.520859
qsc_*_quality_signal values, in schema order: 676 | 5,585 | 4.096154 | 0.298817 | 0.028891 | 0.032864 | 0.018418 | 0.126399 | 0.113398 | 0.028169 | 0 | 0 | 0 | 0 | 0.02555 | 0.341271 | 5,585 | 142 | 111 | 39.330986 | 0.7271 | 0.025604 | 0 | 0.136364 | 0 | 0.009091 | 0.164088 | 0.081031 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027273 | false | 0.027273 | 0.054545 | 0 | 0.118182 | 0.054545
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
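As a sanity check on what the simplest of these signals measure, here is a rough sketch of how two of them could be recomputed from the record's content field. The exact definitions used to produce the dataset are not given here, so treat these as assumptions.

# Rough sketch, not the official signal implementation (definitions assumed).
def alphanum_fraction(content: str) -> float:
    # Fraction of characters that are alphanumeric.
    return sum(c.isalnum() for c in content) / max(len(content), 1)

def frac_chars_whitespace(content: str) -> float:
    # Fraction of characters that are whitespace.
    return sum(c.isspace() for c in content) / max(len(content), 1)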
hexsha: 6b199ca74af9fa333d99b4deab665ee6ec19fa62 | size: 1,032 | ext: py | lang: Python
max_stars: repo_path lumicks/pylake/population/tests/conftest.py | repo_name lumicks/pylake | head_hexsha b5875d156d6416793a371198f3f2590fca2be4cd | licenses ["Apache-2.0"] | count 8 | stars_event_min 2019-02-18T07:56:39.000Z | stars_event_max 2022-03-19T01:14:48.000Z
max_issues: repo_path lumicks/pylake/population/tests/conftest.py | repo_name lumicks/pylake | head_hexsha b5875d156d6416793a371198f3f2590fca2be4cd | licenses ["Apache-2.0"] | count 42 | issues_event_min 2018-11-30T14:40:35.000Z | issues_event_max 2022-03-29T11:43:45.000Z
max_forks: repo_path lumicks/pylake/population/tests/conftest.py | repo_name lumicks/pylake | head_hexsha b5875d156d6416793a371198f3f2590fca2be4cd | licenses ["Apache-2.0"] | count 4 | forks_event_min 2019-01-09T13:45:53.000Z | forks_event_max 2021-07-06T14:06:52.000Z
content:
import pytest
import numpy as np
from pathlib import Path
def extract_param(data, n_states):
keys = ("initial_state_prob", "transition_prob", "means", "st_devs")
param = {"n_states": n_states}
for key in keys:
param[key] = data[f"{key}_{n_states}"]
return param
@pytest.fixture(scope="session", params=[2, 3, 4])
def trace_lownoise(request):
"""Trace data can be generated by running ./data/generate_trace_data.py """
data = np.load(Path(__file__).parent / "data/trace_data.npz")
n_states = request.param
param = extract_param(data, n_states)
y = data[f"y_{n_states}"]
sp = data[f"sp_{n_states}"]
return y, sp, param
@pytest.fixture(scope="session")
def trace_simple(request):
"""Trace data can be generated by running ./data/generate_trace_data.py """
data = np.load(Path(__file__).parent / "data/trace_data.npz")
n_states = 2
param = extract_param(data, n_states)
y = data[f"y_{n_states}"]
sp = data[f"sp_{n_states}"]
return y, sp, param
avg_line_length: 25.8 | max_line_length: 79 | alphanum_fraction: 0.666667
qsc_*_quality_signal values, in schema order: 158 | 1,032 | 4.101266 | 0.322785 | 0.12963 | 0.074074 | 0.078704 | 0.679012 | 0.558642 | 0.558642 | 0.558642 | 0.558642 | 0.558642 | 0 | 0.004779 | 0.188953 | 1,032 | 39 | 80 | 26.461538 | 0.769415 | 0.133721 | 0 | 0.4 | 0 | 0 | 0.193878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.12 | 0 | 0.36 | 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
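The conftest.py in this record defines session-scoped fixtures that hand back generated trace data. A hypothetical test consuming trace_simple might look like the sketch below; the assumption that y and sp have one entry per observation is mine, not the library's.

# Hypothetical test file sitting next to the conftest above.
def test_trace_simple_has_two_states(trace_simple):
    y, sp, param = trace_simple
    assert param["n_states"] == 2
    assert len(y) == len(sp)  # assumed: one state label per observation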
hexsha: 6b1f0f890c358afb721298af5289d546925c2ca1 | size: 42,279 | ext: py | lang: Python
max_stars: repo_path lisa/target.py | repo_name mrkajetanp/lisa | head_hexsha 15cfbc430f46b59f52a9d13769d0f6791ed6f154 | licenses ["Apache-2.0"] | count null | stars_event_min null | stars_event_max null
max_issues: repo_path lisa/target.py | repo_name mrkajetanp/lisa | head_hexsha 15cfbc430f46b59f52a9d13769d0f6791ed6f154 | licenses ["Apache-2.0"] | count null | issues_event_min null | issues_event_max null
max_forks: repo_path lisa/target.py | repo_name mrkajetanp/lisa | head_hexsha 15cfbc430f46b59f52a9d13769d0f6791ed6f154 | licenses ["Apache-2.0"] | count null | forks_event_min null | forks_event_max null
content:
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2018, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime
import os
import os.path
import contextlib
import shlex
from collections.abc import Mapping
import copy
import sys
import argparse
import textwrap
import functools
import inspect
import pickle
import tempfile
from types import ModuleType, FunctionType
from operator import itemgetter
import devlib
from devlib.exception import TargetStableError
from devlib.utils.misc import which
from devlib.platform.gem5 import Gem5SimulationPlatform
from lisa.utils import Loggable, HideExekallID, resolve_dotted_name, get_subclasses, import_all_submodules, LISA_HOME, RESULT_DIR, LATEST_LINK, setup_logging, ArtifactPath, nullcontext, ExekallTaggable, memoized
from lisa.assets import ASSETS_PATH
from lisa.conf import SimpleMultiSrcConf, KeyDesc, LevelKeyDesc, TopLevelKeyDesc,Configurable
from lisa.generic import TypedList
from lisa.platforms.platinfo import PlatformInfo
class PasswordKeyDesc(KeyDesc):
def pretty_format(self, v):
return '<password>'
# Make sure all submodules of devlib.module are imported so the classes
# are all created before we list them
import_all_submodules(devlib.module)
_DEVLIB_AVAILABLE_MODULES = {
cls.name
for cls in get_subclasses(devlib.module.Module)
if (
getattr(cls, 'name', None)
# early modules try to connect to UART and do very
# platform-specific things we are not interested in
and getattr(cls, 'stage') != 'early'
)
}
class TargetConf(SimpleMultiSrcConf, HideExekallID):
"""
Target connection settings.
Only keys defined below are allowed, with the given meaning and type:
{generated_help}
An instance can be created by calling :class:`~TargetConf` with a
dictionary. The top-level `target-conf` key is not needed here:
.. code-block:: python
TargetConf({{
'name': 'myboard',
'host': 192.0.2.1,
'kind': 'linux',
'username': 'foo',
'password': 'bar',
}})
Or alternatively, from a YAML configuration file:
Content of target_conf.yml:
.. literalinclude:: ../target_conf.yml
:language: YAML
::
TargetConf.from_yaml_map('target_conf.yml')
The following special YAML tags can be used in the configuration file:
.. code-block:: YAML
target-conf:
# "!env:<type> ENV_VAR_NAME" can be used to reference an
# environment variable.
name: !env:str BOARD_NAME
port: !env:int PORT
    .. note:: Only load trusted YAML files as it can lead to arbitrary code
execution.
.. note:: That structure in a YAML file is allowed and will work:
* file foo.yml::
target-conf:
name: myboard
* file bar.yml::
target-conf:
!include foo.yml
This will result in that structure which would normally be invalid, but
is handled as a special case::
target-conf:
target-conf:
name: myboard
"""
STRUCTURE = TopLevelKeyDesc('target-conf', 'target connection settings', (
        KeyDesc('name', 'Board name, free-form value only used to embellish logs', [str]),
KeyDesc('kind', 'Target kind. Can be "linux" (ssh) or "android" (adb)', [str]),
KeyDesc('host', 'Hostname or IP address of the host', [str, None]),
KeyDesc('username', 'SSH username. On ADB connections, "root" username will root adb upon target connection', [str, None]),
PasswordKeyDesc('password', 'SSH password', [str, None]),
KeyDesc('port', 'SSH or ADB server port', [int, None]),
KeyDesc('device', 'ADB device. Takes precedence over "host"', [str, None]),
KeyDesc('keyfile', 'SSH private key file', [str, None]),
KeyDesc('strict-host-check', 'Equivalent to StrictHostKeyChecking option of OpenSSH', [bool, None]),
KeyDesc('workdir', 'Remote target workdir', [str]),
KeyDesc('tools', 'List of tools to install on the target', [TypedList[str]]),
        KeyDesc('lazy-platinfo', 'Lazily autodetect the platform information to speed up the connection', [bool]),
LevelKeyDesc('wait-boot', 'Wait for the target to finish booting', (
KeyDesc('enable', 'Enable the boot check', [bool]),
KeyDesc('timeout', 'Timeout of the boot check', [int]),
)),
LevelKeyDesc('devlib', 'devlib configuration', (
# Using textual name of the Platform allows this YAML configuration
# to not use any python-specific YAML tags, so TargetConf files can
# be parsed and produced by any other third-party code
LevelKeyDesc('platform', 'devlib.platform.Platform subclass specification', (
KeyDesc('class', 'Name of the class to use', [str]),
KeyDesc('args', 'Keyword arguments to build the Platform object', [Mapping]),
)),
KeyDesc('excluded-modules', 'List of devlib modules to *not* load', [TypedList[str]]),
KeyDesc('file-xfer', 'File transfer method. Can be "sftp" (default) or "scp". (Only valid for linux targets)', [TypedList[str]]),
))
))
DEFAULT_SRC = {
'devlib': {
'platform': {
'class': 'devlib.platform.Platform'
}
}
}
class Target(Loggable, HideExekallID, ExekallTaggable, Configurable):
"""
Wrap :class:`devlib.target.Target` to provide additional features on top of
it.
{configurable_params}
:param devlib_platform: Instance of :class:`devlib.platform.Platform` to
use to build the :class:`devlib.target.Target`
:type devlib_platform: devlib.platform.Platform
:param plat_info: Platform information attached to this target, for the
benefits of user code.
:type plat_info: lisa.platforms.platinfo.PlatformInfo
You need to provide the information needed to connect to the
target. For SSH targets that means "host", "username" and
either "password" or "keyfile". All other fields are optional if
the relevant features aren't needed.
.. note:: The wrapping of :class:`devlib.target.Target` is done using
composition, as opposed to inheritance. This allows swapping the exact
class used under the hood, and avoids messing up with ``devlib``
internal members.
"""
ADB_PORT_DEFAULT = 5555
SSH_PORT_DEFAULT = 22
CRITICAL_TASKS = {
'linux': [
'init',
            # We want to freeze everything except PID 1; we don't want to leave
            # systemd-journald or systemd-timesyncd running.
'systemd[^-]',
'dbus',
'sh',
'ssh',
'rsyslogd',
'jbd2'
],
'android': [
'sh', 'adbd',
'usb', 'transport',
# We don't actually need this task but on Google Pixel it apparently
# cannot be frozen, so the cgroup state gets stuck in FREEZING if we
# try to freeze it.
'thermal-engine',
# Similar issue with HiKey960, the board will crash if this is frozen
# for too long.
'watchdogd',
]
}
"""
Dictionary mapping OS name to list of task names that we can't afford to
freeze when using :meth:`freeze_userspace`.
"""
CONF_CLASS = TargetConf
INIT_KWARGS_KEY_MAP = {
'devlib_excluded_modules': ['devlib', 'excluded-modules'],
'devlib_file_xfer': ['devlib', 'file-xfer'],
'wait_boot': ['wait-boot', 'enable'],
'wait_boot_timeout': ['wait-boot', 'timeout'],
}
def __init__(self, kind, name='<noname>', tools=[], res_dir=None,
plat_info=None, lazy_platinfo=False, workdir=None, device=None, host=None, port=None,
username=None, password=None, keyfile=None, strict_host_check=None,
devlib_platform=None, devlib_excluded_modules=[], devlib_file_xfer=None,
wait_boot=True, wait_boot_timeout=10,
):
# pylint: disable=dangerous-default-value
super().__init__()
logger = self.get_logger()
self.name = name
res_dir = res_dir if res_dir else self._get_res_dir(
root=os.path.join(LISA_HOME, RESULT_DIR),
relative='',
name=f'{self.__class__.__qualname__}-{self.name}',
append_time=True,
symlink=True
)
self._res_dir = res_dir
os.makedirs(self._res_dir, exist_ok=True)
if os.listdir(self._res_dir):
raise ValueError(f'res_dir must be empty: {self._res_dir}')
if plat_info is None:
plat_info = PlatformInfo()
else:
# Make a copy of the PlatformInfo so we don't modify the original
# one we were passed when adding the target source to it
plat_info = copy.copy(plat_info)
logger.info(f'User-defined platform information:\n{plat_info}')
self.plat_info = plat_info
# Take the board name from the target configuration so it becomes
# available for later inspection. That board name is mostly free form
# and no specific value should be expected for a given kind of board
# (i.e. a Juno board might be named "foo-bar-juno-on-my-desk")
if name:
self.plat_info.add_src('target-conf', dict(name=name))
        # Determine file transfer method. Currently available options
# are 'sftp' and 'scp', defaults to sftp.
if devlib_file_xfer and devlib_file_xfer not in ('scp', 'sftp'):
raise ValueError(f'Invalid file transfer method: {devlib_file_xfer}')
use_scp = devlib_file_xfer == 'scp'
self._installed_tools = set()
self.target = self._init_target(
kind=kind,
name=name,
workdir=workdir,
device=device,
host=host,
port=port,
username=username,
password=password,
keyfile=keyfile,
strict_host_check=strict_host_check,
use_scp=use_scp,
devlib_platform=devlib_platform,
wait_boot=wait_boot,
wait_boot_timeout=wait_boot_timeout,
)
devlib_excluded_modules = set(devlib_excluded_modules)
# Sorry, can't let you do that. Messing with cgroups in a systemd
        # system is a pretty bad idea.
if self._uses_systemd:
logger.warning('Will not load cgroups devlib module: target is using systemd, which already uses cgroups')
devlib_excluded_modules.add('cgroups')
self._devlib_loadable_modules = _DEVLIB_AVAILABLE_MODULES - devlib_excluded_modules
# Initialize binary tools to deploy
if tools:
logger.info(f'Tools to install: {tools}')
self.install_tools(tools)
# Autodetect information from the target, after the Target is
# initialized. Expensive computations are deferred so they will only be
# computed when actually needed.
rta_calib_res_dir = ArtifactPath.join(self._res_dir, 'rta_calib')
os.makedirs(rta_calib_res_dir)
self.plat_info.add_target_src(self, rta_calib_res_dir, deferred=lazy_platinfo, fallback=True)
logger.info(f'Effective platform information:\n{self.plat_info}')
@property
@memoized
def _uses_systemd(self):
try:
# Check if systemd is being used, according to:
# https://www.freedesktop.org/software/systemd/man/sd_booted.html
self.execute('test -d /run/systemd/system/', check_exit_code=True)
except TargetStableError:
return False
else:
return True
def is_module_available(self, module):
"""
Check if the given devlib module is available.
:returns: ``True`` if module is available, ``False`` otherwise.
:param module: Devlib module to check.
:type module: str
.. note:: This will attempt to load the module if it's not loaded
already, and bail out if it fails to load.
"""
if module not in _DEVLIB_AVAILABLE_MODULES:
raise ValueError(f'"{module}" is not a devlib module')
try:
getattr(self, module)
except Exception: # pylint: disable=broad-except
return False
else:
return True
def __getattr__(self, attr):
"""
        Forward all non-overridden attributes/method accesses to the underlying
:class:`devlib.target.Target`.
.. note:: That will not forward special methods like __str__, since the
interpreter bypasses __getattr__ when looking them up.
.. note:: Devlib modules are loaded on demand when accessed.
"""
def get():
return getattr(self.target, attr)
try:
return get()
except AttributeError:
# Load the module on demand
if attr in self._devlib_loadable_modules:
self.get_logger().info(f'Loading target devlib module {attr}')
self.target.install_module(attr)
return get()
# If it was not in the loadable list, it
# has been excluded explicitly
elif attr in _DEVLIB_AVAILABLE_MODULES:
# pylint: disable=raise-missing-from
raise AttributeError(f'Devlib target module {attr} was explicitly excluded, not loading it')
# Something else that does not exist ...
else:
raise
def __dir__(self):
"""
List our attributes plus the ones from the underlying target, and the
devlib modules that could be loaded on-demand.
"""
attrs = set(super().__dir__()) | set(dir(self.target)) | self._devlib_loadable_modules
return sorted(attrs)
@classmethod
def from_conf(cls, conf: TargetConf, res_dir: ArtifactPath = None, plat_info: PlatformInfo = None) -> 'Target':
cls.get_logger().info(f'Target configuration:\n{conf}')
kwargs = cls.conf_to_init_kwargs(conf)
kwargs['res_dir'] = res_dir
kwargs['plat_info'] = plat_info
# Create a devlib Platform instance out of the configuration file
devlib_platform_conf = conf['devlib']['platform']
devlib_platform_cls = resolve_dotted_name(devlib_platform_conf['class'])
devlib_platform_kwargs = copy.copy(devlib_platform_conf.get('args', {}))
# Hack for Gem5 devlib Platform, that requires a "host_output_dir"
# argument computed at runtime.
# Note: lisa.target.Gem5SimulationPlatformWrapper should be used instead
# of the original one to benefit from mapping configuration
if issubclass(devlib_platform_cls, Gem5SimulationPlatform):
devlib_platform_kwargs.setdefault('host_output_dir', res_dir)
# Actually build the devlib Platform object
devlib_platform = devlib_platform_cls(**devlib_platform_kwargs)
kwargs['devlib_platform'] = devlib_platform
cls.check_init_param(**kwargs)
return cls(**kwargs)
@classmethod
def from_default_conf(cls):
"""
Create a :class:`Target` from the YAML configuration file pointed by
``LISA_CONF`` environment variable.
        .. note:: Only load trusted YAML files as it can lead to arbitrary code
execution.
"""
path = os.environ['LISA_CONF']
return cls.from_one_conf(path)
@classmethod
def from_one_conf(cls, path):
"""
Create a :class:`Target` from a single YAML configuration file.
This file will be used to provide a :class:`TargetConf` and
:class:`lisa.platforms.platinfo.PlatformInfo` instances.
        .. note:: Only load trusted YAML files as it can lead to arbitrary code
execution.
"""
conf = TargetConf.from_yaml_map(path)
try:
plat_info = PlatformInfo.from_yaml_map(path)
except Exception as e: # pylint: disable=broad-except
cls.get_logger().warning(f'No platform information could be found: {e}')
plat_info = None
return cls.from_conf(conf=conf, plat_info=plat_info)
@classmethod
# Keep the signature without *args and **kwargs so that it's usable by exekall
def from_cli(cls, argv=None, params=None) -> 'Target':
"""
Same as :meth:`from_custom_cli` without the custom parameters
capabilities.
:return: A connected :class:`Target`
"""
_, target = cls.from_custom_cli(argv=argv, params=params)
return target
@classmethod
def from_custom_cli(cls, argv=None, params=None):
"""
Create a Target from command line arguments.
:param argv: The list of arguments. ``sys.argv[1:]`` will be used if
this is ``None``.
:type argv: list(str)
:param params: Dictionary of custom parameters to add to the parser. It
is in the form of
``{param_name: {dict of ArgumentParser.add_argument() options}}``.
:type params: dict(str, dict)
:return: A tuple ``(args, target)``
.. note:: This method should not be relied upon to implement long-term
scripts, it's more designed for quick scripting.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(
"""
Connect to a target using the provided configuration in order
to run a test.
EXAMPLES
--conf can point to a YAML target configuration file
with all the necessary connection information:
$ {script} --conf my_target.yml
Alternatively, --kind must be set along the relevant credentials:
$ {script} --kind linux --host 192.0.2.1 --username root --password root
In both cases, --conf can also contain a PlatformInfo YAML description.
                Note: only load trusted YAML files as it can lead to arbitrary
code execution.
""".format(
script=os.path.basename(sys.argv[0])
)))
parser.add_argument("--conf", '-c',
help="Path to a TargetConf and PlatformInfo yaml file. Other options will override what is specified in the file."
)
parser.add_argument("--kind", "-k",
choices=["android", "linux", "host"],
help="The kind of target to connect to.")
device_group = parser.add_mutually_exclusive_group()
device_group.add_argument("--device", "-d",
help="The ADB ID of the target. Superseeds --host. Only applies to Android kind.")
device_group.add_argument("--host", "-n",
help="The hostname/IP of the target.")
parser.add_argument("--username", "-u",
help="Login username. Only applies to Linux kind.")
parser.add_argument("--password", "-p",
help="Login password. Only applies to Linux kind.")
parser.add_argument("--log-level",
default='info',
choices=('warning', 'info', 'debug'),
help="Verbosity level of the logs.")
parser.add_argument("--res-dir", "-o",
help="Result directory of the created Target. If no directory is specified, a default location under $LISA_HOME will be used.")
params = params or {}
for param, settings in params.items():
parser.add_argument(f'--{param}', **settings)
custom_params = {k.replace('-', '_') for k in params.keys()}
# Options that are not a key in TargetConf must be listed here
not_target_conf_opt = {
'platform_info', 'log_level', 'res_dir', 'conf',
}
not_target_conf_opt.update(custom_params)
args = parser.parse_args(argv)
setup_logging(level=args.log_level.upper())
target_conf = TargetConf()
platform_info = None
if args.conf:
# Tentatively load a PlatformInfo from the conf file
with contextlib.suppress(KeyError, ValueError):
platform_info = PlatformInfo.from_yaml_map(args.conf)
# Load the TargetConf from the file, and update it with command
# line arguments
try:
conf = TargetConf.from_yaml_map(args.conf)
except (KeyError, ValueError):
pass
else:
target_conf.add_src(args.conf, conf)
target_conf.add_src('command-line', {
k: v for k, v in vars(args).items()
if v is not None and k not in not_target_conf_opt
})
# Some sanity check to get better error messages
if 'kind' not in target_conf:
parser.error('--conf with target configuration or any of the connection options is required')
if args.kind == 'android':
if ('host' not in target_conf) and ('device' not in target_conf):
parser.error('--host or --device must be specified')
if args.kind == 'linux':
for required in ['host', 'username', 'password']:
if required not in target_conf:
parser.error(f'--{required} must be specified')
custom_args = {
param: value
for param, value in vars(args).items()
if param in custom_params
}
custom_args = argparse.Namespace(**custom_args)
return custom_args, cls.from_conf(conf=target_conf, plat_info=platform_info, res_dir=args.res_dir)
def _init_target(self, kind, name, workdir, device, host,
port, username, password, keyfile, strict_host_check, use_scp,
devlib_platform,
wait_boot, wait_boot_timeout,
):
"""
Initialize the Target
"""
logger = self.get_logger()
conn_settings = {}
resolved_username = username or 'root'
logger.debug(f'Setting up {kind} target...')
# If the target is Android, we need just (eventually) the device
if kind == 'android':
devlib_target_cls = devlib.AndroidTarget
# Workaround for ARM-software/devlib#225
workdir = workdir or '/data/local/tmp/devlib-target'
if device:
pass
elif host:
port = port or self.ADB_PORT_DEFAULT
device = f'{host}:{port}'
else:
device = 'DEFAULT'
conn_settings['device'] = device
# If the username was explicitly set to "root", root the target as
# early as possible
conn_settings['adb_as_root'] = (username == 'root')
elif kind == 'linux':
devlib_target_cls = devlib.LinuxTarget
conn_settings.update(
username=resolved_username,
port=port or self.SSH_PORT_DEFAULT,
host=host,
strict_host_check=True if strict_host_check is None else strict_host_check,
use_scp=False if use_scp is None else use_scp,
)
# Configure password or SSH keyfile
if keyfile:
conn_settings['keyfile'] = keyfile
else:
conn_settings['password'] = password
elif kind == 'host':
devlib_target_cls = devlib.LocalLinuxTarget
# If we are given a password, assume we can use it as a sudo
# password.
conn_settings.update(
unrooted=password is None,
password=password,
)
else:
raise ValueError(f'Unsupported platform type {kind}')
settings = '\n '.join(
f' {key}: {val}'
for key, val in conn_settings.items()
if key != 'password'
)
logger.debug(f'{kind} {name} target connection settings:\n {settings}')
########################################################################
# Devlib Platform configuration
########################################################################
if not devlib_platform:
devlib_platform = devlib.platform.Platform()
########################################################################
# Create devlib Target object
########################################################################
target = devlib_target_cls(
platform=devlib_platform,
load_default_modules=False,
connection_settings=conn_settings,
working_directory=workdir,
connect=False,
)
target.connect(check_boot_completed=wait_boot, timeout=wait_boot_timeout)
# None as username means adb root will be attempted, but failure will
# not prevent from connecting to the target.
if kind == 'android' and username is None:
try:
target.adb_root(enable=True)
except Exception as e: # pylint: disable=broad-except
logger.warning(f'"adb root" failed: {e}')
logger.debug(f'Target info: {dict(abi=target.abi, cpuinfo=target.cpuinfo, workdir=target.working_directory)}')
target.setup()
logger.info(f"Connected to target {(name or '')}")
return target
def get_res_dir(self, name=None, append_time=True, symlink=True):
"""
Returns a directory managed by LISA to store results.
Usage of that function is reserved to interactive use or simple scripts.
Tests should not rely on that as the created folder will not be tracked
by any external entity, which means the results will be lost in some
automated environment.
:param name: Name of the results directory
:type name: str
:param append_time: If True, the current datetime will be appended to
the given ``name``. If ``name`` is None, the directory name will be
the current datetime.
:type append_time: bool
:param symlink: Create a symlink named ``results_latest`` to the newly
created results directory
:type symlink: bool
"""
if isinstance(self._res_dir, ArtifactPath):
root = self._res_dir.root
relative = self._res_dir.relative
else:
root = self._res_dir
relative = ''
return self._get_res_dir(
root=root,
relative=relative,
name=name,
append_time=append_time,
symlink=symlink,
)
def _get_res_dir(self, root, relative, name, append_time, symlink):
logger = self.get_logger()
while True:
time_str = datetime.now().strftime('%Y%m%d_%H%M%S.%f')
if not name:
name = time_str
elif append_time:
name = f"{name}-{time_str}"
# If we were given an ArtifactPath with an existing root, we
# preserve that root so it can be relocated as the caller wants it
res_dir = ArtifactPath(root, os.path.join(relative, name))
# Compute base installation path
logger.info(f'Creating result directory: {res_dir}')
# It will fail if the folder already exists. In that case,
# append_time should be used to ensure we get a unique name.
try:
os.makedirs(res_dir)
break
except FileExistsError:
# If the time is used in the name, there is some hope that the
# next time it will succeed
if append_time:
logger.info('Directory already exists, retrying ...')
continue
else:
raise
if symlink:
res_lnk = os.path.join(LISA_HOME, LATEST_LINK)
with contextlib.suppress(FileNotFoundError):
os.remove(res_lnk)
# There may be a race condition with another tool trying to create
# the link
with contextlib.suppress(FileExistsError):
os.symlink(res_dir, res_lnk)
return res_dir
def install_tools(self, tools):
"""
Install tools additional to those specified in the test config 'tools'
field
:param tools: The list of names of tools to install
:type tools: list(str)
"""
def bin_path(tool):
binary = os.path.join(ASSETS_PATH, 'binaries', self.abi, tool)
if not os.path.isfile(binary):
binary = os.path.join(ASSETS_PATH, 'binaries', 'scripts', tool)
return binary
tools = set(tools) - self._installed_tools
# TODO: compute the checksum of the tool + install location and keep
# that in _installed_tools, so we are sure to be correct
for tool in map(bin_path, tools):
self.target.install(tool)
self._installed_tools.add(tool)
@contextlib.contextmanager
def freeze_userspace(self):
"""
Context manager that lets you freeze the userspace.
.. note:: A number of situations prevent from freezing anything. When
that happens, a warning is logged but no exception is raised, so
it's a best-effort approach.
"""
logger = self.get_logger()
if not self.is_rooted:
logger.warning('Could not freeze userspace: target is not rooted')
cm = nullcontext
elif not self.is_module_available('cgroups'):
logger.warning('Could not freeze userspace: "cgroups" devlib module is necessary')
cm = nullcontext
else:
controllers = [s.name for s in self.cgroups.list_subsystems()]
if 'freezer' not in controllers:
logger.warning('Could not freeze userspace: freezer cgroup controller not available on the target')
cm = nullcontext
else:
exclude = copy.copy(self.CRITICAL_TASKS[self.target.os])
# Do not freeze the process in charge of de-freezing, otherwise we
# will freeze to death and a machine hard reboot will be required
if isinstance(self.target, devlib.LocalLinuxTarget):
exclude.append(str(os.getpid()))
@contextlib.contextmanager
def cm():
logger.info(f"Freezing all tasks except: {','.join(exclude)}")
try:
yield self.cgroups.freeze(exclude)
finally:
logger.info('Un-freezing userspace tasks')
self.cgroups.freeze(thaw=True)
with cm() as x:
yield x
@contextlib.contextmanager
def disable_idle_states(self):
"""
Context manager that lets you disable all idle states
"""
logger = self.get_logger()
logger.info('Disabling idle states for all domains')
try:
cpuidle = self.cpuidle
except AttributeError:
logger.warning('Could not disable idle states, cpuidle devlib module is not loaded')
cm = nullcontext
else:
@contextlib.contextmanager
def cm():
try:
for cpu in range(self.plat_info['cpus-count']):
cpuidle.disable_all(cpu)
yield
finally:
logger.info('Re-enabling idle states for all domains')
for cpu in range(self.plat_info['cpus-count']):
cpuidle.enable_all(cpu)
with cm() as x:
yield x
def get_tags(self):
return {'board': self.name}
@classmethod
def _make_remote_snippet(cls, name, code_str, module, kwargs, global_vars, out_tempfiles):
# Inject the parameters inside the wrapper's globals so that it can
# access them. It's harmless as they would shadow any global name
# anyway, and it's restricted to the wrapper using eval()
global_vars = {
**global_vars,
**kwargs,
}
# Treat the modules separately as they cannot be pickled
modules = {
name: mod
for name, mod in global_vars.items()
if isinstance(mod, ModuleType)
}
def can_include(f):
return (
isinstance(f, FunctionType) and
# Only allow inlining of functions defined in the same module so that:
# 1. there is no name clash risk
# 2. we don't inline the whole world, which could lead to a
# number of problems that could appear after another module
# is updated or so. We only inline local things that are in
# direct control
f.__module__ == module
)
def add_func(f, name):
# Disallow decorated functions since their definition depends on
# external callable we cannot control
if hasattr(f, '__wrapped__'):
raise TypeError('Decorated functions cannot be called from remote functions')
closure_vars = {
name: val
for var_dct in inspect.getclosurevars(f)
if isinstance(var_dct, Mapping)
for name, val in var_dct.items()
}
funcs[name] = (f, cls._get_code(f)[1])
for _name, _f in closure_vars.items():
if _f is not f and can_include(_f):
add_func(_f, _name)
modules.update(
(name, mod)
for name, mod in closure_vars.items()
if isinstance(mod, ModuleType)
)
funcs = {}
for f_name, f in global_vars.items():
if can_include(f):
add_func(f, f_name)
code_str += '\n' + '\n'.join(map(itemgetter(1), funcs.values()))
non_pickled = set(modules.keys()) | set(funcs.keys())
global_vars = {
name: val
for name, val in global_vars.items()
if name not in non_pickled
}
if modules:
modules = f"import {', '.join(sorted(modules))}"
else:
modules = ''
script = textwrap.dedent('''
import pickle
import sys
def wrapper():
{modules}
{code}
return {f}({kwargs})
try:
out = eval(wrapper.__code__, pickle.loads({globals}))
except BaseException as e:
out = e
out_is_excep = True
else:
out_is_excep = False
out = pickle.dumps(out)
out_tempfile = {out_tempfiles}[1] if out_is_excep else {out_tempfiles}[0]
with open(out_tempfile, 'wb') as f:
f.write(out)
''').format(
f=name,
code=textwrap.dedent(code_str).replace('\n', '\n' + ' ' * 4),
modules=modules,
out_tempfiles=repr(out_tempfiles),
globals=repr(pickle.dumps(global_vars)),
kwargs=', '.join(
f'{name}={name}'
for name in kwargs.keys()
)
)
return script
@staticmethod
def _get_code(f):
lines, _ = inspect.getsourcelines(f)
# Remove decorators, as they are either undefined or just were used to
# feed the function to us
lines = [
line
for line in lines
if not line.strip().startswith('@')
]
code_str = textwrap.dedent(''.join(lines))
name = f.__name__
return (name, code_str)
def execute_python(self, f, args, kwargs, **execute_kwargs):
"""
Executes the given Python function ``f`` with the provided positional
and keyword arguments.
The return value or any exception is pickled back and is
returned/raised in the host caller.
:Variable keyword arguments: Forwarded to :meth:`execute` that
will spawn the Python interpreter on the target
.. note:: Closure variables are supported, but mutating them will not
be reflected in the caller's context. Also, functions that are
referred to will be:
* bundled in the script if it is defined in the same module
* referred to by name, assuming it comes from a module that is
installed on the target and that this module is in scope. If
that is not the case, a :exc:`NameError` will be raised.
.. attention:: Decorators are ignored and not applied.
"""
sig = inspect.signature(f)
kwargs = sig.bind(*args, **kwargs).arguments
closure_vars = inspect.getclosurevars(f)
name, code_str = self._get_code(f)
def mktemp():
return self.execute(
f'mktemp -p {shlex.quote(self.working_directory)}'
).strip()
def read_output(path):
with tempfile.TemporaryDirectory() as d:
name = os.path.join(d, 'out')
self.pull(path, name)
with open(name, 'rb') as f:
return pickle.loads(f.read())
def parse_output(paths, err):
val, excep = paths
try:
return read_output(val)
# If the file is empty, we probably got an exception
except EOFError:
# pylint: disable=raise-missing-from
try:
excep = read_output(excep)
# If we can't even read the exception, raise the initial one
# from devlib
except EOFError:
raise err if err is not None else ValueError('No exception was raised or value returned by the function')
else:
raise excep
out_tempfiles = tuple()
try:
out_tempfiles = (mktemp(), mktemp())
snippet = self._make_remote_snippet(
name=name,
code_str=code_str,
module=f.__module__,
kwargs=kwargs,
global_vars={
**closure_vars.globals,
**closure_vars.nonlocals,
},
out_tempfiles=out_tempfiles
)
cmd = ['python3', '-c', snippet]
cmd = ' '.join(map(shlex.quote, cmd))
try:
self.execute(cmd, **execute_kwargs)
except Exception as e: # pylint: disable=broad-except
err = e
else:
err = None
return parse_output(out_tempfiles, err)
finally:
for path in out_tempfiles:
self.remove(path)
def remote_func(self, **kwargs):
"""
Decorates a given function to execute remotely using
:meth:`execute_python`::
target = Target(...)
@target.remote_func(timeout=42)
def foo(x, y):
return x + y
# Execute the function on the target transparently
val = foo(1, y=2)
:Variable keyword arguments: Forwarded to :meth:`execute` that
will spawn the Python interpreter on the target
"""
def wrapper_param(f):
@functools.wraps(f)
def wrapper(*f_args, **f_kwargs):
return self.execute_python(f, f_args, f_kwargs, **kwargs)
return wrapper
return wrapper_param
class Gem5SimulationPlatformWrapper(Gem5SimulationPlatform):
def __init__(self, system, simulator, **kwargs):
simulator_args = copy.copy(simulator.get('args', []))
system_platform = system['platform']
# Get gem5 binary arguments
simulator_args.append('--listener-mode=on')
simulator_args.append(system_platform['description'])
simulator_args.extend(system_platform.get('args', []))
simulator_args.extend((
f"--kernel {system['kernel']}",
f"--dtb {system['dtb']}",
f"--disk-image {system['disk']}"
))
diod_path = which('diod')
if diod_path is None:
raise RuntimeError('Failed to find "diod" on your host machine, check your installation or your PATH variable')
# Setup virtio
# Brackets are there to let the output dir be created automatically
virtio_args = [
f'--which-diod={diod_path}',
'--workload-automation-vio={}',
]
simulator_args.extend(virtio_args)
# Quote/escape arguments and build the command line
gem5_args = ' '.join(shlex.quote(a) for a in simulator_args)
super().__init__(
gem5_args=gem5_args,
gem5_bin=simulator['bin'],
**kwargs
)
# vim :set tabstop=4 shiftwidth=4 expandtab textwidth=80
avg_line_length: 36.636915 | max_line_length: 211 | alphanum_fraction: 0.584404
qsc_*_quality_signal values, in schema order: 4,940 | 42,279 | 4.878947 | 0.180364 | 0.008713 | 0.008215 | 0.008132 | 0.097585 | 0.063397 | 0.032611 | 0.02979 | 0.021326 | 0.021326 | 0 | 0.002373 | 0.322217 | 42,279 | 1,153 | 212 | 36.66869 | 0.838707 | 0.278389 | 0 | 0.169014 | 0 | 0.004695 | 0.177316 | 0.016924 | 0.001565 | 0 | 0 | 0.000867 | 0 | 1 | 0.053208 | false | 0.023474 | 0.045383 | 0.00939 | 0.162754 | 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
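The lisa/target.py file in this record documents Target.from_cli() returning a connected Target and remote_func() decorating a function so it runs on the target. A minimal quick-scripting sketch based on those docstrings follows; the timeout value and the /proc/version read are illustrative, not taken from the LISA documentation.

# Sketch based on the docstrings in lisa/target.py above.
from lisa.target import Target

target = Target.from_cli()  # parses --conf / --kind / --host ... from sys.argv

@target.remote_func(timeout=60)  # keyword arguments are forwarded to execute()
def read_kernel_version():
    # Runs on the target; the return value is pickled back to the host.
    with open('/proc/version') as f:
        return f.read().strip()

print(read_kernel_version())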
hexsha: 6b204b59051969cf45dc90d85f76793faabc4ec6 | size: 644 | ext: py | lang: Python
max_stars: repo_path evalme/tests/test_old_format.py | repo_name heartexlabs/label-studio-evalme | head_hexsha 48f7a5226346b6e074edb4717b84122cc089bc7a | licenses ["MIT"] | count 3 | stars_event_min 2020-04-11T13:01:57.000Z | stars_event_max 2021-05-19T13:53:16.000Z
max_issues: repo_path evalme/tests/test_old_format.py | repo_name heartexlabs/label-studio-evalme | head_hexsha 48f7a5226346b6e074edb4717b84122cc089bc7a | licenses ["MIT"] | count 28 | issues_event_min 2020-05-21T01:34:44.000Z | issues_event_max 2022-03-21T15:39:16.000Z
max_forks: repo_path evalme/tests/test_old_format.py | repo_name heartexlabs/label-studio-evalme | head_hexsha 48f7a5226346b6e074edb4717b84122cc089bc7a | licenses ["MIT"] | count 1 | forks_event_min 2020-05-21T17:43:26.000Z | forks_event_max 2020-05-21T17:43:26.000Z
content:
from evalme.matcher import Matcher
def test_old_format_agreement_matrix():
m = Matcher(new_format=False)
m.load(r"./tests/test_data/test_old_format.json")
matrix = m.get_annotations_agreement()
assert matrix is not None
assert matrix > 0
def test_old_format_load():
m = Matcher(new_format=False)
m.load(r"./tests/test_data/test_old_format.json")
assert m._new_format is False
assert m._result_name == 'completions'
def test_new_format_load():
m = Matcher(new_format=False)
m.load(r"./tests/test_data/test_bbox.json")
assert m._new_format is True
assert m._result_name == 'annotations'
avg_line_length: 25.76 | max_line_length: 53 | alphanum_fraction: 0.723602
qsc_*_quality_signal values, in schema order: 100 | 644 | 4.35 | 0.3 | 0.124138 | 0.11954 | 0.117241 | 0.508046 | 0.508046 | 0.416092 | 0.416092 | 0.416092 | 0.416092 | 0 | 0.001869 | 0.169255 | 644 | 24 | 54 | 26.833333 | 0.811215 | 0 | 0 | 0.294118 | 0 | 0 | 0.201863 | 0.167702 | 0 | 0 | 0 | 0 | 0.352941 | 1 | 0.176471 | false | 0 | 0.058824 | 0 | 0.235294 | 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
effective: 1 | hits: 0
hexsha: 6b20937e56fc436965d29a3e4d7196bce1d5cd54 | size: 30,942 | ext: py | lang: Python
max_stars: repo_path behave/runner.py | repo_name wombat70/behave | head_hexsha c54493b0531795d946ac6754bfc643248cf3056a | licenses ["BSD-2-Clause"] | count 13 | stars_event_min 2019-10-03T19:15:14.000Z | stars_event_max 2019-10-16T02:01:57.000Z
max_issues: repo_path behave/runner.py | repo_name wombat70/behave | head_hexsha c54493b0531795d946ac6754bfc643248cf3056a | licenses ["BSD-2-Clause"] | count null | issues_event_min null | issues_event_max null
max_forks: repo_path behave/runner.py | repo_name wombat70/behave | head_hexsha c54493b0531795d946ac6754bfc643248cf3056a | licenses ["BSD-2-Clause"] | count null | forks_event_min null | forks_event_max null
content:
# -*- coding: UTF-8 -*-
"""
This module provides Runner class to run behave feature files (or model elements).
"""
from __future__ import absolute_import, print_function, with_statement
import contextlib
import os.path
import sys
import warnings
import weakref
import six
from behave._types import ExceptionUtil
from behave.capture import CaptureController
from behave.exception import ConfigError
from behave.formatter._registry import make_formatters
from behave.runner_util import \
collect_feature_locations, parse_features, \
exec_file, load_step_modules, PathManager
from behave.step_registry import registry as the_step_registry
from enum import Enum
if six.PY2:
# -- USE PYTHON3 BACKPORT: With unicode traceback support.
import traceback2 as traceback
else:
import traceback
class CleanupError(RuntimeError):
pass
class ContextMaskWarning(UserWarning):
"""Raised if a context variable is being overwritten in some situations.
If the variable was originally set by user code then this will be raised if
*behave* overwrites the value.
If the variable was originally set by *behave* then this will be raised if
user code overwrites the value.
"""
pass
class ContextMode(Enum):
"""Used to distinguish between the two usage modes while using the context:
* BEHAVE: Indicates "behave" (internal) mode
* USER: Indicates "user" mode (in steps, hooks, fixtures, ...)
"""
BEHAVE = 1
USER = 2
class Context(object):
"""Hold contextual information during the running of tests.
This object is a place to store information related to the tests you're
running. You may add arbitrary attributes to it of whatever value you need.
During the running of your tests the object will have additional layers of
namespace added and removed automatically. There is a "root" namespace and
additional namespaces for features and scenarios.
Certain names are used by *behave*; be wary of using them yourself as
*behave* may overwrite the value you set. These names are:
.. attribute:: feature
This is set when we start testing a new feature and holds a
:class:`~behave.model.Feature`. It will not be present outside of a
feature (i.e. within the scope of the environment before_all and
after_all).
.. attribute:: scenario
This is set when we start testing a new scenario (including the
individual scenarios of a scenario outline) and holds a
:class:`~behave.model.Scenario`. It will not be present outside of the
scope of a scenario.
.. attribute:: tags
The current set of active tags (as a Python set containing instances of
:class:`~behave.model.Tag` which are basically just glorified strings)
combined from the feature and scenario. This attribute will not be
present outside of a feature scope.
.. attribute:: aborted
This is set to true in the root namespace when the user aborts a test run
(:exc:`KeyboardInterrupt` exception). Initially: False.
.. attribute:: failed
This is set to true in the root namespace as soon as a step fails.
Initially: False.
.. attribute:: table
This is set at the step level and holds any :class:`~behave.model.Table`
associated with the step.
.. attribute:: text
This is set at the step level and holds any multiline text associated
with the step.
.. attribute:: config
The configuration of *behave* as determined by configuration files and
command-line options. The attributes of this object are the same as the
`configuration file section names`_.
.. attribute:: active_outline
This is set for each scenario in a scenario outline and references the
:class:`~behave.model.Row` that is active for the current scenario. It is
present mostly for debugging, but may be useful otherwise.
.. attribute:: log_capture
If logging capture is enabled then this attribute contains the captured
logging as an instance of :class:`~behave.log_capture.LoggingCapture`.
It is not present if logging is not being captured.
.. attribute:: stdout_capture
If stdout capture is enabled then this attribute contains the captured
output as a StringIO instance. It is not present if stdout is not being
captured.
.. attribute:: stderr_capture
If stderr capture is enabled then this attribute contains the captured
output as a StringIO instance. It is not present if stderr is not being
captured.
A :class:`behave.runner.ContextMaskWarning` warning will be raised if user
code attempts to overwrite one of these variables, or if *behave* itself
tries to overwrite a user-set variable.
You may use the "in" operator to test whether a certain value has been set
on the context, for example:
"feature" in context
checks whether there is a "feature" value in the context.
Values may be deleted from the context using "del" but only at the level
they are set. You can't delete a value set by a feature at a scenario level
but you can delete a value set for a scenario in that scenario.
.. _`configuration file section names`: behave.html#configuration-files
"""
# pylint: disable=too-many-instance-attributes
FAIL_ON_CLEANUP_ERRORS = True
def __init__(self, runner):
self._runner = weakref.proxy(runner)
self._config = runner.config
d = self._root = {
"aborted": False,
"failed": False,
"config": self._config,
"active_outline": None,
"cleanup_errors": 0,
"@cleanups": [], # -- REQUIRED-BY: before_all() hook
"@layer": "testrun",
}
self._stack = [d]
self._record = {}
self._origin = {}
self._mode = ContextMode.BEHAVE
# -- MODEL ENTITY REFERENCES/SUPPORT:
self.feature = None
# DISABLED: self.rule = None
# DISABLED: self.scenario = None
self.text = None
self.table = None
# -- RUNTIME SUPPORT:
self.stdout_capture = None
self.stderr_capture = None
self.log_capture = None
self.fail_on_cleanup_errors = self.FAIL_ON_CLEANUP_ERRORS
@staticmethod
def ignore_cleanup_error(context, cleanup_func, exception):
pass
@staticmethod
def print_cleanup_error(context, cleanup_func, exception):
cleanup_func_name = getattr(cleanup_func, "__name__", None)
if not cleanup_func_name:
cleanup_func_name = "%r" % cleanup_func
print(u"CLEANUP-ERROR in %s: %s: %s" %
(cleanup_func_name, exception.__class__.__name__, exception))
traceback.print_exc(file=sys.stdout)
# MAYBE: context._dump(pretty=True, prefix="Context: ")
# -- MARK: testrun as FAILED
# context._set_root_attribute("failed", True)
def _do_cleanups(self):
"""Execute optional cleanup functions when stack frame is popped.
A user can add a user-specified handler for cleanup errors.
.. code-block:: python
# -- FILE: features/environment.py
def cleanup_database(database):
pass
def handle_cleanup_error(context, cleanup_func, exception):
pass
def before_all(context):
context.on_cleanup_error = handle_cleanup_error
context.add_cleanup(cleanup_database, the_database)
"""
# -- BEST-EFFORT ALGORITHM: Tries to perform all cleanups.
assert self._stack, "REQUIRE: Non-empty stack"
current_layer = self._stack[0]
cleanup_funcs = current_layer.get("@cleanups", [])
on_cleanup_error = getattr(self, "on_cleanup_error",
self.print_cleanup_error)
context = self
cleanup_errors = []
for cleanup_func in reversed(cleanup_funcs):
try:
cleanup_func()
except Exception as e: # pylint: disable=broad-except
# pylint: disable=protected-access
context._root["cleanup_errors"] += 1
cleanup_errors.append(sys.exc_info())
on_cleanup_error(context, cleanup_func, e)
if self.fail_on_cleanup_errors and cleanup_errors:
            first_cleanup_error_info = cleanup_errors[0]
del cleanup_errors # -- ENSURE: Release other exception frames.
            six.reraise(*first_cleanup_error_info)
def _push(self, layer_name=None):
"""Push a new layer on the context stack.
HINT: Use layer_name values: "scenario", "feature", "testrun".
:param layer_name: Layer name to use (or None).
"""
initial_data = {"@cleanups": []}
if layer_name:
initial_data["@layer"] = layer_name
self._stack.insert(0, initial_data)
def _pop(self):
"""Pop the current layer from the context stack.
Performs any pending cleanups, registered for this layer.
"""
try:
self._do_cleanups()
finally:
# -- ENSURE: Layer is removed even if cleanup-errors occur.
self._stack.pop(0)
def _use_with_behave_mode(self):
"""Provides a context manager for using the context in BEHAVE mode."""
return use_context_with_mode(self, ContextMode.BEHAVE)
def use_with_user_mode(self):
"""Provides a context manager for using the context in USER mode."""
return use_context_with_mode(self, ContextMode.USER)
def user_mode(self):
warnings.warn("Use 'use_with_user_mode()' instead",
PendingDeprecationWarning, stacklevel=2)
return self.use_with_user_mode()
def _set_root_attribute(self, attr, value):
for frame in self.__dict__["_stack"]:
if frame is self.__dict__["_root"]:
continue
if attr in frame:
record = self.__dict__["_record"][attr]
params = {
"attr": attr,
"filename": record[0],
"line": record[1],
"function": record[3],
}
self._emit_warning(attr, params)
self.__dict__["_root"][attr] = value
if attr not in self._origin:
self._origin[attr] = self._mode
def _emit_warning(self, attr, params):
msg = ""
if self._mode is ContextMode.BEHAVE and self._origin[attr] is not ContextMode.BEHAVE:
msg = "behave runner is masking context attribute '%(attr)s' " \
"originally set in %(function)s (%(filename)s:%(line)s)"
elif self._mode is ContextMode.USER:
if self._origin[attr] is not ContextMode.USER:
msg = "user code is masking context attribute '%(attr)s' " \
"originally set by behave"
elif self._config.verbose:
msg = "user code is masking context attribute " \
"'%(attr)s'; see the tutorial for what this means"
if msg:
msg = msg % params
warnings.warn(msg, ContextMaskWarning, stacklevel=3)
def _dump(self, pretty=False, prefix=" "):
for level, frame in enumerate(self._stack):
print("%sLevel %d" % (prefix, level))
if pretty:
for name in sorted(frame.keys()):
value = frame[name]
print("%s %-15s = %r" % (prefix, name, value))
else:
print(prefix + repr(frame))
def __getattr__(self, attr):
if attr[0] == "_":
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError(attr)
for frame in self._stack:
if attr in frame:
return frame[attr]
msg = "'{0}' object has no attribute '{1}'"
msg = msg.format(self.__class__.__name__, attr)
raise AttributeError(msg)
def __setattr__(self, attr, value):
if attr[0] == "_":
self.__dict__[attr] = value
return
for frame in self._stack[1:]:
if attr in frame:
record = self._record[attr]
params = {
"attr": attr,
"filename": record[0],
"line": record[1],
"function": record[3],
}
self._emit_warning(attr, params)
stack_limit = 2
if six.PY2:
stack_limit += 1 # Due to traceback2 usage.
stack_frame = traceback.extract_stack(limit=stack_limit)[0]
self._record[attr] = stack_frame
frame = self._stack[0]
frame[attr] = value
if attr not in self._origin:
self._origin[attr] = self._mode
def __delattr__(self, attr):
frame = self._stack[0]
if attr in frame:
del frame[attr]
del self._record[attr]
else:
msg = "'{0}' object has no attribute '{1}' at the current level"
msg = msg.format(self.__class__.__name__, attr)
raise AttributeError(msg)
def __contains__(self, attr):
if attr[0] == "_":
return attr in self.__dict__
for frame in self._stack:
if attr in frame:
return True
return False
def execute_steps(self, steps_text):
"""The steps identified in the "steps" text string will be parsed and
executed in turn just as though they were defined in a feature file.
If the execute_steps call fails (either through error or failure
assertion) then the step invoking it will need to catch the resulting
exceptions.
:param steps_text: Text with the Gherkin steps to execute (as string).
:returns: True, if the steps executed successfully.
:raises: AssertionError, if a step failure occurs.
:raises: ValueError, if invoked without a feature context.
"""
assert isinstance(steps_text, six.text_type), "Steps must be unicode."
if not self.feature:
raise ValueError("execute_steps() called outside of feature")
# -- PREPARE: Save original context data for current step.
# Needed if step definition that called this method uses .table/.text
original_table = getattr(self, "table", None)
original_text = getattr(self, "text", None)
self.feature.parser.variant = "steps"
steps = self.feature.parser.parse_steps(steps_text)
with self._use_with_behave_mode():
for step in steps:
passed = step.run(self._runner, quiet=True, capture=False)
if not passed:
# -- ISSUE #96: Provide more substep info to diagnose problem.
step_line = u"%s %s" % (step.keyword, step.name)
message = "%s SUB-STEP: %s" % \
(step.status.name.upper(), step_line)
if step.error_message:
message += "\nSubstep info: %s\n" % step.error_message
message += u"Traceback (of failed substep):\n"
message += u"".join(traceback.format_tb(step.exc_traceback))
# message += u"\nTraceback (of context.execute_steps()):"
assert False, message
# -- FINALLY: Restore original context data for current step.
self.table = original_table
self.text = original_text
return True
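# -- USAGE SKETCH (illustrative; step texts and names are hypothetical):
#    execute_steps() is normally called from within another step definition:
#
#        @when(u'I do the same thing as before')
#        def step_impl(context):
#            context.execute_steps(u'''
#                When I press the big red button
#                Then the lights should turn off
#            ''')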
def add_cleanup(self, cleanup_func, *args, **kwargs):
"""Adds a cleanup function that is called when :meth:`Context._pop()`
is called. This is intended for user-cleanups.
:param cleanup_func: Callable function
:param args: Args for cleanup_func() call (optional).
:param kwargs: Kwargs for cleanup_func() call (optional).
"""
# MAYBE:
assert callable(cleanup_func), "REQUIRES: callable(cleanup_func)"
assert self._stack
if args or kwargs:
def internal_cleanup_func():
cleanup_func(*args, **kwargs)
else:
internal_cleanup_func = cleanup_func
current_frame = self._stack[0]
if cleanup_func not in current_frame["@cleanups"]:
# -- AVOID DUPLICATES:
current_frame["@cleanups"].append(internal_cleanup_func)
@contextlib.contextmanager
def use_context_with_mode(context, mode):
"""Switch context to ContextMode.BEHAVE or ContextMode.USER mode.
Provides a context manager for switching between the two context modes.
.. sourcecode:: python
context = Context()
with use_context_with_mode(context, ContextMode.BEHAVE):
... # Do something
# -- POSTCONDITION: Original context._mode is restored.
:param context: Context object to use.
:param mode: Mode to apply to context object.
"""
# pylint: disable=protected-access
assert mode in (ContextMode.BEHAVE, ContextMode.USER)
current_mode = context._mode
try:
context._mode = mode
yield
finally:
# -- RESTORE: Initial current_mode
# Even if an AssertionError/Exception is raised.
context._mode = current_mode
@contextlib.contextmanager
def scoped_context_layer(context, layer_name=None):
"""Provides context manager for context layer (push/do-something/pop cycle).
.. code-block::
with scoped_context_layer(context):
the_fixture = use_fixture(foo, context, name="foo_42")
"""
# pylint: disable=protected-access
try:
context._push(layer_name)
yield context
finally:
context._pop()
def path_getrootdir(path):
"""
Extract rootdir from path in a platform independent way.
POSIX-PATH EXAMPLE:
rootdir = path_getrootdir("/foo/bar/one.feature")
assert rootdir == "/"
WINDOWS-PATH EXAMPLE:
rootdir = path_getrootdir("D:\\foo\\bar\\one.feature")
assert rootdir == r"D:\"
"""
drive, _ = os.path.splitdrive(path)
if drive:
# -- WINDOWS:
return drive + os.path.sep
# -- POSIX:
return os.path.sep
class ModelRunner(object):
"""
Test runner for a behave model (features).
Provides the core functionality of a test runner and
the functional API needed by model elements.
.. attribute:: aborted
This is set to true when the user aborts a test run
(:exc:`KeyboardInterrupt` exception). Initially: False.
Stored as derived attribute in :attr:`Context.aborted`.
"""
# pylint: disable=too-many-instance-attributes
def __init__(self, config, features=None, step_registry=None):
self.config = config
self.features = features or []
self.hooks = {}
self.formatters = []
self.undefined_steps = []
self.step_registry = step_registry
self.capture_controller = CaptureController(config)
self.context = None
self.feature = None
self.hook_failures = 0
# @property
def _get_aborted(self):
value = False
if self.context:
value = self.context.aborted
return value
# @aborted.setter
def _set_aborted(self, value):
# pylint: disable=protected-access
assert self.context, "REQUIRE: context, but context=%r" % self.context
self.context._set_root_attribute("aborted", bool(value))
aborted = property(_get_aborted, _set_aborted,
doc="Indicates that test run is aborted by the user.")
def run_hook(self, name, context, *args):
if not self.config.dry_run and (name in self.hooks):
try:
with context.use_with_user_mode():
self.hooks[name](context, *args)
# except KeyboardInterrupt:
# self.aborted = True
# if name not in ("before_all", "after_all"):
# raise
except Exception as e: # pylint: disable=broad-except
# -- HANDLE HOOK ERRORS:
use_traceback = False
if self.config.verbose:
use_traceback = True
ExceptionUtil.set_traceback(e)
extra = u""
if "tag" in name:
extra = "(tag=%s)" % args[0]
error_text = ExceptionUtil.describe(e, use_traceback).rstrip()
error_message = u"HOOK-ERROR in %s%s: %s" % (name, extra, error_text)
print(error_message)
self.hook_failures += 1
if "tag" in name:
# -- SCENARIO or FEATURE
statement = getattr(context, "scenario", context.feature)
elif "all" in name:
# -- ABORT EXECUTION: For before_all/after_all
self.aborted = True
statement = None
else:
# -- CASE: feature, scenario, step
statement = args[0]
if statement:
# -- CASE: feature, scenario, step
statement.hook_failed = True
if statement.error_message:
# -- NOTE: One exception/failure is already stored.
# Append only error message.
statement.error_message += u"\n"+ error_message
else:
# -- FIRST EXCEPTION/FAILURE:
statement.store_exception_context(e)
statement.error_message = error_message
def setup_capture(self):
if not self.context:
self.context = Context(self)
self.capture_controller.setup_capture(self.context)
def start_capture(self):
self.capture_controller.start_capture()
def stop_capture(self):
self.capture_controller.stop_capture()
def teardown_capture(self):
self.capture_controller.teardown_capture()
def run_model(self, features=None):
# pylint: disable=too-many-branches
if not self.context:
self.context = Context(self)
if self.step_registry is None:
self.step_registry = the_step_registry
if features is None:
features = self.features
# -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
context = self.context
self.hook_failures = 0
self.setup_capture()
self.run_hook("before_all", context)
run_feature = not self.aborted
failed_count = 0
undefined_steps_initial_size = len(self.undefined_steps)
for feature in features:
if run_feature:
try:
self.feature = feature
for formatter in self.formatters:
formatter.uri(feature.filename)
failed = feature.run(self)
if failed:
failed_count += 1
if self.config.stop or self.aborted:
# -- FAIL-EARLY: After first failure.
run_feature = False
except KeyboardInterrupt:
self.aborted = True
failed_count += 1
run_feature = False
# -- ALWAYS: Report run/not-run feature to reporters.
# REQUIRED-FOR: Summary to keep track of untested features.
for reporter in self.config.reporters:
reporter.feature(feature)
# -- AFTER-ALL:
# pylint: disable=protected-access, broad-except
cleanups_failed = False
self.run_hook("after_all", self.context)
try:
self.context._do_cleanups() # Without dropping the last context layer.
except Exception:
cleanups_failed = True
if self.aborted:
print("\nABORTED: By user.")
for formatter in self.formatters:
formatter.close()
for reporter in self.config.reporters:
reporter.end()
failed = ((failed_count > 0) or self.aborted or (self.hook_failures > 0)
or (len(self.undefined_steps) > undefined_steps_initial_size)
or cleanups_failed)
# XXX-MAYBE: or context.failed)
return failed
def run(self):
"""
Implements the run method by running the model.
"""
self.context = Context(self)
return self.run_model()
class Runner(ModelRunner):
"""
Standard test runner for behave:
* setup paths
* loads environment hooks
* loads step definitions
* select feature files, parses them and creates model (elements)
"""
def __init__(self, config):
super(Runner, self).__init__(config)
self.path_manager = PathManager()
self.base_dir = None
def setup_paths(self):
# pylint: disable=too-many-branches, too-many-statements
if self.config.paths:
if self.config.verbose:
print("Supplied path:", \
", ".join('"%s"' % path for path in self.config.paths))
first_path = self.config.paths[0]
if hasattr(first_path, "filename"):
# -- BETTER: isinstance(first_path, FileLocation):
first_path = first_path.filename
base_dir = first_path
if base_dir.startswith("@"):
# -- USE: behave @features.txt
base_dir = base_dir[1:]
file_locations = self.feature_locations()
if file_locations:
base_dir = os.path.dirname(file_locations[0].filename)
base_dir = os.path.abspath(base_dir)
# supplied path might be to a feature file
if os.path.isfile(base_dir):
if self.config.verbose:
print("Primary path is to a file so using its directory")
base_dir = os.path.dirname(base_dir)
else:
if self.config.verbose:
print('Using default path "./features"')
base_dir = os.path.abspath("features")
# Get the root. This is not guaranteed to be "/" because Windows.
root_dir = path_getrootdir(base_dir)
new_base_dir = base_dir
steps_dir = self.config.steps_dir
environment_file = self.config.environment_file
while True:
if self.config.verbose:
print("Trying base directory:", new_base_dir)
if os.path.isdir(os.path.join(new_base_dir, steps_dir)):
break
if os.path.isfile(os.path.join(new_base_dir, environment_file)):
break
if new_base_dir == root_dir:
break
new_base_dir = os.path.dirname(new_base_dir)
if new_base_dir == root_dir:
if self.config.verbose:
if not self.config.paths:
print('ERROR: Could not find "%s" directory. '\
'Please specify where to find your features.' % \
steps_dir)
else:
print('ERROR: Could not find "%s" directory in your '\
'specified path "%s"' % (steps_dir, base_dir))
message = 'No %s directory in %r' % (steps_dir, base_dir)
raise ConfigError(message)
base_dir = new_base_dir
self.config.base_dir = base_dir
for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=True):
if [fn for fn in filenames if fn.endswith(".feature")]:
break
else:
if self.config.verbose:
if not self.config.paths:
print('ERROR: Could not find any "<name>.feature" files. '\
'Please specify where to find your features.')
else:
print('ERROR: Could not find any "<name>.feature" files '\
'in your specified path "%s"' % base_dir)
raise ConfigError('No feature files in %r' % base_dir)
self.base_dir = base_dir
self.path_manager.add(base_dir)
if not self.config.paths:
self.config.paths = [base_dir]
if base_dir != os.getcwd():
self.path_manager.add(os.getcwd())
def before_all_default_hook(self, context):
"""
Default implementation for :func:`before_all()` hook.
Setup the logging subsystem based on the configuration data.
"""
# pylint: disable=no-self-use
context.config.setup_logging()
def load_hooks(self, filename=None):
filename = filename or self.config.environment_file
hooks_path = os.path.join(self.base_dir, filename)
if os.path.exists(hooks_path):
exec_file(hooks_path, self.hooks)
if "before_all" not in self.hooks:
self.hooks["before_all"] = self.before_all_default_hook
def load_step_definitions(self, extra_step_paths=None):
if extra_step_paths is None:
extra_step_paths = []
# -- Allow steps to import other stuff from the steps dir
# NOTE: Default matcher can be overridden in "environment.py" hook.
steps_dir = os.path.join(self.base_dir, self.config.steps_dir)
step_paths = [steps_dir] + list(extra_step_paths)
load_step_modules(step_paths)
def feature_locations(self):
return collect_feature_locations(self.config.paths)
def run(self):
with self.path_manager:
self.setup_paths()
return self.run_with_paths()
def run_with_paths(self):
self.context = Context(self)
self.load_hooks()
self.load_step_definitions()
# -- ENSURE: context.execute_steps() works in weird cases (hooks, ...)
# self.setup_capture()
# self.run_hook("before_all", self.context)
# -- STEP: Parse all feature files (by using their file location).
feature_locations = [filename for filename in self.feature_locations()
if not self.config.exclude(filename)]
features = parse_features(feature_locations, language=self.config.lang)
self.features.extend(features)
# -- STEP: Run all features.
stream_openers = self.config.outputs
self.formatters = make_formatters(self.config, stream_openers)
return self.run_model()
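# -- USAGE SKETCH (illustrative; assumes behave's Configuration object):
#    A Runner is normally driven by behave's command line, but a minimal
#    programmatic invocation could look roughly like this:
#
#        from behave.configuration import Configuration
#        config = Configuration()          # -- Parses command line / config files.
#        failed = Runner(config).run()
#        exit_code = 1 if failed else 0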
# -----------------------------------------------------------------------------
# [dataset record separator]
# NEXT FILE: 01_P/P_2_1_1_02/main.py (Python, 2,645 bytes,
#            blob 6b20e0c8f16f54d5573a17cd7bb380c1b08265f4)
# REPOS:     genfifth/generative-design_Code-Package-Python-Mode and
#            QuantumNovice/generative-design_Code-Package-Python-Mode
#            @ 93fc8435933aa2e9329de77a1177bb34e63dd1c4 (BSD-2-Clause)
# -----------------------------------------------------------------------------
add_library('pdf')
import random
from datetime import datetime
tileCount = 20
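# -- NOTE (editorial summary of the sketch below): draws a tileCount x tileCount
#    grid of randomly oriented diagonal lines; mouse position controls stroke
#    weight, clicking re-seeds the random pattern, keys '1'-'3' switch stroke
#    caps, '4'-'7' toggle colors/alpha, '0' resets, 's' saves a PNG frame and
#    'p' records the next frame to a PDF.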
def setup():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
savePDF = False
actStrokeCap = ROUND
actRandomSeed = 0
colorLeft = color(197, 0, 123)
colorRight = color(87, 35, 129)
alphaLeft = 100
alphaRight = 100
def draw():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
if savePDF:
beginRecord(PDF, datetime.now().strftime("%Y%m%d%H%M%S")+".pdf")
background(255)
smooth()
noFill()
strokeCap(actStrokeCap)
random.seed(actRandomSeed)
for gridY in range(tileCount):
for gridX in range(tileCount):
posX = int(width/tileCount*gridX)
posY = int(height/tileCount*gridY)
toggle = random.randint(0,1)
if (toggle == 0):
strokeWeight(mouseX/20)
stroke(colorLeft, alphaLeft)
line(posX, posY, posX+width/tileCount, posY+height/tileCount)
elif (toggle == 1):
strokeWeight(mouseY/20)
stroke(colorRight, alphaRight)
line(posX, posY+width/tileCount, posX+height/tileCount, posY)
if (savePDF):
savePDF = False
endRecord()
def mousePressed():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
actRandomSeed = random.randint(0, 100000)
def keyReleased():
global savePDF, actStrokeCap, actRandomSeed, colorLeft, colorRight, alphaLeft, alphaRight
if (key=='s' or key=='S'):
saveFrame(datetime.now().strftime("%Y%m%d%H%M%S")+".png")
if (key=='p' or key=='P'):
savePDF = True
if key == "1":
actStrokeCap = ROUND
elif key == "2":
actStrokeCap = SQUARE
elif key == "3":
actStrokeCap = PROJECT
elif (key == '4'):
if (colorLeft == color(0)):
colorLeft = color(323, 100, 77)
else:
colorLeft = color(0)
elif (key == '5'):
if (colorRight == color(0)):
colorRight = color(273, 73, 51)
else:
colorRight = color(0)
elif (key == '6'):
if (alphaLeft == 100):
alphaLeft = 50
else:
alphaLeft = 100
elif (key == '7'):
if (alphaRight == 100):
alphaRight = 50
else:
alphaRight = 100
if (key == '0'):
actStrokeCap = ROUND
colorLeft = color(0)
colorRight = color(0)
alphaLeft = 100
alphaRight = 100
# -----------------------------------------------------------------------------
# [dataset record separator]
# NEXT FILE: core/dbt/contracts/graph/manifest.py (Python, 41,499 bytes,
#            blob 6b21fe4bf1085238cec917c37ffada209e34d9c0)
# REPO:      peiwangdb/dbt @ 30e72bc5e2ae950ddf0a1230b0c6406b889bea1a (Apache-2.0)
# -----------------------------------------------------------------------------
import enum
from dataclasses import dataclass, field
from itertools import chain, islice
from mashumaro import DataClassMessagePackMixin
from multiprocessing.synchronize import Lock
from typing import (
Dict, List, Optional, Union, Mapping, MutableMapping, Any, Set, Tuple,
TypeVar, Callable, Iterable, Generic, cast, AbstractSet, ClassVar
)
from typing_extensions import Protocol
from uuid import UUID
from dbt.contracts.graph.compiled import (
CompileResultNode, ManifestNode, NonSourceCompiledNode, GraphMemberNode
)
from dbt.contracts.graph.parsed import (
ParsedMacro, ParsedDocumentation, ParsedNodePatch, ParsedMacroPatch,
ParsedSourceDefinition, ParsedExposure, HasUniqueID,
UnpatchedSourceDefinition, ManifestNodes
)
from dbt.contracts.graph.unparsed import SourcePatch
from dbt.contracts.files import SourceFile, SchemaSourceFile, FileHash, AnySourceFile
from dbt.contracts.util import (
BaseArtifactMetadata, SourceKey, ArtifactMixin, schema_version
)
from dbt.dataclass_schema import dbtClassMixin
from dbt.exceptions import (
CompilationException,
raise_duplicate_resource_name, raise_compiler_error, warn_or_error,
raise_duplicate_patch_name,
raise_duplicate_macro_patch_name, raise_duplicate_source_patch_name,
)
from dbt.helper_types import PathSet
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.node_types import NodeType
from dbt.ui import line_wrap_message
from dbt import flags
from dbt import tracking
import dbt.utils
NodeEdgeMap = Dict[str, List[str]]
PackageName = str
DocName = str
RefName = str
UniqueID = str
def find_unique_id_for_package(storage, key, package: Optional[PackageName]):
if key not in storage:
return None
pkg_dct: Mapping[PackageName, UniqueID] = storage[key]
if package is None:
if not pkg_dct:
return None
else:
return next(iter(pkg_dct.values()))
elif package in pkg_dct:
return pkg_dct[package]
else:
return None
class DocLookup(dbtClassMixin):
def __init__(self, manifest: 'Manifest'):
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
self.populate(manifest)
def get_unique_id(self, key, package: Optional[PackageName]):
return find_unique_id_for_package(self.storage, key, package)
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
unique_id = self.get_unique_id(key, package)
if unique_id is not None:
return self.perform_lookup(unique_id, manifest)
return None
def add_doc(self, doc: ParsedDocumentation):
if doc.name not in self.storage:
self.storage[doc.name] = {}
self.storage[doc.name][doc.package_name] = doc.unique_id
def populate(self, manifest):
for doc in manifest.docs.values():
self.add_doc(doc)
def perform_lookup(
self, unique_id: UniqueID, manifest
) -> ParsedDocumentation:
if unique_id not in manifest.docs:
raise dbt.exceptions.InternalException(
f'Doc {unique_id} found in cache but not found in manifest'
)
return manifest.docs[unique_id]
class SourceLookup(dbtClassMixin):
def __init__(self, manifest: 'Manifest'):
self.storage: Dict[Tuple[str, str], Dict[PackageName, UniqueID]] = {}
self.populate(manifest)
def get_unique_id(self, key, package: Optional[PackageName]):
return find_unique_id_for_package(self.storage, key, package)
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
unique_id = self.get_unique_id(key, package)
if unique_id is not None:
return self.perform_lookup(unique_id, manifest)
return None
def add_source(self, source: ParsedSourceDefinition):
key = (source.source_name, source.name)
if key not in self.storage:
self.storage[key] = {}
self.storage[key][source.package_name] = source.unique_id
def populate(self, manifest):
for source in manifest.sources.values():
if hasattr(source, 'source_name'):
self.add_source(source)
def perform_lookup(
self, unique_id: UniqueID, manifest: 'Manifest'
) -> ParsedSourceDefinition:
if unique_id not in manifest.sources:
raise dbt.exceptions.InternalException(
f'Source {unique_id} found in cache but not found in manifest'
)
return manifest.sources[unique_id]
class RefableLookup(dbtClassMixin):
# model, seed, snapshot
_lookup_types: ClassVar[set] = set(NodeType.refable())
# refables are actually unique, so the Dict[PackageName, UniqueID] will
# only ever have exactly one value, but doing 3 dict lookups instead of 1
# is not a big deal at all and retains consistency
def __init__(self, manifest: 'Manifest'):
self.storage: Dict[str, Dict[PackageName, UniqueID]] = {}
self.populate(manifest)
def get_unique_id(self, key, package: Optional[PackageName]):
return find_unique_id_for_package(self.storage, key, package)
def find(self, key, package: Optional[PackageName], manifest: 'Manifest'):
unique_id = self.get_unique_id(key, package)
if unique_id is not None:
return self.perform_lookup(unique_id, manifest)
return None
def add_node(self, node: ManifestNode):
if node.resource_type in self._lookup_types:
if node.name not in self.storage:
self.storage[node.name] = {}
self.storage[node.name][node.package_name] = node.unique_id
def populate(self, manifest):
for node in manifest.nodes.values():
self.add_node(node)
def perform_lookup(
self, unique_id: UniqueID, manifest
) -> ManifestNode:
if unique_id not in manifest.nodes:
raise dbt.exceptions.InternalException(
f'Node {unique_id} found in cache but not found in manifest'
)
return manifest.nodes[unique_id]
class AnalysisLookup(RefableLookup):
_lookup_types: ClassVar[set] = set(NodeType.Analysis)
def _search_packages(
current_project: str,
node_package: str,
target_package: Optional[str] = None,
) -> List[Optional[str]]:
if target_package is not None:
return [target_package]
elif current_project == node_package:
return [current_project, None]
else:
return [current_project, node_package, None]
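# -- EXAMPLE (illustrative project/package names): search order returned above:
#    _search_packages("my_project", "some_pkg")        -> ["my_project", "some_pkg", None]
#    _search_packages("my_project", "my_project")      -> ["my_project", None]
#    _search_packages("my_project", "some_pkg", "dep") -> ["dep"]
#    A trailing None means "fall back to any package" in the lookups that
#    consume this list.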
@dataclass
class ManifestMetadata(BaseArtifactMetadata):
"""Metadata for the manifest."""
dbt_schema_version: str = field(
default_factory=lambda: str(WritableManifest.dbt_schema_version)
)
project_id: Optional[str] = field(
default=None,
metadata={
'description': 'A unique identifier for the project',
},
)
user_id: Optional[UUID] = field(
default=None,
metadata={
'description': 'A unique identifier for the user',
},
)
send_anonymous_usage_stats: Optional[bool] = field(
default=None,
metadata=dict(description=(
'Whether dbt is configured to send anonymous usage statistics'
)),
)
adapter_type: Optional[str] = field(
default=None,
metadata=dict(description='The type name of the adapter'),
)
def __post_init__(self):
if tracking.active_user is None:
return
if self.user_id is None:
self.user_id = tracking.active_user.id
if self.send_anonymous_usage_stats is None:
self.send_anonymous_usage_stats = (
not tracking.active_user.do_not_track
)
@classmethod
def default(cls):
return cls(
dbt_schema_version=str(WritableManifest.dbt_schema_version),
)
def _sort_values(dct):
"""Given a dictionary, sort each value. This makes output deterministic,
which helps for tests.
"""
return {k: sorted(v) for k, v in dct.items()}
def build_node_edges(nodes: List[ManifestNode]):
"""Build the forward and backward edges on the given list of ParsedNodes
and return them as two separate dictionaries, each mapping unique IDs to
lists of edges.
"""
backward_edges: Dict[str, List[str]] = {}
# pre-populate the forward edge dict for simplicity
forward_edges: Dict[str, List[str]] = {n.unique_id: [] for n in nodes}
for node in nodes:
backward_edges[node.unique_id] = node.depends_on_nodes[:]
for unique_id in node.depends_on_nodes:
if unique_id in forward_edges.keys():
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges), _sort_values(backward_edges)
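# -- EXAMPLE (illustrative unique_ids): for two nodes where "model.a" depends on
#    "model.b" (and "model.b" depends on nothing), build_node_edges() returns:
#        forward_edges  == {"model.a": [], "model.b": ["model.a"]}
#        backward_edges == {"model.a": ["model.b"], "model.b": []}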
# Build a map of children of macros
def build_macro_edges(nodes: List[Any]):
forward_edges: Dict[str, List[str]] = {
n.unique_id: [] for n in nodes if n.unique_id.startswith('macro') or n.depends_on.macros
}
for node in nodes:
for unique_id in node.depends_on.macros:
if unique_id in forward_edges.keys():
forward_edges[unique_id].append(node.unique_id)
return _sort_values(forward_edges)
def _deepcopy(value):
return value.from_dict(value.to_dict(omit_none=True))
class Locality(enum.IntEnum):
Core = 1
Imported = 2
Root = 3
class Specificity(enum.IntEnum):
Default = 1
Adapter = 2
@dataclass
class MacroCandidate:
locality: Locality
macro: ParsedMacro
def __eq__(self, other: object) -> bool:
if not isinstance(other, MacroCandidate):
return NotImplemented
return self.locality == other.locality
def __lt__(self, other: object) -> bool:
if not isinstance(other, MacroCandidate):
return NotImplemented
if self.locality < other.locality:
return True
if self.locality > other.locality:
return False
return False
@dataclass
class MaterializationCandidate(MacroCandidate):
specificity: Specificity
@classmethod
def from_macro(
cls, candidate: MacroCandidate, specificity: Specificity
) -> 'MaterializationCandidate':
return cls(
locality=candidate.locality,
macro=candidate.macro,
specificity=specificity,
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, MaterializationCandidate):
return NotImplemented
equal = (
self.specificity == other.specificity and
self.locality == other.locality
)
if equal:
raise_compiler_error(
'Found two materializations with the name {} (packages {} and '
'{}). dbt cannot resolve this ambiguity'
.format(self.macro.name, self.macro.package_name,
other.macro.package_name)
)
return equal
def __lt__(self, other: object) -> bool:
if not isinstance(other, MaterializationCandidate):
return NotImplemented
if self.specificity < other.specificity:
return True
if self.specificity > other.specificity:
return False
if self.locality < other.locality:
return True
if self.locality > other.locality:
return False
return False
M = TypeVar('M', bound=MacroCandidate)
class CandidateList(List[M]):
def last(self) -> Optional[ParsedMacro]:
if not self:
return None
self.sort()
return self[-1].macro
def _get_locality(
macro: ParsedMacro, root_project_name: str, internal_packages: Set[str]
) -> Locality:
if macro.package_name == root_project_name:
return Locality.Root
elif macro.package_name in internal_packages:
return Locality.Core
else:
return Locality.Imported
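# -- EXAMPLE (illustrative package names): for root_project_name="my_project"
#    and internal_packages={"dbt", "dbt_postgres"}:
#        macro from "my_project"   -> Locality.Root
#        macro from "dbt_postgres" -> Locality.Core
#        macro from "some_package" -> Locality.Imported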
class Searchable(Protocol):
resource_type: NodeType
package_name: str
@property
def search_name(self) -> str:
raise NotImplementedError('search_name not implemented')
N = TypeVar('N', bound=Searchable)
@dataclass
class NameSearcher(Generic[N]):
name: str
package: Optional[str]
nodetypes: List[NodeType]
def _matches(self, model: N) -> bool:
"""Return True if the model matches the given name, package, and type.
If package is None, any package is allowed.
nodetypes should be a container of NodeTypes that implements the 'in'
operator.
"""
if model.resource_type not in self.nodetypes:
return False
if self.name != model.search_name:
return False
return self.package is None or self.package == model.package_name
def search(self, haystack: Iterable[N]) -> Optional[N]:
"""Find an entry in the given iterable by name."""
for model in haystack:
if self._matches(model):
return model
return None
D = TypeVar('D')
@dataclass
class Disabled(Generic[D]):
target: D
MaybeDocumentation = Optional[ParsedDocumentation]
MaybeParsedSource = Optional[Union[
ParsedSourceDefinition,
Disabled[ParsedSourceDefinition],
]]
MaybeNonSource = Optional[Union[
ManifestNode,
Disabled[ManifestNode]
]]
T = TypeVar('T', bound=GraphMemberNode)
def _update_into(dest: MutableMapping[str, T], new_item: T):
"""Update dest to overwrite whatever is at dest[new_item.unique_id] with
new_item. There must be an existing value to overwrite, and the two nodes
must have the same original file path.
"""
unique_id = new_item.unique_id
if unique_id not in dest:
raise dbt.exceptions.RuntimeException(
f'got an update_{new_item.resource_type} call with an '
f'unrecognized {new_item.resource_type}: {new_item.unique_id}'
)
existing = dest[unique_id]
if new_item.original_file_path != existing.original_file_path:
raise dbt.exceptions.RuntimeException(
f'cannot update a {new_item.resource_type} to have a new file '
f'path!'
)
dest[unique_id] = new_item
# This contains macro methods that are in both the Manifest
# and the MacroManifest
class MacroMethods:
# Just to make mypy happy. There must be a better way.
def __init__(self):
self.macros = []
self.metadata = {}
def find_macro_by_name(
self, name: str, root_project_name: str, package: Optional[str]
) -> Optional[ParsedMacro]:
"""Find a macro in the graph by its name and package name, or None for
any package. The root project name is used to determine priority:
- locally defined macros come first
- then imported macros
- then macros defined in the root project
"""
filter: Optional[Callable[[MacroCandidate], bool]] = None
if package is not None:
def filter(candidate: MacroCandidate) -> bool:
return package == candidate.macro.package_name
candidates: CandidateList = self._find_macros_by_name(
name=name,
root_project_name=root_project_name,
filter=filter,
)
return candidates.last()
def find_generate_macro_by_name(
self, component: str, root_project_name: str
) -> Optional[ParsedMacro]:
"""
The `generate_X_name` macros are similar to regular ones, but ignore
imported packages.
- if there is a `generate_{component}_name` macro in the root
project, return it
- return the `generate_{component}_name` macro from the 'dbt'
internal project
"""
def filter(candidate: MacroCandidate) -> bool:
return candidate.locality != Locality.Imported
candidates: CandidateList = self._find_macros_by_name(
name=f'generate_{component}_name',
root_project_name=root_project_name,
# filter out imported packages
filter=filter,
)
return candidates.last()
def _find_macros_by_name(
self,
name: str,
root_project_name: str,
filter: Optional[Callable[[MacroCandidate], bool]] = None
) -> CandidateList:
"""Find macros by their name.
"""
# avoid an import cycle
from dbt.adapters.factory import get_adapter_package_names
candidates: CandidateList = CandidateList()
packages = set(get_adapter_package_names(self.metadata.adapter_type))
for unique_id, macro in self.macros.items():
if macro.name != name:
continue
candidate = MacroCandidate(
locality=_get_locality(macro, root_project_name, packages),
macro=macro,
)
if filter is None or filter(candidate):
candidates.append(candidate)
return candidates
@dataclass
class ParsingInfo:
static_analysis_parsed_path_count: int = 0
static_analysis_path_count: int = 0
@dataclass
class ManifestStateCheck(dbtClassMixin):
vars_hash: FileHash = field(default_factory=FileHash.empty)
profile_hash: FileHash = field(default_factory=FileHash.empty)
project_hashes: MutableMapping[str, FileHash] = field(default_factory=dict)
@dataclass
class Manifest(MacroMethods, DataClassMessagePackMixin, dbtClassMixin):
"""The manifest for the full graph, after parsing and during compilation.
"""
# These attributes are both positional and by keyword. If an attribute
# is added it must also be added in the __reduce_ex__ method in the
# args tuple in the right position.
nodes: MutableMapping[str, ManifestNode] = field(default_factory=dict)
sources: MutableMapping[str, ParsedSourceDefinition] = field(default_factory=dict)
macros: MutableMapping[str, ParsedMacro] = field(default_factory=dict)
docs: MutableMapping[str, ParsedDocumentation] = field(default_factory=dict)
exposures: MutableMapping[str, ParsedExposure] = field(default_factory=dict)
selectors: MutableMapping[str, Any] = field(default_factory=dict)
disabled: List[CompileResultNode] = field(default_factory=list)
files: MutableMapping[str, AnySourceFile] = field(default_factory=dict)
metadata: ManifestMetadata = field(default_factory=ManifestMetadata)
flat_graph: Dict[str, Any] = field(default_factory=dict)
state_check: ManifestStateCheck = field(default_factory=ManifestStateCheck)
# Moved from the ParseResult object
source_patches: MutableMapping[SourceKey, SourcePatch] = field(default_factory=dict)
# following is from ParseResult
_disabled: MutableMapping[str, List[CompileResultNode]] = field(default_factory=dict)
_doc_lookup: Optional[DocLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_source_lookup: Optional[SourceLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_ref_lookup: Optional[RefableLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_analysis_lookup: Optional[AnalysisLookup] = field(
default=None, metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_parsing_info: ParsingInfo = field(
default_factory=ParsingInfo,
metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
_lock: Lock = field(
default_factory=flags.MP_CONTEXT.Lock,
metadata={'serialize': lambda x: None, 'deserialize': lambda x: None}
)
def __pre_serialize__(self):
# serialization won't work with anything except an empty source_patches because
# tuple keys are not supported, so ensure it's empty
self.source_patches = {}
return self
@classmethod
def __post_deserialize__(cls, obj):
obj._lock = flags.MP_CONTEXT.Lock()
return obj
def sync_update_node(
self, new_node: NonSourceCompiledNode
) -> NonSourceCompiledNode:
"""update the node with a lock. The only time we should want to lock is
when compiling an ephemeral ancestor of a node at runtime, because
multiple threads could be just-in-time compiling the same ephemeral
dependency, and we want them to have a consistent view of the manifest.
If the existing node is not compiled, update it with the new node and
return that. If the existing node is compiled, do not update the
manifest and return the existing node.
"""
with self._lock:
existing = self.nodes[new_node.unique_id]
if getattr(existing, 'compiled', False):
# already compiled -> must be a NonSourceCompiledNode
return cast(NonSourceCompiledNode, existing)
_update_into(self.nodes, new_node)
return new_node
def update_exposure(self, new_exposure: ParsedExposure):
_update_into(self.exposures, new_exposure)
def update_node(self, new_node: ManifestNode):
_update_into(self.nodes, new_node)
def update_source(self, new_source: ParsedSourceDefinition):
_update_into(self.sources, new_source)
def build_flat_graph(self):
"""This attribute is used in context.common by each node, so we want to
only build it once and avoid any concurrency issues around it.
Make sure you don't call this until you're done with building your
manifest!
"""
self.flat_graph = {
'exposures': {
k: v.to_dict(omit_none=False)
for k, v in self.exposures.items()
},
'nodes': {
k: v.to_dict(omit_none=False)
for k, v in self.nodes.items()
},
'sources': {
k: v.to_dict(omit_none=False)
for k, v in self.sources.items()
}
}
def find_disabled_by_name(
self, name: str, package: Optional[str] = None
) -> Optional[ManifestNode]:
searcher: NameSearcher = NameSearcher(
name, package, NodeType.refable()
)
result = searcher.search(self.disabled)
return result
def find_disabled_source_by_name(
self, source_name: str, table_name: str, package: Optional[str] = None
) -> Optional[ParsedSourceDefinition]:
search_name = f'{source_name}.{table_name}'
searcher: NameSearcher = NameSearcher(
search_name, package, [NodeType.Source]
)
result = searcher.search(self.disabled)
if result is not None:
assert isinstance(result, ParsedSourceDefinition)
return result
def _materialization_candidates_for(
self, project_name: str,
materialization_name: str,
adapter_type: Optional[str],
) -> CandidateList:
if adapter_type is None:
specificity = Specificity.Default
else:
specificity = Specificity.Adapter
full_name = dbt.utils.get_materialization_macro_name(
materialization_name=materialization_name,
adapter_type=adapter_type,
with_prefix=False,
)
return CandidateList(
MaterializationCandidate.from_macro(m, specificity)
for m in self._find_macros_by_name(full_name, project_name)
)
def find_materialization_macro_by_name(
self, project_name: str, materialization_name: str, adapter_type: str
) -> Optional[ParsedMacro]:
candidates: CandidateList = CandidateList(chain.from_iterable(
self._materialization_candidates_for(
project_name=project_name,
materialization_name=materialization_name,
adapter_type=atype,
) for atype in (adapter_type, None)
))
return candidates.last()
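# -- NOTE (sketch of the lookup; macro names follow dbt's usual convention):
#    For adapter_type="postgres" and materialization_name="table", candidates
#    are gathered for roughly "materialization_table_postgres" and then
#    "materialization_table_default"; CandidateList.last() picks the most
#    specific, most local match.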
def get_resource_fqns(self) -> Mapping[str, PathSet]:
resource_fqns: Dict[str, Set[Tuple[str, ...]]] = {}
all_resources = chain(self.exposures.values(), self.nodes.values(), self.sources.values())
for resource in all_resources:
resource_type_plural = resource.resource_type.pluralize()
if resource_type_plural not in resource_fqns:
resource_fqns[resource_type_plural] = set()
resource_fqns[resource_type_plural].add(tuple(resource.fqn))
return resource_fqns
# This is called by 'parse_patch' in the NodePatchParser
def add_patch(
self, source_file: SchemaSourceFile, patch: ParsedNodePatch,
) -> None:
if patch.yaml_key in ['models', 'seeds', 'snapshots']:
unique_id = self.ref_lookup.get_unique_id(patch.name, None)
elif patch.yaml_key == 'analyses':
unique_id = self.analysis_lookup.get_unique_id(patch.name, None)
else:
raise dbt.exceptions.InternalException(
f'Unexpected yaml_key {patch.yaml_key} for patch in '
f'file {source_file.path.original_file_path}'
)
if unique_id is None:
# This will usually happen when a node is disabled
return
# patches can't be overwritten
node = self.nodes.get(unique_id)
if node:
if node.patch_path:
package_name, existing_file_path = node.patch_path.split('://')
raise_duplicate_patch_name(patch, existing_file_path)
source_file.append_patch(patch.yaml_key, unique_id)
node.patch(patch)
def add_macro_patch(
self, source_file: SchemaSourceFile, patch: ParsedMacroPatch,
) -> None:
# macros are fully namespaced
unique_id = f'macro.{patch.package_name}.{patch.name}'
macro = self.macros.get(unique_id)
if not macro:
warn_or_error(
f'WARNING: Found documentation for macro "{patch.name}" '
f'which was not found'
)
return
if macro.patch_path:
package_name, existing_file_path = macro.patch_path.split('://')
raise_duplicate_macro_patch_name(patch, existing_file_path)
source_file.macro_patches[patch.name] = unique_id
macro.patch(patch)
def add_source_patch(
self, source_file: SchemaSourceFile, patch: SourcePatch,
) -> None:
# source patches must be unique
key = (patch.overrides, patch.name)
if key in self.source_patches:
raise_duplicate_source_patch_name(patch, self.source_patches[key])
self.source_patches[key] = patch
source_file.source_patches.append(key)
def get_used_schemas(self, resource_types=None):
return frozenset({
(node.database, node.schema) for node in
chain(self.nodes.values(), self.sources.values())
if not resource_types or node.resource_type in resource_types
})
def get_used_databases(self):
return frozenset(
x.database for x in
chain(self.nodes.values(), self.sources.values())
)
# This is used in dbt.task.rpc.sql_commands 'add_new_refs'
def deepcopy(self):
return Manifest(
nodes={k: _deepcopy(v) for k, v in self.nodes.items()},
sources={k: _deepcopy(v) for k, v in self.sources.items()},
macros={k: _deepcopy(v) for k, v in self.macros.items()},
docs={k: _deepcopy(v) for k, v in self.docs.items()},
exposures={k: _deepcopy(v) for k, v in self.exposures.items()},
selectors={k: _deepcopy(v) for k, v in self.selectors.items()},
metadata=self.metadata,
disabled=[_deepcopy(n) for n in self.disabled],
files={k: _deepcopy(v) for k, v in self.files.items()},
state_check=_deepcopy(self.state_check),
)
def build_parent_and_child_maps(self):
edge_members = list(chain(
self.nodes.values(),
self.sources.values(),
self.exposures.values(),
))
forward_edges, backward_edges = build_node_edges(edge_members)
self.child_map = forward_edges
self.parent_map = backward_edges
def build_macro_child_map(self):
edge_members = list(chain(
self.nodes.values(),
self.macros.values(),
))
forward_edges = build_macro_edges(edge_members)
return forward_edges
def writable_manifest(self):
self.build_parent_and_child_maps()
return WritableManifest(
nodes=self.nodes,
sources=self.sources,
macros=self.macros,
docs=self.docs,
exposures=self.exposures,
selectors=self.selectors,
metadata=self.metadata,
disabled=self.disabled,
child_map=self.child_map,
parent_map=self.parent_map,
)
def write(self, path):
self.writable_manifest().write(path)
# Called in dbt.compilation.Linker.write_graph and
# dbt.graph.queue.get and ._include_in_cost
def expect(self, unique_id: str) -> GraphMemberNode:
if unique_id in self.nodes:
return self.nodes[unique_id]
elif unique_id in self.sources:
return self.sources[unique_id]
elif unique_id in self.exposures:
return self.exposures[unique_id]
else:
# something terrible has happened
raise dbt.exceptions.InternalException(
'Expected node {} not found in manifest'.format(unique_id)
)
@property
def doc_lookup(self) -> DocLookup:
if self._doc_lookup is None:
self._doc_lookup = DocLookup(self)
return self._doc_lookup
def rebuild_doc_lookup(self):
self._doc_lookup = DocLookup(self)
@property
def source_lookup(self) -> SourceLookup:
if self._source_lookup is None:
self._source_lookup = SourceLookup(self)
return self._source_lookup
def rebuild_source_lookup(self):
self._source_lookup = SourceLookup(self)
@property
def ref_lookup(self) -> RefableLookup:
if self._ref_lookup is None:
self._ref_lookup = RefableLookup(self)
return self._ref_lookup
def rebuild_ref_lookup(self):
self._ref_lookup = RefableLookup(self)
@property
def analysis_lookup(self) -> AnalysisLookup:
if self._analysis_lookup is None:
self._analysis_lookup = AnalysisLookup(self)
return self._analysis_lookup
# Called by dbt.parser.manifest._resolve_refs_for_exposure
# and dbt.parser.manifest._process_refs_for_node
def resolve_ref(
self,
target_model_name: str,
target_model_package: Optional[str],
current_project: str,
node_package: str,
) -> MaybeNonSource:
node: Optional[ManifestNode] = None
disabled: Optional[ManifestNode] = None
candidates = _search_packages(
current_project, node_package, target_model_package
)
for pkg in candidates:
node = self.ref_lookup.find(target_model_name, pkg, self)
if node is not None and node.config.enabled:
return node
# it's possible that the node is disabled
if disabled is None:
disabled = self.find_disabled_by_name(
target_model_name, pkg
)
if disabled is not None:
return Disabled(disabled)
return None
# Called by dbt.parser.manifest._resolve_sources_for_exposure
# and dbt.parser.manifest._process_source_for_node
def resolve_source(
self,
target_source_name: str,
target_table_name: str,
current_project: str,
node_package: str
) -> MaybeParsedSource:
key = (target_source_name, target_table_name)
candidates = _search_packages(current_project, node_package)
source: Optional[ParsedSourceDefinition] = None
disabled: Optional[ParsedSourceDefinition] = None
for pkg in candidates:
source = self.source_lookup.find(key, pkg, self)
if source is not None and source.config.enabled:
return source
if disabled is None:
disabled = self.find_disabled_source_by_name(
target_source_name, target_table_name, pkg
)
if disabled is not None:
return Disabled(disabled)
return None
# Called by DocsRuntimeContext.doc
def resolve_doc(
self,
name: str,
package: Optional[str],
current_project: str,
node_package: str,
) -> Optional[ParsedDocumentation]:
"""Resolve the given documentation. This follows the same algorithm as
resolve_ref except the is_enabled checks are unnecessary as docs are
always enabled.
"""
candidates = _search_packages(
current_project, node_package, package
)
for pkg in candidates:
result = self.doc_lookup.find(name, pkg, self)
if result is not None:
return result
return None
# Called by RunTask.defer_to_manifest
def merge_from_artifact(
self,
adapter,
other: 'WritableManifest',
selected: AbstractSet[UniqueID],
) -> None:
"""Given the selected unique IDs and a writable manifest, update this
manifest by replacing any unselected nodes with their counterpart.
Only non-ephemeral refable nodes are examined.
"""
refables = set(NodeType.refable())
merged = set()
for unique_id, node in other.nodes.items():
current = self.nodes.get(unique_id)
if current and (
node.resource_type in refables and
not node.is_ephemeral and
unique_id not in selected and
not adapter.get_relation(
current.database, current.schema, current.identifier
)
):
merged.add(unique_id)
self.nodes[unique_id] = node.replace(deferred=True)
# log up to 5 items
sample = list(islice(merged, 5))
logger.debug(
f'Merged {len(merged)} items from state (sample: {sample})'
)
# Methods that were formerly in ParseResult
def add_macro(self, source_file: SourceFile, macro: ParsedMacro):
if macro.unique_id in self.macros:
# detect that the macro exists and emit an error
other_path = self.macros[macro.unique_id].original_file_path
# subtract 2 for the "Compilation Error" indent
# note that the line wrap eats newlines, so if you want newlines,
# this is the result :(
msg = line_wrap_message(
f'''\
dbt found two macros named "{macro.name}" in the project
"{macro.package_name}".
To fix this error, rename or remove one of the following
macros:
- {macro.original_file_path}
- {other_path}
''',
subtract=2
)
raise_compiler_error(msg)
self.macros[macro.unique_id] = macro
source_file.macros.append(macro.unique_id)
def has_file(self, source_file: SourceFile) -> bool:
key = source_file.file_id
if key is None:
return False
if key not in self.files:
return False
my_checksum = self.files[key].checksum
return my_checksum == source_file.checksum
def add_source(
self, source_file: SchemaSourceFile, source: UnpatchedSourceDefinition
):
# sources can't be overwritten!
_check_duplicates(source, self.sources)
self.sources[source.unique_id] = source # type: ignore
source_file.sources.append(source.unique_id)
def add_node_nofile(self, node: ManifestNodes):
# nodes can't be overwritten!
_check_duplicates(node, self.nodes)
self.nodes[node.unique_id] = node
def add_node(self, source_file: AnySourceFile, node: ManifestNodes, test_from=None):
self.add_node_nofile(node)
if isinstance(source_file, SchemaSourceFile):
assert test_from
source_file.add_test(node.unique_id, test_from)
else:
source_file.nodes.append(node.unique_id)
def add_exposure(self, source_file: SchemaSourceFile, exposure: ParsedExposure):
_check_duplicates(exposure, self.exposures)
self.exposures[exposure.unique_id] = exposure
source_file.exposures.append(exposure.unique_id)
def add_disabled_nofile(self, node: CompileResultNode):
if node.unique_id in self._disabled:
self._disabled[node.unique_id].append(node)
else:
self._disabled[node.unique_id] = [node]
def add_disabled(self, source_file: AnySourceFile, node: CompileResultNode, test_from=None):
self.add_disabled_nofile(node)
if isinstance(source_file, SchemaSourceFile):
assert test_from
source_file.add_test(node.unique_id, test_from)
else:
source_file.nodes.append(node.unique_id)
def add_doc(self, source_file: SourceFile, doc: ParsedDocumentation):
_check_duplicates(doc, self.docs)
self.docs[doc.unique_id] = doc
source_file.docs.append(doc.unique_id)
# end of methods formerly in ParseResult
# Provide support for copy.deepcopy() - we just need to avoid the lock!
# pickle and deepcopy use this. It returns a callable object used to
# create the initial version of the object and a tuple of arguments
# for the object, i.e. the Manifest.
# The order of the arguments must match the order of the attributes
# in the Manifest class declaration, because they are used as
# positional arguments to construct a Manifest.
def __reduce_ex__(self, protocol):
args = (
self.nodes,
self.sources,
self.macros,
self.docs,
self.exposures,
self.selectors,
self.disabled,
self.files,
self.metadata,
self.flat_graph,
self.state_check,
self.source_patches,
self._disabled,
self._doc_lookup,
self._source_lookup,
self._ref_lookup,
)
return self.__class__, args
class MacroManifest(MacroMethods):
def __init__(self, macros):
self.macros = macros
self.metadata = ManifestMetadata()
# This is returned by the 'graph' context property
# in the ProviderContext class.
self.flat_graph = {}
AnyManifest = Union[Manifest, MacroManifest]
@dataclass
@schema_version('manifest', 2)
class WritableManifest(ArtifactMixin):
nodes: Mapping[UniqueID, ManifestNode] = field(
metadata=dict(description=(
'The nodes defined in the dbt project and its dependencies'
))
)
sources: Mapping[UniqueID, ParsedSourceDefinition] = field(
metadata=dict(description=(
'The sources defined in the dbt project and its dependencies'
))
)
macros: Mapping[UniqueID, ParsedMacro] = field(
metadata=dict(description=(
'The macros defined in the dbt project and its dependencies'
))
)
docs: Mapping[UniqueID, ParsedDocumentation] = field(
metadata=dict(description=(
'The docs defined in the dbt project and its dependencies'
))
)
exposures: Mapping[UniqueID, ParsedExposure] = field(
metadata=dict(description=(
'The exposures defined in the dbt project and its dependencies'
))
)
selectors: Mapping[UniqueID, Any] = field(
metadata=dict(description=(
'The selectors defined in selectors.yml'
))
)
disabled: Optional[List[CompileResultNode]] = field(metadata=dict(
description='A list of the disabled nodes in the target'
))
parent_map: Optional[NodeEdgeMap] = field(metadata=dict(
description='A mapping from child nodes to their dependencies',
))
child_map: Optional[NodeEdgeMap] = field(metadata=dict(
description='A mapping from parent nodes to their dependents',
))
metadata: ManifestMetadata = field(metadata=dict(
description='Metadata about the manifest',
))
def _check_duplicates(
value: HasUniqueID, src: Mapping[str, HasUniqueID]
):
if value.unique_id in src:
raise_duplicate_resource_name(value, src[value.unique_id])
K_T = TypeVar('K_T')
V_T = TypeVar('V_T')
def _expect_value(
key: K_T, src: Mapping[K_T, V_T], old_file: SourceFile, name: str
) -> V_T:
if key not in src:
raise CompilationException(
'Expected to find "{}" in cached "result.{}" based '
'on cached file information: {}!'
.format(key, name, old_file)
)
return src[key]
# -----------------------------------------------------------------------------
# [dataset record separator]
# NEXT FILE: Apps/phdigitalshadows/dsapi/service/infrastructure_service.py
#            (Python, 2,342 bytes, blob 6b2234f49a8b57fe4bf6fd97f1ef5ca5137dfade)
# REPO:      ryanbsaunders/phantom-apps
#            @ 1befda793a08d366fbd443894f993efb1baf9635 (Apache-2.0)
# -----------------------------------------------------------------------------
# File: infrastructure_service.py
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
from .ds_base_service import DSBaseService
from .ds_find_service import DSFindService
from ..model.infrastructure import Infrastructure
class InfrastructureService(DSFindService):
def __init__(self, ds_api_key, ds_api_secret_key, proxy=None):
super(InfrastructureService, self).__init__(ds_api_key, ds_api_secret_key, proxy=proxy)
def find_all(self, view=None):
"""
Streams all infrastructure objects retrieved from the Digital Shadows API.
:param view: InfrastructureView
:return: Infrastructure generator
"""
if view is None:
view = InfrastructureService.infrastructure_view()
return self._find_all('/api/ip-ports',
view,
Infrastructure)
def find_all_pages(self, view=None):
"""
Streams all infrastructure objects retrieved from the Digital Shadows API in page groups.
:param view: InfrastructureView
:return: Infrastructure generator
"""
if view is None:
view = Infrastructure.infrastructure_view()
return self._find_all_pages('/api/ip-ports',
view,
Infrastructure)
@staticmethod
@DSBaseService.paginated(size=500)
@DSBaseService.sorted('published')
def infrastructure_view(detectedopen='ALL', domainname=None, detectedclosed=False, markedclosed=False,
severities=None, alerted=False, reverse=None):
view = {
'filter': {
'detectedOpen': detectedopen,
'severities': [] if severities is None else severities,
'alerted': 'true' if alerted else 'false',
'markedClosed': 'true' if markedclosed else 'false',
'detectedClosed': 'true' if detectedclosed else 'false'
}
}
if domainname is not None:
view['filter']['domainName'] = domainname
if reverse is not None:
view['sort'] = {
'direction': 'ASCENDING' if reverse else 'DESCENDING',
'property': 'published'
}
return view
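# -- EXAMPLE (illustrative): with default arguments the undecorated method
#    builds roughly:
#        {'filter': {'detectedOpen': 'ALL', 'severities': [], 'alerted': 'false',
#                    'markedClosed': 'false', 'detectedClosed': 'false'}}
#    The paginated/sorted decorators are assumed to add paging (size=500) and
#    sort-by-'published' information on top of this view.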
# -----------------------------------------------------------------------------
# [dataset record separator]
# NEXT FILE: src/find_genes_by_location/find_genes_by_location.py
#            (Python, 5,562 bytes, blob 6b226704f6cb4e708962ce8718453e73c2ce6810)
# REPO:      NCBI-Codeathons/Identify-antiphage-defense-systems-in-the-bacterial-pangenome
#            @ b1eb83118268ada50e90f979347e47e055a51029 (MIT)
# -----------------------------------------------------------------------------
import argparse
from collections import defaultdict
import csv
from dataclasses import dataclass, field
from enum import Enum, unique, auto
import os
import sys
import tempfile
import yaml
import zipfile
import gffutils
from google.protobuf import json_format
from ncbi.datasets.v1alpha1 import dataset_catalog_pb2
from ncbi.datasets.v1alpha1.reports import assembly_pb2
from ncbi.datasets.reports.report_reader import DatasetsReportReader
def retrieve_assembly_report(zip_in, catalog, assm_acc: str) -> assembly_pb2.AssemblyDataReport:
report_files = get_catalog_files_for_assembly(catalog, dataset_catalog_pb2.File.FileType.DATA_REPORT, assm_acc)
for path in report_files:
yaml = zip_in.read(path)
rpt_rdr = DatasetsReportReader()
return rpt_rdr.assembly_report(yaml)
def retrieve_data_catalog(zip_in) -> dataset_catalog_pb2.Catalog:
catalog_json = zip_in.read('ncbi_dataset/data/dataset_catalog.json')
return json_format.Parse(catalog_json, dataset_catalog_pb2.Catalog())
def get_catalog_files_for_assembly(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str):
report_files = get_catalog_files(catalog, desired_filetype, assm_acc)
filepaths = []
for assm_acc, paths in report_files.items():
filepaths.extend(paths)
return filepaths
def get_catalog_files(catalog: dataset_catalog_pb2.Catalog, desired_filetype: dataset_catalog_pb2.File.FileType, assm_acc: str = None):
files = defaultdict(list)
for assm in catalog.assemblies:
acc = assm.accession
if assm_acc and assm_acc != acc:
continue
for f in assm.files:
filepath = os.path.join('ncbi_dataset', 'data', f.file_path)
if f.file_type == desired_filetype:
files[acc].append(filepath)
return files
def get_zip_file_for_acc(acc, path):
fname = os.path.join(path, f'{acc}.zip')
if os.path.isfile(fname):
return fname
return None
@dataclass
class Gene:
id: str
feat_type: str
name: str
chrom: str
strand: str
range_start: int
range_stop: int
protein_accession: str = ""
def get_fields(self):
return [self.feat_type, self.name, self.range_start, self.range_stop, self.protein_accession]
def name_val(self):
return self.protein_accession if self.protein_accession else self.name
def find_genes_by_loc(gff3_db, csvout, assm_acc, seq_acc, start, stop, extra_fields):
found_genes = []
feat_types = ('gene', 'pseudogene')
for gene in gff3_db.region(seqid=seq_acc, start=start, end=stop, featuretype=feat_types, completely_within=False):
        gene_name = gene.attributes.get('Name', [None])[0]
prot_acc = ""
if gene.attributes['gene_biotype'][0] == 'protein_coding':
cds = list(gff3_db.children(gene, featuretype='CDS'))
            prot_acc = cds[0].attributes.get('protein_id', [None])[0]
geneobj = Gene(
gene.id,
gene.featuretype,
gene_name,
gene.chrom,
gene.strand,
gene.start,
gene.stop,
prot_acc,
)
csvout.writerow([assm_acc, seq_acc, start, stop, *extra_fields, *geneobj.get_fields()])
found_genes.append(geneobj)
return found_genes
class FindGenesByLoc:
default_packages_dir = os.path.join('var', 'data', 'packages')
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('--packages-dir', type=str, default=self.default_packages_dir,
help=f'root of input data directory [{self.default_packages_dir}]')
parser.add_argument('--locs', type=str, help='file containing genomic locations')
self.args = parser.parse_args()
self.writer = csv.writer(sys.stdout, dialect='excel-tab')
def read_data(self):
for row in csv.reader(iter(sys.stdin.readline, ''), dialect='excel-tab'):
yield row
def run(self):
for assm_acc, seq_acc, start, stop, *extra in self.read_data():
self.find_all_for_location(assm_acc, seq_acc, start, stop, extra)
def process_loc_for_gff(self, zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpfile.write(zin.read(gff_fname))
db = gffutils.create_db(
tmpfile.name,
dbfn=':memory:',
force=True,
keep_order=True,
merge_strategy='merge',
sort_attribute_values=True
)
find_genes_by_loc(db, self.writer, assm_acc, seq_acc, start, stop, extra_fields)
def find_all_for_location(self, assm_acc, seq_acc, start, stop, extra_fields):
zip_file = get_zip_file_for_acc(assm_acc, self.args.packages_dir)
try:
with zipfile.ZipFile(zip_file, 'r') as zin:
catalog = retrieve_data_catalog(zin)
gff_files = get_catalog_files(catalog, dataset_catalog_pb2.File.FileType.GFF3)
for assm_acc, gff_files in gff_files.items():
report = retrieve_assembly_report(zin, catalog, assm_acc)
for gff_fname in gff_files:
self.process_loc_for_gff(zin, gff_fname, assm_acc, seq_acc, start, stop, extra_fields)
except zipfile.BadZipFile:
print(f'{zip_file} is not a zip file')
if __name__ == '__main__':
FindGenesByLoc().run()
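# Minimal usage sketch: the script reads tab-separated location rows from stdin and
# expects one NCBI Datasets package per assembly under --packages-dir. The accession
# values and paths below are illustrative only.
#
#   echo -e "GCF_000005845.2\tNC_000913.3\t100000\t120000" | \
#       python find_genes_by_location.py --packages-dir var/data/packages
#
# Each input row is assm_acc, seq_acc, start, stop, plus optional extra columns that
# are echoed back into the tab-separated output alongside the matched gene fields.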
| 36.834437
| 141
| 0.670262
| 731
| 5,562
| 4.829001
| 0.243502
| 0.037677
| 0.043343
| 0.029462
| 0.192351
| 0.169405
| 0.162606
| 0.132011
| 0.094618
| 0.073088
| 0
| 0.005399
| 0.234088
| 5,562
| 150
| 142
| 37.08
| 0.823239
| 0
| 0
| 0
| 0
| 0
| 0.056465
| 0.012048
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105691
| false
| 0
| 0.121951
| 0.01626
| 0.390244
| 0.00813
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b22ef6d7e0edee04fb293d0c3cd3eec5a122d66
| 1,307
|
py
|
Python
|
api/api/pokemon/views.py
|
farnswj1/PokemonAPI
|
b6fc4dfe8c0fde6b4560455dd37e61b6a0d2ea27
|
[
"MIT"
] | null | null | null |
api/api/pokemon/views.py
|
farnswj1/PokemonAPI
|
b6fc4dfe8c0fde6b4560455dd37e61b6a0d2ea27
|
[
"MIT"
] | null | null | null |
api/api/pokemon/views.py
|
farnswj1/PokemonAPI
|
b6fc4dfe8c0fde6b4560455dd37e61b6a0d2ea27
|
[
"MIT"
] | null | null | null |
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
CreateAPIView,
UpdateAPIView,
DestroyAPIView
)
from .models import Pokemon
from .serializers import PokemonSerializer
from .filters import PokemonFilterSet
# Create your views here.
class PokemonListAPIView(ListAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
filterset_class = PokemonFilterSet
@method_decorator(cache_page(7200))
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PokemonDetailAPIView(RetrieveAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
@method_decorator(cache_page(7200))
def get(self, request, *args, **kwargs):
return super().get(request, *args, **kwargs)
class PokemonCreateAPIView(CreateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonUpdateAPIView(UpdateAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
class PokemonDeleteAPIView(DestroyAPIView):
queryset = Pokemon.objects.all()
serializer_class = PokemonSerializer
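# A sketch of how these views could be routed; the project's actual urls.py is not
# shown here, so the paths below are illustrative only.
#
#   from django.urls import path
#   urlpatterns = [
#       path('pokemon/', PokemonListAPIView.as_view()),
#       path('pokemon/<int:pk>/', PokemonDetailAPIView.as_view()),
#       path('pokemon/create/', PokemonCreateAPIView.as_view()),
#       path('pokemon/<int:pk>/update/', PokemonUpdateAPIView.as_view()),
#       path('pokemon/<int:pk>/delete/', PokemonDeleteAPIView.as_view()),
#   ]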
| 27.229167
| 52
| 0.75746
| 128
| 1,307
| 7.632813
| 0.367188
| 0.076766
| 0.11259
| 0.127943
| 0.488229
| 0.488229
| 0.488229
| 0.313204
| 0.186285
| 0.186285
| 0
| 0.007266
| 0.157613
| 1,307
| 47
| 53
| 27.808511
| 0.880109
| 0.017598
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.176471
| 0.058824
| 0.764706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b23ef94d3d317043d5fc3a13457402a61c1b88c
| 9,546
|
py
|
Python
|
plugins/action/normalize_gitlab_cfg.py
|
sma-de/ansible-collections-gitlab
|
5da99b04722fc016d3e8589635fcbb3579dcfda2
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/action/normalize_gitlab_cfg.py
|
sma-de/ansible-collections-gitlab
|
5da99b04722fc016d3e8589635fcbb3579dcfda2
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/action/normalize_gitlab_cfg.py
|
sma-de/ansible-collections-gitlab
|
5da99b04722fc016d3e8589635fcbb3579dcfda2
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.six import iteritems, string_types
from ansible_collections.smabot.base.plugins.module_utils.plugins.config_normalizing.base import ConfigNormalizerBaseMerger, NormalizerBase, NormalizerNamed, DefaultSetterConstant, DefaultSetterOtherKey
from ansible_collections.smabot.base.plugins.module_utils.utils.dicting import setdefault_none, SUBDICT_METAKEY_ANY, get_subdict
from ansible_collections.smabot.base.plugins.module_utils.utils.utils import ansible_assert
def user_role_to_cfg(username, urole, cfg):
tmp = ['roles', 'subroles'] \
+ urole['path'].replace('/', '/subroles/').split('/')
tmp = get_subdict(cfg, tmp, default_empty=True)
setdefault_none(setdefault_none(tmp, 'members', {}),
urole['level'], []
).append(username)
class ConfigRootNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
self._add_defaultsetter(kwargs,
'random_pwlen', DefaultSetterConstant(80)
)
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServersNormalizer(pluginref),
]
super(ConfigRootNormalizer, self).__init__(pluginref, *args, **kwargs)
class ServersNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServerInstancesNormalizer(pluginref),
]
super(ServersNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['servers']
class ServerInstancesNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvInstNormalizer(pluginref),
]
super(ServerInstancesNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['instances']
class SrvInstNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServerUsersNormalizer(pluginref),
SrvRolesNormalizer(pluginref),
]
super(SrvInstNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return [SUBDICT_METAKEY_ANY]
class SrvRolesBaseNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvRolesMembersNormalizer(pluginref),
## note: for recursive structures, the sub normalizers can only
## be instantiated if the corresponding key actually exists
## to avoid indefinite recursions of death
(SrvSubRolesNormalizer, True),
]
super(SrvRolesBaseNormalizer, self).__init__(
pluginref, *args, **kwargs
)
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
# do config subkey
c = setdefault_none(my_subcfg, 'config', defval={})
setdefault_none(c, 'name', defval=cfgpath_abs[-1])
# build role hierarchy path and parent
if cfgpath_abs[-1] == 'roles':
## top level
parent = []
else:
## subrole
parent = get_subdict(cfg, cfgpath_abs[:-2])
parent = parent['role_abspath']
my_subcfg['role_abspath'] = parent + [c['name']]
c['parent'] = '/'.join(parent)
return my_subcfg
class SrvRolesNormalizer(SrvRolesBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(SrvRolesNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['roles']
class SrvSubRolesNormalizer(NormalizerBase):
NORMER_CONFIG_PATH = ['subroles']
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvRoleInstNormalizer(pluginref),
]
super(SrvSubRolesNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return type(self).NORMER_CONFIG_PATH
class SrvRoleInstNormalizer(SrvRolesBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(SrvRoleInstNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return [SUBDICT_METAKEY_ANY]
class SrvRolesMembersNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
super(SrvRolesMembersNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['members']
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
if not my_subcfg:
return my_subcfg
## if it exists, members should be a dict where the keys are
## valid gitlab access levels (like guest or developer) and
## the values should be a list of users
exportcfg = []
my_group = self.get_parentcfg(cfg, cfgpath_abs)
my_group = '/'.join(my_group['role_abspath'])
for (k,ul) in iteritems(my_subcfg):
for u in ul:
exportcfg.append({
'gitlab_group': my_group, 'gitlab_user': u, 'access_level': k
})
my_subcfg['_exportcfg'] = exportcfg
return my_subcfg
class ServerUsersNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
ServerBotsNormalizer(pluginref),
ServerHumansNormalizer(pluginref),
]
super(ServerUsersNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['users']
class ServerUsrBaseNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvUsrNormalizer(pluginref),
]
super(ServerUsrBaseNormalizer, self).__init__(
pluginref, *args, **kwargs
)
class ServerBotsNormalizer(ServerUsrBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(ServerBotsNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['bots']
class ServerHumansNormalizer(ServerUsrBaseNormalizer):
def __init__(self, pluginref, *args, **kwargs):
super(ServerHumansNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return ['humans']
class SrvUsrNormalizer(NormalizerBase):
def __init__(self, pluginref, *args, **kwargs):
subnorms = kwargs.setdefault('sub_normalizers', [])
subnorms += [
SrvUsrCfgNormalizer(pluginref),
]
self._add_defaultsetter(kwargs,
'pw_access', DefaultSetterConstant(True)
)
super(SrvUsrNormalizer, self).__init__(
pluginref, *args, **kwargs
)
@property
def config_path(self):
return [SUBDICT_METAKEY_ANY]
def _handle_specifics_postsub(self, cfg, my_subcfg, cfgpath_abs):
usr_roles = my_subcfg.get('roles', None)
if usr_roles:
for ur in usr_roles:
user_role_to_cfg(my_subcfg['config']['username'], ur,
self.get_parentcfg(cfg, cfgpath_abs, level=3)
)
return my_subcfg
class SrvUsrCfgNormalizer(NormalizerNamed):
def __init__(self, pluginref, *args, **kwargs):
super(SrvUsrCfgNormalizer, self).__init__(
pluginref, *args, mapkey_lvl=-2, **kwargs
)
self.default_setters['name'] = DefaultSetterOtherKey('username')
@property
def config_path(self):
return ['config']
@property
def name_key(self):
return 'username'
def _handle_specifics_presub(self, cfg, my_subcfg, cfgpath_abs):
mail = my_subcfg.get('email', None)
if not mail:
            # if no mail address is explicitly given, check whether a mail
            # template is specified for the server; if so, use it to
            # create the address with the username as a parameter
tmp = self.get_parentcfg(
cfg, cfgpath_abs, level=3
).get('mail_template', None)
if tmp:
my_subcfg['email'] = tmp.format(
my_subcfg['username'].replace('_', '-')
)
return my_subcfg
class ActionModule(ConfigNormalizerBaseMerger):
def __init__(self, *args, **kwargs):
super(ActionModule, self).__init__(ConfigRootNormalizer(self),
*args, default_merge_vars=['gitlab_cfg_defaults'],
extra_merge_vars_ans=['extra_gitlab_config_maps'],
**kwargs
)
self._supports_check_mode = False
self._supports_async = False
@property
def my_ansvar(self):
return 'gitlab_cfg'
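# A rough sketch of the configuration shape these normalizers walk, inferred from the
# config_path chains above (the instance, user, and group names are placeholders; the
# real schema may differ):
#
#   servers:
#     instances:
#       my_gitlab:
#         users:
#           humans:
#             jdoe:
#               config: {email: jdoe@example.org}
#               roles:
#                 - {path: mygroup/mysubgroup, level: developer}
#           bots: {}
#         roles:
#           mygroup:
#             members: {developer: [jdoe]}
#             subroles:
#               mysubgroup: {}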
| 27.589595
| 202
| 0.626859
| 903
| 9,546
| 6.314507
| 0.220377
| 0.068397
| 0.096633
| 0.052613
| 0.414241
| 0.409505
| 0.375307
| 0.352859
| 0.291477
| 0.272185
| 0
| 0.001144
| 0.267232
| 9,546
| 345
| 203
| 27.669565
| 0.81401
| 0.056149
| 0
| 0.372807
| 0
| 0
| 0.054195
| 0.002671
| 0
| 0
| 0
| 0
| 0.004386
| 1
| 0.153509
| false
| 0
| 0.026316
| 0.061404
| 0.337719
| 0.004386
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b240627551477bf7c6382038b724993aeef7b0b
| 1,416
|
py
|
Python
|
microservices/validate/tools/dynamodb.py
|
clodonil/pipeline_aws_custom
|
8ca517d0bad48fe528461260093f0035f606f9be
|
[
"Apache-2.0"
] | null | null | null |
microservices/validate/tools/dynamodb.py
|
clodonil/pipeline_aws_custom
|
8ca517d0bad48fe528461260093f0035f606f9be
|
[
"Apache-2.0"
] | null | null | null |
microservices/validate/tools/dynamodb.py
|
clodonil/pipeline_aws_custom
|
8ca517d0bad48fe528461260093f0035f606f9be
|
[
"Apache-2.0"
] | null | null | null |
"""
Tools de integração com o Dynamodb
"""
import boto3
import botocore
import logging
import datetime
import json
import copy
import time
import os
class DyConnect:
def __init__(self, table, region):
self.table = table
self.region = region
    def connect(self):
        try:
            dydb = boto3.resource('dynamodb', region_name=self.region)
            conn = dydb.Table(self.table)
            return conn
        except Exception:
            print("Problem connecting to DynamoDB")
            logging.critical("Problem connecting to DynamoDB")
            return False
def dynamodb_save(self, dados):
conn = self.connect()
if conn:
retorno = conn.put_item(Item=dados)
def dynamodb_query(self, query):
conn = self.connect()
return conn.get_item(Key=query)
def get_dy_template(template_name):
newtemplate = DyConnect(dynamodb['template'], aws_region)
query = {'name': template_name}
stages = newtemplate.dynamodb_query(query)
if 'Item' in stages:
if 'details' in stages['Item']:
return stages['Item']['details']
return False
def get_sharedlibrary_release():
newtemplate = DyConnect(dynamodb['template'], aws_region)
query = {'name': 'sharedlibrary'}
version = newtemplate.dynamodb_query(query)
if 'Item' in version:
return version['Item']['release']
return False
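# Minimal usage sketch; table name and region are placeholders. Note that the two
# helper functions above also rely on module-level `dynamodb` and `aws_region`
# settings that are expected to be defined elsewhere in the service.
#
#   conn = DyConnect('pipeline-templates', 'us-east-1')
#   conn.dynamodb_save({'name': 'sharedlibrary', 'release': '1.0.0'})
#   print(conn.dynamodb_query({'name': 'sharedlibrary'}))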
| 24.413793
| 70
| 0.639124
| 164
| 1,416
| 5.402439
| 0.335366
| 0.030474
| 0.038375
| 0.045147
| 0.268623
| 0.205418
| 0.205418
| 0.121896
| 0
| 0
| 0
| 0.001908
| 0.259887
| 1,416
| 57
| 71
| 24.842105
| 0.843511
| 0.024011
| 0
| 0.162791
| 0
| 0
| 0.10917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.139535
| false
| 0
| 0.186047
| 0
| 0.511628
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b24d64a863b721f8c91dae3e401a33b896a0b31
| 853
|
py
|
Python
|
scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py
|
LZC6244/scrapy_ddiy
|
1bf7cdd382afd471af0bf7069b377fb364dc4730
|
[
"MIT"
] | 9
|
2021-05-17T02:55:16.000Z
|
2022-03-28T08:36:50.000Z
|
scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py
|
LZC6244/scrapy_ddiy
|
1bf7cdd382afd471af0bf7069b377fb364dc4730
|
[
"MIT"
] | null | null | null |
scrapy_ddiy/spiders/GlidedSky/glided_sky_001.py
|
LZC6244/scrapy_ddiy
|
1bf7cdd382afd471af0bf7069b377fb364dc4730
|
[
"MIT"
] | 1
|
2022-01-23T06:28:31.000Z
|
2022-01-23T06:28:31.000Z
|
# -*- coding: utf-8 -*-
from scrapy import Request
from scrapy_ddiy.utils.spiders.ddiy_base import DdiyBaseSpider
class GlidedSky001Spider(DdiyBaseSpider):
name = 'glided_sky_001'
    description = 'GlidedSky crawler - Basic 1'
start_url = 'http://www.glidedsky.com/level/web/crawler-basic-1'
custom_settings = {
'COOKIES_ENABLED': True,
'DOWNLOADER_MIDDLEWARES': {
'scrapy_ddiy.spiders.GlidedSky.glided_sky_downloadmiddleware.GlidedSkyMiddleware': 589,
},
}
def start_requests(self):
yield Request(url=self.start_url, callback=self.parse)
def parse(self, response, **kwargs):
all_number = [int(i) for i in
response.xpath('//div[@class="card-body"]//div[@class="col-md-1"]/text()').getall()]
        self.logger.info(f'Sum of web numbers is {sum(all_number)}')
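# Run sketch, assuming a standard Scrapy project layout with this spider registered:
#
#   scrapy crawl glided_sky_001
#
# The parse callback sums every number rendered in the page's col-md-1 cells and logs
# the total.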
| 34.12
| 106
| 0.657679
| 104
| 853
| 5.25
| 0.673077
| 0.03663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019146
| 0.203986
| 853
| 24
| 107
| 35.541667
| 0.784978
| 0.024619
| 0
| 0
| 0
| 0.055556
| 0.350602
| 0.189157
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b25fa954e2aca18ad4da138b448689002685921
| 5,125
|
py
|
Python
|
datasets/celeba/celeba_dataset.py
|
google/joint_vae
|
984f456d1a38c6b27e23433aef241dea56f53384
|
[
"Apache-2.0"
] | 35
|
2017-12-15T12:58:15.000Z
|
2020-09-27T05:48:50.000Z
|
datasets/celeba/celeba_dataset.py
|
google/joint_vae
|
984f456d1a38c6b27e23433aef241dea56f53384
|
[
"Apache-2.0"
] | null | null | null |
datasets/celeba/celeba_dataset.py
|
google/joint_vae
|
984f456d1a38c6b27e23433aef241dea56f53384
|
[
"Apache-2.0"
] | 11
|
2017-12-08T06:07:30.000Z
|
2021-10-31T10:36:05.000Z
|
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Provides data for the mnist with attributes dataset.
Provide data loading utilities for an augmented version of the
MNIST dataset which contains the following attributes:
1. Location (digits are translated on a canvas and placed around
one of four locations/regions in the canvas). Each location
is a gaussian placed at four quadrants of the canvas.
2. Scale (We vary scale from 0.4 to 1.0), with two gaussians
   placed at 0.5 +- 0.1 and 0.9 +- 0.1 respectively.
3. Orientation: we vary orientation from -90 to +90 degrees,
sampling actual values from gaussians at +30 +- 10 and
   -30 +-10. On a third of the occasions we don't orient the
   digit at all, which means a rotation of 0 degrees.
The original data after transformations is binarized as per the
procedure described in the following paper:
Salakhutdinov, Ruslan, and Iain Murray. 2008. ``On the Quantitative Analysis of
Deep Belief Networks.'' In Proceedings of the 25th International Conference on
Machine Learning, 872-79.
Author: vrama@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from datasets.celeba.image_decoder import ImageDecodeProcess
# Only provides option to load the binarized version of the dataset.
_FILE_PATTERN = '%s-*'
_SPLIT_TYPE = 'iid'
_DATASET_DIR = '/srv/share/datasets/celeba_for_tf_ig'
_SPLITS_TO_SIZES = {'train': 162770, 'val': 19867, 'test': 19962}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [218 x 178 x 3] RGB image.',
'labels': 'Attributes corresponding to the image.',
}
_NUM_CLASSES_PER_ATTRIBUTE = tuple([2]*18)
def get_split(split_name='train',
split_type="iid",
dataset_dir=None,
image_length=64,
num_classes_per_attribute=None):
"""Gets a dataset tuple with instructions for reading 2D shapes data.
Args:
split_name: A train/test split name.
split_type: str, type of split being loaded "iid" or "comp"
dataset_dir: The base directory of the dataset sources.
    num_classes_per_attribute: The number of labels for the classification
problem corresponding to each attribute. For example, if the first
attribute is "shape" and there are three possible shapes, then
      provide a value 3 in the first index, and so on.
Returns:
A `Dataset` namedtuple.
metadata: A dictionary with some metadata about the dataset we just
constructed.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
  if split_type != "iid":
raise ValueError("Only IID split available for CelebA.")
if num_classes_per_attribute is None:
num_classes_per_attribute = _NUM_CLASSES_PER_ATTRIBUTE
if dataset_dir is None or dataset_dir == '':
dataset_dir = _DATASET_DIR
# Load attribute label map file.
label_map_json = os.path.join(dataset_dir,
'attribute_label_map.json')
file_pattern = os.path.join(dataset_dir, _FILE_PATTERN % split_name)
tf.logging.info('Loading from %s file.' % (file_pattern))
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/labels': tf.FixedLenFeature([len(num_classes_per_attribute)], tf.int64),
}
# TODO(vrama): See
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py#L270
# For where changes would need to be made to preprocess the images which
# get loaded.
items_to_handlers = {
'image': ImageDecodeProcess(shape=[218, 178, 3], image_length=64),
'labels': tfexample_decoder.Tensor('image/labels'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
metadata = {
'num_classes_per_attribute': num_classes_per_attribute,
'split_type': _SPLIT_TYPE,
'label_map_json': label_map_json,
}
return dataset.Dataset(
data_sources=file_pattern,
reader=tf.TFRecordReader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS), metadata
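# Minimal usage sketch under the TF 1.x slim data pipeline; the provider call follows
# the usual slim pattern and is not taken from this repository:
#
#   from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
#   celeba_train, metadata = get_split('train')
#   provider = dataset_data_provider.DatasetDataProvider(celeba_train, shuffle=True)
#   image, labels = provider.get(['image', 'labels'])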
| 36.091549
| 123
| 0.724293
| 729
| 5,125
| 4.921811
| 0.390947
| 0.025084
| 0.032609
| 0.055184
| 0.1034
| 0.079989
| 0.04961
| 0.04961
| 0
| 0
| 0
| 0.022082
| 0.195902
| 5,125
| 141
| 124
| 36.347518
| 0.84858
| 0.506341
| 0
| 0
| 0
| 0
| 0.149574
| 0.034455
| 0
| 0
| 0
| 0.007092
| 0
| 1
| 0.017544
| false
| 0
| 0.140351
| 0
| 0.175439
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b269f5ba9e2d6a392abd625b09ccdc699507f3d
| 1,303
|
py
|
Python
|
jorldy/manager/log_manager.py
|
kan-s0/JORLDY
|
44989cf415196604a1ad0383b34085dee6bb1c51
|
[
"Apache-2.0"
] | null | null | null |
jorldy/manager/log_manager.py
|
kan-s0/JORLDY
|
44989cf415196604a1ad0383b34085dee6bb1c51
|
[
"Apache-2.0"
] | null | null | null |
jorldy/manager/log_manager.py
|
kan-s0/JORLDY
|
44989cf415196604a1ad0383b34085dee6bb1c51
|
[
"Apache-2.0"
] | null | null | null |
import os
import datetime, time
import imageio
from pygifsicle import optimize
from torch.utils.tensorboard import SummaryWriter
class LogManager:
def __init__(self, env, id, experiment=None):
self.id = id
now = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
self.path = (
f"./logs/{experiment}/{env}/{id}/{now}/"
if experiment
else f"./logs/{env}/{id}/{now}/"
)
self.writer = SummaryWriter(self.path)
self.stamp = time.time()
def write(self, scalar_dict, frames, step):
for key, value in scalar_dict.items():
self.writer.add_scalar(f"{self.id}/" + key, value, step)
self.writer.add_scalar("all/" + key, value, step)
if "score" in key:
time_delta = int(time.time() - self.stamp)
self.writer.add_scalar(f"{self.id}/{key}_per_time", value, time_delta)
self.writer.add_scalar(f"all/{key}_per_time", value, time_delta)
if len(frames) > 0:
score = scalar_dict["score"]
write_path = os.path.join(self.path, f"{step:010d}_{score}.gif")
imageio.mimwrite(write_path, frames, fps=60)
optimize(write_path)
print(f"...Record episode to {write_path}...")
| 36.194444
| 86
| 0.583269
| 171
| 1,303
| 4.309942
| 0.368421
| 0.067843
| 0.070556
| 0.103121
| 0.166893
| 0.139756
| 0.078697
| 0.078697
| 0
| 0
| 0
| 0.006322
| 0.271681
| 1,303
| 35
| 87
| 37.228571
| 0.770285
| 0
| 0
| 0
| 0
| 0
| 0.153492
| 0.082886
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.166667
| 0
| 0.266667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b27daa674cb67e0f7a35c3fcd65be25c5a4c1db
| 2,676
|
py
|
Python
|
lib/SeparateDriver/ASRDriverParts/UNIInterface.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/SeparateDriver/ASRDriverParts/UNIInterface.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/SeparateDriver/ASRDriverParts/UNIInterface.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | 1
|
2020-04-02T01:17:43.000Z
|
2020-04-02T01:17:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: ASRDriverParts/UNIInterface.py
'''
Parts Module for ASR driver UNI interface configuration
'''
import GlobalModule
from EmCommonLog import decorater_log
from ASRDriverParts.InterfaceBase import InterfaceBase
class UNIInterface(InterfaceBase):
'''
    Parts class for ASR driver UNI interface configuration
'''
@decorater_log
def __init__(self,
vrf_name=None,
if_name=None,
vlan_id=None,
ip_address=None,
subnet_mask=None,
vip_ip_address=None,
hsrp_id=None,
mtu=None,
is_active=True):
'''
        Constructor
'''
super(UNIInterface, self).__init__(vrf_name=vrf_name,
if_name=if_name)
self.vlan_id = vlan_id
self.ip_address = ip_address
self.subnet_mask = subnet_mask
self.vip_ip_address = vip_ip_address
self.hsrp_id = hsrp_id
self.mtu = mtu
self.is_active = is_active
@decorater_log
def output_add_command(self):
'''
Command line to add configuration is output.
'''
parame = self._get_param()
self._interface_common_start()
self._append_add_command("standby version 2")
comm_txt = "standby %(hsrp_id)s ip %(vip_ip_address)s"
self._append_add_command(comm_txt, parame)
if self.is_active:
comm_txt = "standby %(hsrp_id)s priority 105"
self._append_add_command(comm_txt, parame)
comm_txt = "standby %(hsrp_id)s preempt"
self._append_add_command(comm_txt, parame)
comm_txt = "ip mtu %(mtu)s"
self._append_add_command(comm_txt, parame)
self._interface_common_end()
GlobalModule.EM_LOGGER.debug(
"uni if command = %s" % (self._tmp_add_command,))
return self._tmp_add_command
@decorater_log
def _get_param(self):
'''
Parameter is acquired from attribute.(dict type)
'''
tmp_param = super(UNIInterface, self)._get_param()
tmp_param.update(
{
"vlan_id": self.vlan_id,
"ip_address": self.ip_address,
"subnet_mask": self.subnet_mask,
"vip_ip_address": self.vip_ip_address,
"hsrp_id": self.hsrp_id,
"mtu": self.mtu,
}
)
return tmp_param
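# Minimal usage sketch (all values are placeholders):
#
#   uni = UNIInterface(vrf_name='VRF1', if_name='GigabitEthernet0/0/1', vlan_id=100,
#                      ip_address='192.0.2.1', subnet_mask='255.255.255.0',
#                      vip_ip_address='192.0.2.254', hsrp_id=1, mtu=1500,
#                      is_active=True)
#   commands = uni.output_add_command()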
| 33.037037
| 63
| 0.567638
| 300
| 2,676
| 4.716667
| 0.296667
| 0.069965
| 0.050883
| 0.070671
| 0.195053
| 0.195053
| 0.104594
| 0.104594
| 0.056537
| 0
| 0
| 0.005152
| 0.34716
| 2,676
| 80
| 64
| 33.45
| 0.804808
| 0.133782
| 0
| 0.125
| 0
| 0
| 0.09426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.053571
| 0
| 0.160714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2847d0ef2aafced05fa68a40e983a929d467d0
| 6,003
|
py
|
Python
|
tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py
|
AnthonyQuantum/open_model_zoo
|
7d235755e2d17f6186b11243a169966e4f05385a
|
[
"Apache-2.0"
] | 4
|
2021-04-21T02:38:04.000Z
|
2021-10-13T12:15:33.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py
|
AnthonyQuantum/open_model_zoo
|
7d235755e2d17f6186b11243a169966e4f05385a
|
[
"Apache-2.0"
] | 6
|
2020-11-13T19:02:47.000Z
|
2022-03-12T00:43:24.000Z
|
tools/accuracy_checker/accuracy_checker/annotation_converters/mnist.py
|
AnthonyQuantum/open_model_zoo
|
7d235755e2d17f6186b11243a169966e4f05385a
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..config import PathField, BoolField
from ..representation import ClassificationAnnotation
from ..utils import read_csv, check_file_existence, read_json
from .format_converter import BaseFormatConverter, ConverterReturn
try:
from PIL import Image
except ImportError:
Image = None
class MNISTCSVFormatConverter(BaseFormatConverter):
"""
MNIST CSV dataset converter. All annotation converters should be derived from BaseFormatConverter class.
"""
# register name for this converter
# this name will be used for converter class look up
__provider__ = 'mnist_csv'
annotation_types = (ClassificationAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'annotation_file': PathField(description="Path to csv file which contain dataset."),
'convert_images': BoolField(
optional=True,
default=False,
description="Allows to convert images from pickle file to user specified directory."
),
'converted_images_dir': PathField(
optional=True, is_directory=True, check_exists=False, description="Path to converted images location."
),
'dataset_meta_file': PathField(
description='path to json file with dataset meta (e.g. label_map, color_encoding)', optional=True
)
})
return configuration_parameters
def configure(self):
"""
This method is responsible for obtaining the necessary parameters
for converting from the command line or config.
"""
self.test_csv_file = self.get_value_from_config('annotation_file')
self.converted_images_dir = self.get_value_from_config('converted_images_dir')
self.convert_images = self.get_value_from_config('convert_images')
if self.convert_images and not self.converted_images_dir:
self.converted_images_dir = self.test_csv_file.parent / 'converted_images'
if not self.converted_images_dir.exists():
self.converted_images_dir.mkdir(parents=True)
if self.convert_images and Image is None:
raise ValueError(
"conversion mnist images requires Pillow installation, please install it before usage"
)
self.dataset_meta = self.get_value_from_config('dataset_meta_file')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
"""
This method is executed automatically when convert.py is started.
All arguments are automatically got from command line arguments or config file in method configure
Returns:
annotations: list of annotation representation objects.
meta: dictionary with additional dataset level metadata.
"""
annotations = []
check_images = check_content and not self.convert_images
meta = self.generate_meta()
labels_to_id = meta['label_map']
content_errors = None
if check_content:
self.converted_images_dir = self.converted_images_dir or self.test_csv_file.parent / 'converted_images'
if self.converted_images_dir and check_content:
if not self.converted_images_dir.exists():
content_errors = ['{}: does not exist'.format(self.converted_images_dir)]
check_images = False
# read original dataset annotation
annotation_table = read_csv(self.test_csv_file)
num_iterations = len(annotation_table)
for index, annotation in enumerate(annotation_table):
identifier = '{}.png'.format(index)
label = labels_to_id.get(annotation['label'], int(annotation['label']))
if self.convert_images:
image = Image.fromarray(self.convert_image(annotation))
image = image.convert("L")
image.save(str(self.converted_images_dir / identifier))
annotations.append(ClassificationAnnotation(identifier, label))
if check_images:
if not check_file_existence(self.converted_images_dir / identifier):
# add error to errors list if file not found
content_errors.append('{}: does not exist'.format(self.converted_images_dir / identifier))
if progress_callback is not None and index % progress_interval == 0:
progress_callback(index / num_iterations * 100)
return ConverterReturn(annotations, meta, content_errors)
@staticmethod
def convert_image(features):
image = np.zeros((28, 28))
column_template = '{}x{}'
for x in range(28):
for y in range(28):
pixel = int(features[column_template.format(x+1, y+1)])
image[x, y] = pixel
return image
def generate_meta(self):
if not self.dataset_meta:
return {'label_map': {str(i): i for i in range(10)}}
dataset_meta = read_json(self.dataset_meta)
label_map = dataset_meta.get('label_map')
if 'labels' in dataset_meta:
label_map = dict(enumerate(dataset_meta['labels']))
dataset_meta['label_map'] = label_map or {str(i): i for i in range(10)}
return dataset_meta
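# Sketch of how this converter is typically selected from an accuracy_checker dataset
# definition; the file names are placeholders and the exact surrounding config depends
# on the model being evaluated:
#
#   annotation_conversion:
#     converter: mnist_csv
#     annotation_file: mnist_test.csv
#     convert_images: True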
| 42.274648
| 118
| 0.670165
| 716
| 6,003
| 5.431564
| 0.317039
| 0.069427
| 0.069427
| 0.073541
| 0.163281
| 0.088455
| 0.088455
| 0.071998
| 0
| 0
| 0
| 0.006475
| 0.253873
| 6,003
| 141
| 119
| 42.574468
| 0.8618
| 0.206397
| 0
| 0.044944
| 0
| 0
| 0.123388
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.078652
| 0
| 0.224719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b28598ebd5982e3c50306026cc2ae916f9a979c
| 4,511
|
py
|
Python
|
Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py
|
davidbrownell/Common_EnvironmentEx
|
9e20b79b4de0cb472f65ac08b3de83f9ed8e2ca3
|
[
"BSL-1.0"
] | null | null | null |
Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py
|
davidbrownell/Common_EnvironmentEx
|
9e20b79b4de0cb472f65ac08b3de83f9ed8e2ca3
|
[
"BSL-1.0"
] | null | null | null |
Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/bugdialog.py
|
davidbrownell/Common_EnvironmentEx
|
9e20b79b4de0cb472f65ac08b3de83f9ed8e2ca3
|
[
"BSL-1.0"
] | 1
|
2020-08-19T17:25:22.000Z
|
2020-08-19T17:25:22.000Z
|
"""\
Dialog to show details of internal errors.
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2017 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import bugdialog_ui
import config
import log
import logging
import sys
import wx
class BugReport(bugdialog_ui.UIBugDialog):
"Dialog to show details of internal errors"
_disabled = False # Flag to prevent dialog popups during test runs.
def __init__(self):
self._disabled = getattr(sys, '_called_from_test', False)
bugdialog_ui.UIBugDialog.__init__(self, None, -1, "")
def SetContent(self, msg, exc):
"""Prepare given exception information and show it as dialog content.
msg: Short description of the action that has raised this error
exc: Caught exception (Exception instance)
see: SetContentEI()"""
if self._disabled:
return
exc_type = exc.__class__.__name__
exc_msg = str(exc)
header = self.st_header.GetLabel() % {'action': msg}
log.exception_orig(header)
self._fill_dialog(exc_msg, exc_type, header)
def SetContentEI(self, exc_type, exc_value, exc_tb, msg=_('An internal error occurred')):
"""Format given exception and add details to dialog.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
see: SetContent()"""
if self._disabled:
return
# don't use exception() because it overwrites exc_info with 1
logging.error(msg, exc_info=(exc_type, exc_value, exc_tb))
self._fill_dialog(msg, exc_type, _('An internal error occurred'))
def _fill_dialog(self, exc_msg, exc_type, header):
"""Fill the bug dialog
exc_msg: Short exception summary
exc_type: Exception type as string
header: Initial message
        see: SetContent(), SetContentEI()"""
details = log.getBufferAsString()
if not exc_msg:
exc_msg = _('No summary available')
summary = self.st_summary.GetLabel() % { 'exc_type':exc_type, 'exc_msg':exc_msg }
self.st_header.SetLabel(header)
self.st_summary.SetLabel(summary)
self.tc_details.SetValue(details)
howto = self.tc_howto_report.GetValue()
howto = howto % {'log_file': config.log_file}
self.tc_howto_report.SetValue(howto)
def OnCopy(self, event):
"Copy the dialog content to the clipboard"
text = self.tc_details.GetValue()
if not text:
return
data = wx.TextDataObject(text)
if wx.TheClipboard.Open():
wx.TheClipboard.SetData(data)
wx.TheClipboard.Close()
else:
wx.MessageBox("Unable to open the clipboard", "Error")
def ShowModal(self, **kwargs):
if getattr(sys, '_called_from_test', False):
return wx.ID_OK
        return super(BugReport, self).ShowModal(**kwargs)
def Show(msg, exc):
"""Wrapper for creating a L{BugReport} dialog and show the details of the given exception instance.
msg: Short description of the action that has raised this error
exc: Caught exception
see ShowEI(), BugReport.SetContent()"""
dialog = BugReport()
dialog.SetContent(msg, exc)
dialog.ShowModal()
dialog.Destroy()
def ShowEI(exc_type, exc_value, exc_tb, msg=None):
"""Wrapper for creating a L{BugReport} dialog and show the given exception details.
exc_type: Exception type
exc_value: Exception value
exc_tb: Exception traceback
msg: Short description of the exception
    see: Show(), BugReport.SetContent()"""
dialog = BugReport()
dialog.SetContentEI(exc_type, exc_value, exc_tb, msg)
dialog.ShowModal()
dialog.Destroy()
def ShowEnvironmentError(msg, inst):
"""Show EnvironmentError exceptions detailed and user-friendly
msg: Error message
inst: The caught exception"""
details = {'msg':msg, 'type':inst.__class__.__name__}
if inst.filename:
details['filename'] = _('Filename: %s') % inst.filename
if inst.errno is not None and inst.strerror is not None:
details['error'] = '%s - %s' % (inst.errno, inst.strerror)
else:
details['error'] = str(inst.args)
text = _("""%(msg)s
Error type: %(type)s
Error code: %(error)s
%(filename)s""") % details
wx.MessageBox(text, _('Error'), wx.OK | wx.CENTRE | wx.ICON_ERROR)
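# Minimal usage sketch (a running wx.App is assumed, as for any wxPython dialog; the
# failing action is a placeholder):
#
#   try:
#       load_project(path)
#   except Exception as exc:
#       Show("loading the project", exc)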
| 30.073333
| 103
| 0.65573
| 571
| 4,511
| 5.007005
| 0.269702
| 0.031829
| 0.024484
| 0.029381
| 0.289962
| 0.227002
| 0.19972
| 0.151102
| 0.151102
| 0.151102
| 0
| 0.004091
| 0.24141
| 4,511
| 149
| 104
| 30.275168
| 0.831385
| 0.330304
| 0
| 0.175676
| 0
| 0
| 0.124317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121622
| false
| 0
| 0.081081
| 0
| 0.283784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2bddfb3c677b2bd52d34844ad305be0f97c9b1
| 9,330
|
py
|
Python
|
challenges/day14.py
|
Jeffreyo3/AdventOfCode2020
|
8705847a04885d6489eb11acfddf2ff5702d8927
|
[
"MIT"
] | null | null | null |
challenges/day14.py
|
Jeffreyo3/AdventOfCode2020
|
8705847a04885d6489eb11acfddf2ff5702d8927
|
[
"MIT"
] | null | null | null |
challenges/day14.py
|
Jeffreyo3/AdventOfCode2020
|
8705847a04885d6489eb11acfddf2ff5702d8927
|
[
"MIT"
] | null | null | null |
"""
--- Day 14: Docking Data ---
As your ferry approaches the sea port, the captain asks for your help again. The computer system that runs this port isn't compatible with the docking program on the ferry, so the docking parameters aren't being correctly initialized in the docking program's memory.
After a brief inspection, you discover that the sea port's computer system uses a strange bitmask system in its initialization program. Although you don't have the correct decoder chip handy, you can emulate it in software!
The initialization program (your puzzle input) can either update the bitmask or write a value to memory. Values and memory addresses are both 36-bit unsigned integers. For example, ignoring bitmasks for a moment, a line like mem[8] = 11 would write the value 11 to memory address 8.
The bitmask is always given as a string of 36 bits, written with the most significant bit (representing 2^35) on the left and the least significant bit (2^0, that is, the 1s bit) on the right. The current bitmask is applied to values immediately before they are written to memory: a 0 or 1 overwrites the corresponding bit in the value, while an X leaves the bit in the value unchanged.
For example, consider the following program:
mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0
This program starts by specifying a bitmask (mask = ....). The mask it specifies will overwrite two bits in every written value: the 2s bit is overwritten with 0, and the 64s bit is overwritten with 1.
The program then attempts to write the value 11 to memory address 8. By expanding everything out to individual bits, the mask is applied as follows:
value: 000000000000000000000000000000001011 (decimal 11)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001001001 (decimal 73)
So, because of the mask, the value 73 is written to memory address 8 instead. Then, the program tries to write 101 to address 7:
value: 000000000000000000000000000001100101 (decimal 101)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001100101 (decimal 101)
This time, the mask has no effect, as the bits it overwrote were already the values the mask tried to set. Finally, the program tries to write 0 to address 8:
value: 000000000000000000000000000000000000 (decimal 0)
mask: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
result: 000000000000000000000000000001000000 (decimal 64)
64 is written to address 8 instead, overwriting the value that was there previously.
To initialize your ferry's docking program, you need the sum of all values left in memory after the initialization program completes. (The entire 36-bit address space begins initialized to the value 0 at every address.) In the above example, only two values in memory are not zero - 101 (at address 7) and 64 (at address 8) - producing a sum of 165.
Execute the initialization program. What is the sum of all values left in memory after it completes?
"""
f = open("challenges\data\day14data.txt", "r")
def processData(file):
data = []
for x in f:
x=x.strip().replace('\n', '').split(" = ")
data.append((x[0], x[1]))
return data
# Function to convert Decimal number
# to Binary number
def decimalToBinary(n):
return bin(n).replace("0b", "")
def leadingZeros(length, bin_num):
leadingZeros = length - len(bin_num)
return "0"*leadingZeros + bin_num
def initialize(commands):
memory = {}
mask = "X"*36
for c in commands:
if c[0] == "mask":
mask = c[1]
else:
address = c[0][c[0].index("[")+1:len(c[0])-1]
binaryValue = decimalToBinary(int(c[1]))
binary36 = leadingZeros(36, binaryValue)
memory[address] = ""
for i in range(len(mask)):
if mask[i] == "X":
memory[address] += binary36[i]
else:
memory[address] += mask[i]
sum = 0
for val in memory.values():
sum += int("".join(val), 2)
return sum
"""
--- Part Two ---
For some reason, the sea port's computer system still can't communicate with your ferry's docking program. It must be using version 2 of the decoder chip!
A version 2 decoder chip doesn't modify the values being written at all. Instead, it acts as a memory address decoder. Immediately before a value is written to memory, each bit in the bitmask modifies the corresponding bit of the destination memory address in the following way:
If the bitmask bit is 0, the corresponding memory address bit is unchanged.
If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1.
If the bitmask bit is X, the corresponding memory address bit is floating.
A floating bit is not connected to anything and instead fluctuates unpredictably. In practice, this means the floating bits will take on all possible values, potentially causing many memory addresses to be written all at once!
For example, consider the following program:
mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1
When this program goes to write to memory address 42, it first applies the bitmask:
address: 000000000000000000000000000000101010 (decimal 42)
mask: 000000000000000000000000000000X1001X
result: 000000000000000000000000000000X1101X
After applying the mask, four bits are overwritten, three of which are different, and two of which are floating. Floating bits take on every possible combination of values; with two floating bits, four actual memory addresses are written:
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
000000000000000000000000000000111010 (decimal 58)
000000000000000000000000000000111011 (decimal 59)
Next, the program is about to write to memory address 26 with a different bitmask:
address: 000000000000000000000000000000011010 (decimal 26)
mask: 00000000000000000000000000000000X0XX
result: 00000000000000000000000000000001X0XX
This results in an address with three floating bits, causing writes to eight memory addresses:
000000000000000000000000000000010000 (decimal 16)
000000000000000000000000000000010001 (decimal 17)
000000000000000000000000000000010010 (decimal 18)
000000000000000000000000000000010011 (decimal 19)
000000000000000000000000000000011000 (decimal 24)
000000000000000000000000000000011001 (decimal 25)
000000000000000000000000000000011010 (decimal 26)
000000000000000000000000000000011011 (decimal 27)
The entire 36-bit address space still begins initialized to the value 0 at every address, and you still need the sum of all values left in memory at the end of the program. In this example, the sum is 208.
Execute the initialization program using an emulator for a version 2 decoder chip. What is the sum of all values left in memory after it completes?
"""
def calculateCombinations(bin_address):
combinations = []
# xCount = 0
xPositions = []
for i in range(len(bin_address)):
# find each X and add its idx to a list
if bin_address[i] == "X":
xPositions.append(i)
# xCount += 1
if len(xPositions) > 0:
for i in range(2**(len(xPositions))):
# need to generate all possible combos of 0s & 1s
# w/ leading 0s
possible = decimalToBinary(i)
while len(possible) < len(xPositions):
possible = "0"+possible
combinations.append(possible)
addresses = []
for c in combinations:
# need to insert combination[i] into binary number
# current combo associated idx is in xPositions[i]
newAddress = ""
currPos = 0
for i in range(len(bin_address)):
if currPos < len(xPositions) and i == xPositions[currPos]:
newAddress += c[currPos]
currPos += 1
else:
newAddress += bin_address[i]
addresses.append(newAddress)
return addresses
def initialize_v2(commands):
memory = {}
mask = "X"*36
for c in commands:
if c[0] == "mask":
mask = c[1]
else:
address = c[0][c[0].index("[")+1:len(c[0])-1]
binaryAddress = decimalToBinary(int(address))
binary36 = leadingZeros(36, binaryAddress)
newVal = ""
for i in range(len(mask)):
if mask[i] != "0":
newVal += mask[i]
else:
newVal += binary36[i]
addresses = calculateCombinations(newVal)
for a in addresses:
memory[a] = int(c[1])
sum = 0
for val in memory.values():
sum += val
# print(memory)
return sum
data = processData(f)
# [print(d) for d in data]
sumAllValues = initialize(data)
print("Part 1:", sumAllValues)
sumAllValuesV2 = initialize_v2(data)
print("Part 2:", sumAllValuesV2)
# binary = decimalToBinary(33323)
# binary = leadingZeros(36, binary)
# print(binary)
# combos = initialize_v2([("mask", "100X100X101011111X100000100X11010011"),
# ("mem[33323]", "349380")])
# print(combos)
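# Quick check against the worked examples in the problem text above; both helpers are
# pure functions over the command list, so these can be uncommented and run directly:
#
#   example_v1 = [("mask", "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X"),
#                 ("mem[8]", "11"), ("mem[7]", "101"), ("mem[8]", "0")]
#   assert initialize(example_v1) == 165
#
#   example_v2 = [("mask", "000000000000000000000000000000X1001X"), ("mem[42]", "100"),
#                 ("mask", "00000000000000000000000000000000X0XX"), ("mem[26]", "1")]
#   assert initialize_v2(example_v2) == 208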
| 42.217195
| 386
| 0.700536
| 1,227
| 9,330
| 5.317848
| 0.264874
| 0.0259
| 0.011494
| 0.008429
| 0.209349
| 0.166897
| 0.108046
| 0.088123
| 0.070345
| 0.038927
| 0
| 0.161873
| 0.228617
| 9,330
| 221
| 387
| 42.217195
| 0.744755
| 0.375348
| 0
| 0.321429
| 0
| 0
| 0.02309
| 0.009847
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0.011905
| 0.142857
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2c276716f02206bb780210c6a91cee657ed190
| 2,524
|
py
|
Python
|
src/Dialogs/RegularPolygonDialog.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 41
|
2021-11-24T05:54:08.000Z
|
2022-03-26T10:19:30.000Z
|
src/Dialogs/RegularPolygonDialog.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 1
|
2022-02-28T04:34:51.000Z
|
2022-03-07T10:49:27.000Z
|
src/Dialogs/RegularPolygonDialog.py
|
Lovely-XPP/tkzgeom
|
bf68e139dc05f759542d6611f4dc07f4f2727b92
|
[
"MIT"
] | 10
|
2021-11-24T07:35:17.000Z
|
2022-03-25T18:42:14.000Z
|
from PyQt5 import QtWidgets, uic
from Factory import Factory
from Dialogs.DialogMacros import turn_into_free_point, free_point_checkbox
from Fill.ListWidget import fill_listWidget_with_data, set_selected_id_in_listWidget
import Constant as c
class RegularPolygonDialog(QtWidgets.QDialog):
def __init__(self, scene, data):
"""Construct RegularPolygonDialog."""
super(RegularPolygonDialog, self).__init__()
self.ui = uic.loadUi('regularpolygon.ui', self)
self.scene = scene
self.sides = 3
self.free_point = False
self.data = data
self.ui.buttonBox.accepted.connect(self.accepted)
self.ui.buttonBox.rejected.connect(self.rejected)
self.ui.sides_slider.valueChanged.connect(self.hslider_sides_func)
self.ui.checkBox.stateChanged.connect(lambda x: free_point_checkbox(self, x))
def hslider_sides_func(self, value):
"""Be slider callback function to set sides."""
self.sides = value
self.ui.sides_spin.setValue(value)
def accepted(self):
"""Create new regular polygon with settings."""
A, B = self.data
angle = -(self.sides - 2) * 180 / self.sides
polygon = [A, B]
for _ in range(self.sides - 2):
item = Factory.create_empty_item('point', c.Point.Definition.ROTATION)
definition = {'A': A, 'B': B, 'angle': angle}
id_ = Factory.next_id(item, definition, self.scene.project_data.items)
item.item["id"] = id_
item.item["definition"] = definition
if self.free_point:
item = turn_into_free_point(item, self.scene)
self.scene.project_data.add(item)
A = B
B = item.item["id"]
polygon.append(item.item["id"])
item = Factory.create_empty_item('polygon', None)
definition = polygon
item.item["id"] = Factory.next_id(item, definition, self.scene.project_data.items)
item.item["definition"] = definition
self.scene.project_data.add(item)
self.scene.project_data.recompute_canvas(*self.scene.init_canvas_dims)
current_row_old = self.scene.ui.listWidget.currentRow()
fill_listWidget_with_data(self.scene.project_data, self.scene.ui.listWidget, self.scene.current_tab_idx)
set_selected_id_in_listWidget(self.scene, current_row_old)
self.scene.edit.add_undo_item(self.scene)
def rejected(self):
"""Add no new regular polygon."""
pass
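# Minimal usage sketch (a Qt application, a populated scene, and the ids of the two
# points defining the first edge are assumed):
#
#   dialog = RegularPolygonDialog(scene, data=(point_a_id, point_b_id))
#   dialog.exec_()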
| 39.4375
| 112
| 0.660063
| 319
| 2,524
| 5.015674
| 0.291536
| 0.09
| 0.06
| 0.075
| 0.20875
| 0.11125
| 0.0775
| 0.0775
| 0.0775
| 0.0775
| 0
| 0.003606
| 0.230983
| 2,524
| 63
| 113
| 40.063492
| 0.820711
| 0.056656
| 0
| 0.083333
| 0
| 0
| 0.027119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0.020833
| 0.104167
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2cf4b5b97a007dddbfd9bea2e0b5aea5f19d54
| 576
|
py
|
Python
|
pyinfra/facts/util/distro.py
|
charles-l/pyinfra
|
1992d98ff31d41404427dbb3cc6095a7bebd4052
|
[
"MIT"
] | 1
|
2020-12-24T08:24:13.000Z
|
2020-12-24T08:24:13.000Z
|
pyinfra/facts/util/distro.py
|
charles-l/pyinfra
|
1992d98ff31d41404427dbb3cc6095a7bebd4052
|
[
"MIT"
] | null | null | null |
pyinfra/facts/util/distro.py
|
charles-l/pyinfra
|
1992d98ff31d41404427dbb3cc6095a7bebd4052
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import os
import distro
def get_distro_info(root_dir):
# We point _UNIXCONFDIR to root_dir
old_value = distro._UNIXCONFDIR
distro._UNIXCONFDIR = os.path.join(root_dir, 'etc')
obj = distro.LinuxDistribution(include_lsb=False, include_uname=False)
# NOTE: The parsing of LinuxDistribution distro information is done in a lazy way.
# This will force the parsing to happen before we restore the old value of _UNIXCONFDIR.
_ = obj.info()
distro._UNIXCONFDIR = old_value
return obj
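# Minimal usage sketch: on a Linux host the release files live under /etc, so passing
# '/' as root_dir reads the local machine's own distribution information.
#
#   info = get_distro_info('/')
#   print(info.id(), info.version())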
| 27.428571
| 92
| 0.751736
| 80
| 576
| 5.15
| 0.575
| 0.050971
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189236
| 576
| 20
| 93
| 28.8
| 0.882227
| 0.348958
| 0
| 0
| 0
| 0
| 0.008086
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2d1574c0e19ae7863baaa36967e1b1432a37dd
| 3,206
|
py
|
Python
|
appium/webdriver/common/multi_action.py
|
salabogdan/python-client
|
66208fdbbc8f0a8b0e90376b404135b57e797fa5
|
[
"Apache-2.0"
] | 1
|
2021-07-23T03:56:49.000Z
|
2021-07-23T03:56:49.000Z
|
appium/webdriver/common/multi_action.py
|
ayvnkhan/python-client
|
ba408b74f0d30fc06a51e77f68fc5cfd4ac8f99a
|
[
"Apache-2.0"
] | 11
|
2019-07-16T04:21:22.000Z
|
2021-02-24T15:11:02.000Z
|
appium/webdriver/common/multi_action.py
|
ki4070ma/python-client
|
d5f29f08a2fe9b5a9cca4162726c7cfb4faa42e9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The Selenium team implemented something like the Multi Action API in the form of
# "action chains" (https://code.google.com/p/selenium/source/browse/py/selenium/webdriver/common/action_chains.py).
# These do not quite work for this situation, and do not allow for ad hoc action
# chaining as the spec requires.
import copy
from typing import TYPE_CHECKING, Dict, List, Optional, TypeVar, Union
from appium.webdriver.mobilecommand import MobileCommand as Command
if TYPE_CHECKING:
from appium.webdriver.common.touch_action import TouchAction
from appium.webdriver.webdriver import WebDriver
from appium.webdriver.webelement import WebElement
T = TypeVar('T', bound='MultiAction')
class MultiAction:
def __init__(self, driver: 'WebDriver', element: Optional['WebElement'] = None) -> None:
self._driver = driver
self._element = element
self._touch_actions: List['TouchAction'] = []
def add(self, *touch_actions: 'TouchAction') -> None:
"""Add TouchAction objects to the MultiAction, to be performed later.
Args:
touch_actions: one or more TouchAction objects describing a chain of actions to be performed by one finger
Usage:
| a1 = TouchAction(driver)
| a1.press(el1).move_to(el2).release()
| a2 = TouchAction(driver)
| a2.press(el2).move_to(el1).release()
| MultiAction(driver).add(a1, a2)
Returns:
`MultiAction`: Self instance
"""
for touch_action in touch_actions:
if self._touch_actions is None:
self._touch_actions = []
self._touch_actions.append(copy.copy(touch_action))
def perform(self: T) -> T:
"""Perform the actions stored in the object.
Usage:
| a1 = TouchAction(driver)
| a1.press(el1).move_to(el2).release()
| a2 = TouchAction(driver)
| a2.press(el2).move_to(el1).release()
| MultiAction(driver).add(a1, a2).perform()
Returns:
`MultiAction`: Self instance
"""
self._driver.execute(Command.MULTI_ACTION, self.json_wire_gestures)
# clean up and be ready for the next batch
self._touch_actions = []
return self
@property
def json_wire_gestures(self) -> Dict[str, Union[List, str]]:
actions = []
for action in self._touch_actions:
actions.append(action.json_wire_gestures)
if self._element is not None:
return {'actions': actions, 'elementId': self._element.id}
return {'actions': actions}
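# For reference, the payload handed to Command.MULTI_ACTION therefore looks like
# {'actions': [<finger 1 gestures>, <finger 2 gestures>, ...]}, plus an
# 'elementId' key when the MultiAction was bound to a specific element.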
| 35.622222
| 118
| 0.663132
| 409
| 3,206
| 5.0978
| 0.393643
| 0.051799
| 0.053717
| 0.015348
| 0.114149
| 0.114149
| 0.114149
| 0.114149
| 0.114149
| 0.114149
| 0
| 0.009889
| 0.242982
| 3,206
| 89
| 119
| 36.022472
| 0.849197
| 0.498752
| 0
| 0.066667
| 0
| 0
| 0.053597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2de30f1514c024be028007b3c7a182b53eba57
| 8,652
|
py
|
Python
|
src/visu/visualizer.py
|
JonasFrey96/PLR2
|
a0498e6ff283a27c6db11b3d57d3b3100026f069
|
[
"MIT"
] | null | null | null |
src/visu/visualizer.py
|
JonasFrey96/PLR2
|
a0498e6ff283a27c6db11b3d57d3b3100026f069
|
[
"MIT"
] | 2
|
2020-06-30T17:33:54.000Z
|
2020-07-07T18:12:21.000Z
|
src/visu/visualizer.py
|
JonasFrey96/PLR2
|
a0498e6ff283a27c6db11b3d57d3b3100026f069
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
import os
from PIL import Image
from visu.helper_functions import save_image
from scipy.spatial.transform import Rotation as R
from helper import re_quat
import copy
import torch
import k3d
class Visualizer():
def __init__(self, p_visu, writer=None):
if p_visu[-1] != '/':
p_visu = p_visu + '/'
self.p_visu = p_visu
self.writer = writer
if not os.path.exists(self.p_visu):
os.makedirs(self.p_visu)
def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2):
"""
tag := tensorboard tag
epoch := tensorboard epoch
store := true -> stores the image to the standard path
path := != None creates the path and stores to path/tag.png
img := original_image, [width, height, RGB]
points:= points of the object model [length,x,y,z]
trans: [1,3]
rot: [3,3]
"""
img_d = copy.deepcopy(img)
points = np.dot(points, rot_mat.T)
points = np.add(points, trans[0, :])
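# project every transformed model point with the pinhole model:
# u = fx * x / z + cx, v = fy * y / z + cy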
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
except:
#print("out of bounds")
pass
if jupyter:
display(Image.fromarray(img_d))
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
#print("IMAGE D:" ,img_d,img_d.shape )
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None):
"""
tag := tensorboard tag
epoch := tensorboard epoch
store := true -> stores the image to the standard path
path := != None creates the path and stores to path/tag.png
img := original_image, [width, height, RGB]
"""
if isinstance(b, dict):
rmin = b['rmin']
rmax = b['rmax']
cmin = b['cmin']
cmax = b['cmax']
# ToDo check Input data
img_d = np.array(copy.deepcopy(img))
c = [0, 0, 255]
rmin_mi = max(0, rmin - str_width)
rmin_ma = min(img_d.shape[0], rmin + str_width)
rmax_mi = max(0, rmax - str_width)
rmax_ma = min(img_d.shape[0], rmax + str_width)
cmin_mi = max(0, cmin - str_width)
cmin_ma = min(img_d.shape[1], cmin + str_width)
cmax_mi = max(0, cmax - str_width)
cmax_ma = min(img_d.shape[1], cmax + str_width)
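# paint the four edges of the box as stripes str_width pixels thick, clamped
# to the image bounds computed above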
img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c
img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c
img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c
img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c
print("STORE", store)
img_d = img_d.astype(np.uint8)
if store:
#store_ar = (img_d* 255).round().astype(np.uint8)
save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu)
if jupyter:
display(Image.fromarray(img_d))
if self.writer is not None:
self.writer.add_image(tag, img_d.astype(
np.uint8), global_step=epoch, dataformats='HWC')
def plot_pcd(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'):
if c1 == 'b':
k = 245
elif c1 == 'g':
k = 25811000
elif c1 == 'r':
k = 11801000
elif c1 == 'black':
k = 2580
else:
k = 2580
if c2 == 'b':
k2 = 245
elif c2 == 'g':
k2 = 25811000
elif c2 == 'r':
k2 = 11801000
elif c2 == 'black':
k2 = 2580
else:
k2 = 2580
col1 = np.ones(x.shape[0]) * k
col2 = np.ones(y.shape[0]) * k2
plot = k3d.plot(name='points')
plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
class SequenceVisualizer():
def __init__(self, seq_data, images_path, output_path=None):
self.seq_data = seq_data
self.images_path = images_path
self.output_path = output_path
def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'):
seq_data = self.seq_data
images_path = self.images_path
output_path = self.output_path
frame = seq_data[seq_no][frame_no]
unique_desig = frame['dl_dict']['unique_desig'][0]
if pose_type == 'ground_truth':
# ground truth
t = frame['dl_dict']['gt_trans'].reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'filtered':
# filter pred
t = np.array(frame['filter_pred']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['filter_pred']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
elif pose_type == 'final_pred_obs':
# final pred
t = np.array(frame['final_pred_obs']['t']).reshape(1, 3)
rot_quat = re_quat(copy.deepcopy(
frame['final_pred_obs']['r_wxyz']), 'wxyz')
rot = R.from_quat(rot_quat).as_matrix()
else:
raise Exception('Pose type not implemented.')
w = 2
if type(unique_desig) != str:
im = np.array(Image.open(
images_path + unique_desig[0] + '-color.png')) # ycb
else:
im = np.array(Image.open(
images_path + unique_desig + '.png')) # laval
img_d = copy.deepcopy(im)
dl_dict = frame['dl_dict']
points = copy.deepcopy(
seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :])
points = np.dot(points, rot.T)
points = np.add(points, t[0, :])
cam_cx = dl_dict['cam_cal'][0][0]
cam_cy = dl_dict['cam_cal'][0][1]
cam_fx = dl_dict['cam_cal'][0][2]
cam_fy = dl_dict['cam_cal'][0][3]
for i in range(0, points.shape[0]):
p_x = points[i, 0]
p_y = points[i, 1]
p_z = points[i, 2]
u = int(((p_x / p_z) * cam_fx) + cam_cx)
v = int(((p_y / p_z) * cam_fy) + cam_cy)
try:
img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0
img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255
img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0
except:
#print("out of bounds")
pass
img_disp = Image.fromarray(img_d)
if jupyter:
display(img_disp)
if store:
outpath = output_path + \
'{}_{}_{}.png'.format(pose_type, seq_no, frame_no)
img_disp.save(outpath, "PNG", compress_level=1)
print("Saved image to {}".format(outpath))
def save_sequence(self, seq_no, pose_type='filtered', name=''):
for fn in range(len(self.seq_data)):
self.plot_points_on_image(seq_no, fn, False, True, pose_type)
if name:
video_name = '{}_{}_{}'.format(name, pose_type, seq_no)
else:
video_name = '{}_{}'.format(pose_type, seq_no)
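# NOTE: the ffmpeg input pattern is hard-coded to 'filtered_<seq>_%d.png', so
# the video only picks up frames rendered with pose_type='filtered'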
cmd = "cd {} && ffmpeg -r 10 -i ./filtered_{}_%d.png -vcodec mpeg4 -y {}.mp4".format(
self.output_path, seq_no, video_name)
os.system(cmd)
| 33.929412
| 194
| 0.532016
| 1,278
| 8,652
| 3.400626
| 0.165102
| 0.027612
| 0.012425
| 0.008283
| 0.502531
| 0.413254
| 0.377358
| 0.377358
| 0.362172
| 0.344225
| 0
| 0.041617
| 0.325127
| 8,652
| 254
| 195
| 34.062992
| 0.702689
| 0.085992
| 0
| 0.398964
| 0
| 0.005181
| 0.05912
| 0
| 0
| 0
| 0
| 0.003937
| 0
| 1
| 0.041451
| false
| 0.010363
| 0.056995
| 0
| 0.108808
| 0.010363
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2de9f9abfa7c9d1e5aab26305227c69409476d
| 3,317
|
py
|
Python
|
leetCode_Q37_serializeTree.py
|
FreesiaLikesPomelo/-offer
|
14ac73cb46d13c7f5bbc294329a14f3c5995bc7a
|
[
"Apache-2.0"
] | null | null | null |
leetCode_Q37_serializeTree.py
|
FreesiaLikesPomelo/-offer
|
14ac73cb46d13c7f5bbc294329a14f3c5995bc7a
|
[
"Apache-2.0"
] | null | null | null |
leetCode_Q37_serializeTree.py
|
FreesiaLikesPomelo/-offer
|
14ac73cb46d13c7f5bbc294329a14f3c5995bc7a
|
[
"Apache-2.0"
] | null | null | null |
'''
Interview Question 37. Serialize a Binary Tree
Please implement two functions to serialize and deserialize a binary tree.
Example:
You may serialize the following binary tree:
1
/ \
2 3
/ \
4 5
as "[1,2,3,null,null,4,5]"
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Runtime: 240 ms, beats 22.75% of all Python3 submissions
# Memory usage: 31 MB, beats 100.00% of all Python3 submissions
from typing import List  # needed for the List[TreeNode] annotations below
class Codec:
def __init__(self):
self.tree = []
self.temp = []
self.flag = 1 # when a non-None element appears again, flush self.temp into self.tree
def traByLayer(self, tree: List[TreeNode]):
if tree==[]:
return
else:
temp = tree.pop(0)
if temp!=None:
self.tree+=self.temp
self.temp = []
self.tree.append(temp.val)
tree.append(temp.left)
tree.append(temp.right)
else:
self.temp.append(None)
#print("trabylary",self.tree)
self.traByLayer(tree)
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root==None:
return ''
tree = [root]
self.traByLayer(tree)
print(str(self.tree))
return str(self.tree)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
#data = '[1, 2, 3, 1, 3, 2, 4]'
if data=='':
return None
start = 0
end = 0
tree = []
for i in range(len(data)):
if data[i]==',' or data[i]==']':
start = end+1
end = i
if data[start:end]!=' None':
#print(start,end,data[start:end])
tree.append(int(data[start:end]))
else:
tree.append(None)
#print("Tree",tree,"then build the Tree")
root = TreeNode(tree.pop(0))
self.buildTreeByList([root],tree)
return root
def buildTreeByList(self,r:List[TreeNode], data: List[int]):
if r==[] or data==[]:
return
root = r.pop(0)
datalen = len(data)
if datalen==0:
return
elif datalen<=2:
#print("root",root.val,"tree",data,"datalen",datalen)
temp = data.pop(0)
if temp!=None:
root.left = TreeNode(temp)
r.append(root.left)
if data!=[]:
temp = data.pop(0)
if temp!=None:
root.right = TreeNode(temp)
r.append(root.right)
return
else:
#print("root",root.val,"tree",data,"datalen",datalen)
temp = data.pop(0)
if temp!=None:
root.left = TreeNode(temp)
r.append(root.left)
temp = data.pop(0)
if temp!=None:
root.right = TreeNode(temp)
r.append(root.right)
self.buildTreeByList(r,data)
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
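# Note: for the example tree above, serialize() produces the Python-list style
# string "[1, 2, 3, None, None, 4, 5]" (not the JSON form from the statement),
# and deserialize() parses exactly that format back into a tree.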
| 27.188525
| 81
| 0.472716
| 378
| 3,317
| 4.126984
| 0.256614
| 0.041026
| 0.019231
| 0.032051
| 0.232051
| 0.197436
| 0.197436
| 0.197436
| 0.197436
| 0.197436
| 0
| 0.024169
| 0.401266
| 3,317
| 121
| 82
| 27.413223
| 0.761329
| 0.274344
| 0
| 0.408451
| 0
| 0
| 0.003041
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0
| 0
| 0
| 0.197183
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b2f80664f980dad5a40411dc361a14a2b34e519
| 8,263
|
py
|
Python
|
CTFd/api/v1/users.py
|
MrQubo/CTFd
|
5c8ffff1412ea91ad6cf87135cb3d175a1223544
|
[
"Apache-2.0"
] | null | null | null |
CTFd/api/v1/users.py
|
MrQubo/CTFd
|
5c8ffff1412ea91ad6cf87135cb3d175a1223544
|
[
"Apache-2.0"
] | null | null | null |
CTFd/api/v1/users.py
|
MrQubo/CTFd
|
5c8ffff1412ea91ad6cf87135cb3d175a1223544
|
[
"Apache-2.0"
] | null | null | null |
from flask import session, request, abort
from flask_restplus import Namespace, Resource
from CTFd.models import (
db,
Users,
Solves,
Awards,
Tracking,
Unlocks,
Submissions,
Notifications,
)
from CTFd.utils.decorators import authed_only, admins_only, ratelimit
from CTFd.cache import clear_standings
from CTFd.utils.user import get_current_user, is_admin
from CTFd.utils.decorators.visibility import (
check_account_visibility,
check_score_visibility,
)
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.schemas.awards import AwardSchema
from CTFd.schemas.users import UserSchema
users_namespace = Namespace("users", description="Endpoint to retrieve Users")
@users_namespace.route("")
class UserList(Resource):
@check_account_visibility
def get(self):
users = Users.query.filter_by(banned=False, hidden=False)
response = UserSchema(view="user", many=True).dump(users)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@admins_only
def post(self):
req = request.get_json()
schema = UserSchema("admin")
response = schema.load(req)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.add(response.data)
db.session.commit()
if request.args.get("notify"):
name = response.data.name
password = req.get("password")
clear_standings()
response = schema.dump(response.data)
return {"success": True, "data": response.data}
@users_namespace.route("/<int:user_id>")
@users_namespace.param("user_id", "User ID")
class UserPublic(Resource):
@check_account_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
response = UserSchema(view=session.get("type", "user")).dump(user)
if response.errors:
return {"success": False, "errors": response.errors}, 400
response.data["place"] = user.place
response.data["score"] = user.score
return {"success": True, "data": response.data}
@admins_only
def patch(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
data = request.get_json()
data["id"] = user_id
schema = UserSchema(view="admin", instance=user, partial=True)
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
clear_standings()
return {"success": True, "data": response}
@admins_only
def delete(self, user_id):
Notifications.query.filter_by(user_id=user_id).delete()
Awards.query.filter_by(user_id=user_id).delete()
Unlocks.query.filter_by(user_id=user_id).delete()
Submissions.query.filter_by(user_id=user_id).delete()
Solves.query.filter_by(user_id=user_id).delete()
Tracking.query.filter_by(user_id=user_id).delete()
Users.query.filter_by(id=user_id).delete()
db.session.commit()
db.session.close()
clear_standings()
return {"success": True}
@users_namespace.route("/me")
class UserPrivate(Resource):
@authed_only
def get(self):
user = get_current_user()
response = UserSchema("self").dump(user).data
response["place"] = user.place
response["score"] = user.score
return {"success": True, "data": response}
@authed_only
def patch(self):
user = get_current_user()
data = request.get_json()
schema = UserSchema(view="self", instance=user, partial=True)
response = schema.load(data)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.commit()
response = schema.dump(response.data)
db.session.close()
clear_standings()
return {"success": True, "data": response.data}
@users_namespace.route("/me/solves")
class UserPrivateSolves(Resource):
@authed_only
def get(self):
user = get_current_user()
solves = user.get_solves(admin=True)
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(solves)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@users_namespace.route("/me/fails")
class UserPrivateFails(Resource):
@authed_only
def get(self):
user = get_current_user()
fails = user.get_fails(admin=True)
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(fails)
if response.errors:
return {"success": False, "errors": response.errors}, 400
if is_admin():
data = response.data
else:
data = []
count = len(response.data)
return {"success": True, "data": data, "meta": {"count": count}}
@users_namespace.route("/me/awards")
@users_namespace.param("user_id", "User ID")
class UserPrivateAwards(Resource):
@authed_only
def get(self):
user = get_current_user()
awards = user.get_awards(admin=True)
view = "user" if not is_admin() else "admin"
response = AwardSchema(view=view, many=True).dump(awards)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@users_namespace.route("/<user_id>/solves")
@users_namespace.param("user_id", "User ID")
class UserPublicSolves(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
solves = user.get_solves(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(solves)
if response.errors:
return {"success": False, "errors": response.errors}, 400
# return {"success": True, "data": response.data}
return {"success": True, "data": None}
@users_namespace.route("/<user_id>/fails")
@users_namespace.param("user_id", "User ID")
class UserPublicFails(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
fails = user.get_fails(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = SubmissionSchema(view=view, many=True).dump(fails)
if response.errors:
return {"success": False, "errors": response.errors}, 400
if is_admin():
data = response.data
else:
data = []
count = len(response.data)
# return {"success": True, "data": data, "meta": {"count": count}}
return {"success": True, "data": None, "meta": {"count": None}}
@users_namespace.route("/<user_id>/awards")
@users_namespace.param("user_id", "User ID or 'me'")
class UserPublicAwards(Resource):
@check_account_visibility
@check_score_visibility
def get(self, user_id):
user = Users.query.filter_by(id=user_id).first_or_404()
if (user.banned or user.hidden) and is_admin() is False:
abort(404)
awards = user.get_awards(admin=is_admin())
view = "user" if not is_admin() else "admin"
response = AwardSchema(view=view, many=True).dump(awards)
if response.errors:
return {"success": False, "errors": response.errors}, 400
# return {"success": True, "data": response.data}
return {"success": True, "data": None}
| 30.156934
| 78
| 0.634031
| 1,002
| 8,263
| 5.083832
| 0.107784
| 0.045936
| 0.028269
| 0.061837
| 0.740283
| 0.689635
| 0.668433
| 0.651551
| 0.558304
| 0.536514
| 0
| 0.009492
| 0.235024
| 8,263
| 273
| 79
| 30.267399
| 0.796393
| 0.019605
| 0
| 0.59799
| 0
| 0
| 0.077199
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065327
| false
| 0.005025
| 0.050251
| 0
| 0.281407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b33e7e0e395a01bbb9aefe040bd4c754743cdbd
| 1,005
|
py
|
Python
|
getting_started/pages.py
|
emilhe/dash-extensions-docs
|
f44edba1c955242fc503185954ea5f3be69eb122
|
[
"MIT"
] | 1
|
2022-03-20T09:50:07.000Z
|
2022-03-20T09:50:07.000Z
|
getting_started/pages.py
|
emilhe/dash-extensions-docs
|
f44edba1c955242fc503185954ea5f3be69eb122
|
[
"MIT"
] | null | null | null |
getting_started/pages.py
|
emilhe/dash-extensions-docs
|
f44edba1c955242fc503185954ea5f3be69eb122
|
[
"MIT"
] | null | null | null |
import dash_labs as dl
from dash_extensions.enrich import DashBlueprint, DashProxy, html, Output, Input
def page_name(i: int):
return f"page{i}"
def make_page(i: int):
page = DashBlueprint()
page.layout = html.Div([html.H2(f"Page {i}"), html.Button('Click me!', id='btn'), html.Div(id='log')])
@page.callback(Output('log', 'children'), Input('btn', 'n_clicks'))
def on_click(n_clicks):
return f"Hello world {n_clicks} from page {i}!"
return page
app = DashProxy(prevent_initial_callbacks=True, plugins=[dl.plugins.pages])
# Register a few pages.
n_pages = 5
for i in range(n_pages):
page = make_page(i)
page.register(app, page_name(i), prefix=str(i))
# Setup main app layout.
app_shell = [html.H1("App shell"), dl.plugins.page_container]
navigation = html.Ul([html.Li(html.A(page_name(i), href=page_name(i))) for i in range(n_pages)])
app.layout = html.Div(app_shell + [navigation], style=dict(display="block"))
if __name__ == '__main__':
app.run_server()
| 34.655172
| 106
| 0.689552
| 162
| 1,005
| 4.098765
| 0.425926
| 0.037651
| 0.054217
| 0.033133
| 0.051205
| 0.051205
| 0
| 0
| 0
| 0
| 0
| 0.003492
| 0.145274
| 1,005
| 29
| 107
| 34.655172
| 0.769499
| 0.043781
| 0
| 0
| 0
| 0
| 0.115746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.095238
| 0.095238
| 0.380952
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b34589c449dc4aced65c72c732c394afc998c68
| 8,790
|
py
|
Python
|
zaqar/transport/wsgi/v2_0/homedoc.py
|
vkmc/zaqar-websocket
|
a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b
|
[
"Apache-2.0"
] | 1
|
2015-03-22T18:41:13.000Z
|
2015-03-22T18:41:13.000Z
|
zaqar/transport/wsgi/v2_0/homedoc.py
|
vkmc/zaqar-websocket
|
a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b
|
[
"Apache-2.0"
] | null | null | null |
zaqar/transport/wsgi/v2_0/homedoc.py
|
vkmc/zaqar-websocket
|
a93c460a28e541b5cc8b425d5fb4d69e78ab9f4b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
# NOTE(kgriffs): http://tools.ietf.org/html/draft-nottingham-json-home-03
JSON_HOME = {
'resources': {
# -----------------------------------------------------------------
# Queues
# -----------------------------------------------------------------
'rel/queues': {
'href-template': '/v2/queues{?marker,limit,detailed}',
'href-vars': {
'marker': 'param/marker',
'limit': 'param/queue_limit',
'detailed': 'param/detailed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/queue': {
'href-template': '/v2/queues/{queue_name}',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['PUT', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
'rel/queue_stats': {
'href-template': '/v2/queues/{queue_name}/stats',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Messages
# -----------------------------------------------------------------
'rel/messages': {
'href-template': ('/v2/queues/{queue_name}/messages'
'{?marker,limit,echo,include_claimed}'),
'href-vars': {
'queue_name': 'param/queue_name',
'marker': 'param/marker',
'limit': 'param/messages_limit',
'echo': 'param/echo',
'include_claimed': 'param/include_claimed',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_messages': {
'href-template': '/v2/queues/{queue_name}/messages',
'href-vars': {
'queue_name': 'param/queue_name',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json'],
},
},
'rel/messages_delete': {
'href-template': '/v2/queues/{queue_name}/messages{?ids,pop}',
'href-vars': {
'queue_name': 'param/queue_name',
'ids': 'param/ids',
'pop': 'param/pop'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
'rel/message_delete': {
'href-template': '/v2/queues/{queue_name}/messages/{message_id}{?claim}', # noqa
'href-vars': {
'queue_name': 'param/queue_name',
'message_id': 'param/message_id',
'claim': 'param/claim_id'
},
'hints': {
'allow': [
'DELETE'
],
'formats': {
'application/json': {}
}
}
},
# -----------------------------------------------------------------
# Claims
# -----------------------------------------------------------------
'rel/claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/post_claim': {
'href-template': '/v2/queues/{queue_name}/claims{?limit}',
'href-vars': {
'queue_name': 'param/queue_name',
'limit': 'param/claim_limit',
},
'hints': {
'allow': ['POST'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/patch_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['PATCH'],
'formats': {
'application/json': {},
},
'accept-post': ['application/json']
},
},
'rel/delete_claim': {
'href-template': '/v2/queues/{queue_name}/claims/{claim_id}',
'href-vars': {
'queue_name': 'param/queue_name',
'claim_id': 'param/claim_id',
},
'hints': {
'allow': ['DELETE'],
'formats': {
'application/json': {},
},
},
},
}
}
ADMIN_RESOURCES = {
# -----------------------------------------------------------------
# Pools
# -----------------------------------------------------------------
'rel/pools': {
'href-template': '/v2/pools{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/pool_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/pool': {
'href-template': '/v2/pools/{pool_name}',
'href-vars': {
'pool_name': 'param/pool_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Flavors
# -----------------------------------------------------------------
'rel/flavors': {
'href-template': '/v2/flavors{?detailed,limit,marker}',
'href-vars': {
'detailed': 'param/detailed',
'limit': 'param/flavor_limit',
'marker': 'param/marker',
},
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
'rel/flavor': {
'href-template': '/v2/flavors/{flavor_name}',
'href-vars': {
'flavor_name': 'param/flavor_name',
},
'hints': {
'allow': ['GET', 'PUT', 'PATCH', 'DELETE'],
'formats': {
'application/json': {},
},
},
},
# -----------------------------------------------------------------
# Health
# -----------------------------------------------------------------
'rel/health': {
'href': '/v2/health',
'hints': {
'allow': ['GET'],
'formats': {
'application/json': {},
},
},
},
}
class Resource(object):
def __init__(self, conf):
if conf.admin_mode:
JSON_HOME['resources'].update(ADMIN_RESOURCES)
document = json.dumps(JSON_HOME, ensure_ascii=False, indent=4)
self.document_utf8 = document.encode('utf-8')
def on_get(self, req, resp, project_id):
resp.data = self.document_utf8
resp.content_type = 'application/json-home'
resp.cache_control = ['max-age=86400']
# status defaults to 200
| 31.170213
| 93
| 0.367577
| 625
| 8,790
| 5.04
| 0.2416
| 0.085714
| 0.111746
| 0.069841
| 0.551746
| 0.520635
| 0.471111
| 0.419683
| 0.311746
| 0.251111
| 0
| 0.007158
| 0.396018
| 8,790
| 281
| 94
| 31.281139
| 0.586174
| 0.169738
| 0
| 0.452586
| 0
| 0
| 0.359493
| 0.082335
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008621
| false
| 0
| 0.00431
| 0
| 0.017241
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b35baeaa7950e5538a7f5306ca85d6a854ed57e
| 82,533
|
py
|
Python
|
synapse/models/infotech.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 216
|
2017-01-17T18:52:50.000Z
|
2022-03-31T18:44:49.000Z
|
synapse/models/infotech.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 2,189
|
2017-01-17T22:31:48.000Z
|
2022-03-31T20:41:45.000Z
|
synapse/models/infotech.py
|
vertexproject/synapse
|
9712e2aee63914441c59ce6cfc060fe06a2e5920
|
[
"Apache-2.0"
] | 44
|
2017-01-17T16:50:57.000Z
|
2022-03-16T18:35:52.000Z
|
import asyncio
import logging
import synapse.exc as s_exc
import synapse.lib.types as s_types
import synapse.lib.module as s_module
import synapse.lib.version as s_version
logger = logging.getLogger(__name__)
class Cpe23Str(s_types.Str):
'''
CPE 2.3 Formatted String
https://nvlpubs.nist.gov/nistpubs/Legacy/IR/nistir7695.pdf
(Section 6.2)
cpe:2.3: part : vendor : product : version : update : edition :
language : sw_edition : target_sw : target_hw : other
* = "any"
- = N/A
'''
def __init__(self, modl, name, info, opts):
opts['lower'] = True
s_types.Str.__init__(self, modl, name, info, opts)
def _splitCpe23(self, text):
part = ''
parts = []
genr = iter(text)
try:
while True:
c = next(genr)
if c == '\\':
c += next(genr)
if c == ':':
parts.append(part)
part = ''
continue
part += c
except StopIteration:
parts.append(part)
return parts
def _normPyStr(self, valu):
if not valu.startswith('cpe:2.3:'):
mesg = 'CPE 2.3 string is expected to start with "cpe:2.3:"'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
text, info = s_types.Str._normPyStr(self, valu)
parts = self._splitCpe23(text)
if len(parts) != 13:
mesg = f'CPE 2.3 string has {len(parts)} parts, expected 13.'
raise s_exc.BadTypeValu(valu=valu, mesg=mesg)
subs = {
'part': parts[2],
'vendor': parts[3],
'product': parts[4],
'version': parts[5],
'update': parts[6],
'edition': parts[7],
'language': parts[8],
'sw_edition': parts[9],
'target_sw': parts[10],
'target_hw': parts[11],
'other': parts[12],
}
return ':'.join(parts), {'subs': subs}
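# Illustrative sketch (not used by the model below): the same escape-aware
# split as Cpe23Str._splitCpe23, shown standalone for a sample CPE 2.3 string.
def _split_cpe23_example(text):
    parts, cur, genr = [], '', iter(text)
    for c in genr:
        if c == '\\':
            cur += c + next(genr, '')  # keep the escaped character inside the field
        elif c == ':':
            parts.append(cur)
            cur = ''
        else:
            cur += c
    parts.append(cur)
    return parts

# _split_cpe23_example(r'cpe:2.3:a:vendor:product:1.0:*:*:*:*:*:*:*') -> 13 fields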
class SemVer(s_types.Int):
'''
Provides support for parsing a semantic version string into its component
parts. This normalizes a version string into an integer to allow version
ordering. Prerelease information is disregarded for integer comparison
purposes, as we cannot map an arbitrary pre-release version into an integer
value.
Major, minor and patch levels are represented as integers, with a max
width of 20 bits. The comparable integer value representing the semver
is the bitwise concatenation of the major, minor and patch levels.
Prerelease and build information will be parsed out and available as
strings if that information is present.
'''
def postTypeInit(self):
s_types.Int.postTypeInit(self)
self.setNormFunc(str, self._normPyStr)
self.setNormFunc(int, self._normPyInt)
def _normPyStr(self, valu):
valu = valu.strip()
if not valu:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='No text left after stripping whitespace')
subs = s_version.parseSemver(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Unable to parse string as a semver.')
valu = s_version.packVersion(subs.get('major'), subs.get('minor'), subs.get('patch'))
return valu, {'subs': subs}
def _normPyInt(self, valu):
if valu < 0:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a negative integer as a semver.')
if valu > s_version.mask60:
raise s_exc.BadTypeValu(valu=valu, name=self.name,
mesg='Cannot norm a integer larger than 1152921504606846975 as a semver.')
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.packVersion(major, minor, patch)
subs = {'major': major,
'minor': minor,
'patch': patch}
return valu, {'subs': subs}
def repr(self, valu):
major, minor, patch = s_version.unpackVersion(valu)
valu = s_version.fmtVersion(major, minor, patch)
return valu
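# A worked sketch of the packing described in the SemVer docstring, assuming
# s_version.packVersion concatenates 20-bit major/minor/patch fields:
#
#   (1 << 40) | (2 << 20) | 3 == 1099513724931   # i.e. semver 1.2.3
#
# which is why the maximum normable value is (1 << 60) - 1 == 1152921504606846975.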
loglevels = (
(10, 'debug'),
(20, 'info'),
(30, 'notice'),
(40, 'warning'),
(50, 'err'),
(60, 'crit'),
(70, 'alert'),
(80, 'emerg'),
)
class ItModule(s_module.CoreModule):
async def initCoreModule(self):
self.model.form('it:dev:str').onAdd(self._onFormItDevStr)
self.model.form('it:dev:pipe').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:mutex').onAdd(self._onFormMakeDevStr)
self.model.form('it:dev:regkey').onAdd(self._onFormMakeDevStr)
self.model.prop('it:prod:softver:arch').onSet(self._onPropSoftverArch)
self.model.prop('it:prod:softver:vers').onSet(self._onPropSoftverVers)
self.model.prop('it:prod:softver:software').onSet(self._onPropSoftverSoft)
def bruteVersionStr(self, valu):
'''
Brute force the version out of a string.
Args:
valu (str): String to attempt to get version information for.
Notes:
This first attempts to parse strings using the it:semver normalization
before attempting to extract version parts out of the string.
Returns:
int, dict: The system normalized version integer and a subs dictionary.
'''
try:
valu, info = self.core.model.type('it:semver').norm(valu)
subs = info.get('subs')
return valu, subs
except s_exc.BadTypeValu:
# Try doing version part extraction by noming through the string
subs = s_version.parseVersionParts(valu)
if subs is None:
raise s_exc.BadTypeValu(valu=valu, name='bruteVersionStr',
mesg='Unable to brute force version parts out of the string')
if subs:
valu = s_version.packVersion(subs.get('major'),
subs.get('minor', 0),
subs.get('patch', 0))
return valu, subs
async def _onFormItDevStr(self, node):
await node.set('norm', node.ndef[1])
async def _onFormMakeDevStr(self, node):
pprop = node.ndef[1]
await node.snap.addNode('it:dev:str', pprop)
async def _onPropSoftverSoft(self, node, oldv):
# Check to see if name is available and set it if possible
prop = node.get('software')
if prop:
opts = {'vars': {'soft': prop}}
nodes = await node.snap.nodes('it:prod:soft=$soft', opts=opts)
if nodes:
name = nodes[0].get('name')
if name:
await node.set('software:name', name)
async def _onPropSoftverArch(self, node, oldv):
# make it:dev:str for arch
prop = node.get('arch')
if prop:
await node.snap.addNode('it:dev:str', prop)
async def _onPropSoftverVers(self, node, oldv):
# Set vers:norm and make its normed value
prop = node.get('vers')
if not prop:
return
await node.set('vers:norm', prop)
# Make it:dev:str from version str
await node.snap.addNode('it:dev:str', prop)
# form the semver properly or bruteforce parts
try:
valu, subs = self.bruteVersionStr(prop)
await node.set('semver', valu)
for k, v in subs.items():
await node.set(f'semver:{k}', v)
except asyncio.CancelledError: # pragma: no cover
raise
except Exception:
logger.exception('Failed to brute force version string [%s]', prop)
def getModelDefs(self):
modl = {
'ctors': (
('it:semver', 'synapse.models.infotech.SemVer', {}, {
'doc': 'Semantic Version type.',
}),
('it:sec:cpe', 'synapse.models.infotech.Cpe23Str', {}, {
'doc': 'A NIST CPE 2.3 Formatted String',
}),
),
'types': (
('it:hostname', ('str', {'strip': True, 'lower': True}), {
'doc': 'The name of a host or system.',
}),
('it:host', ('guid', {}), {
'doc': 'A GUID that represents a host or system.'
}),
('it:log:event', ('guid', {}), {
'doc': 'A GUID representing an individual log event.',
'interfaces': ('it:host:activity',),
}),
('it:network', ('guid', {}), {
'doc': 'A GUID that represents a logical network.'
}),
('it:domain', ('guid', {}), {
'doc': 'A logical boundary of authentication and configuration such as a windows domain.'
}),
('it:account', ('guid', {}), {
'doc': 'A GUID that represents an account on a host or network.'
}),
('it:group', ('guid', {}), {
'doc': 'A GUID that represents a group on a host or network.'
}),
('it:logon', ('guid', {}), {
'doc': 'A GUID that represents an individual logon/logoff event.'
}),
('it:hosturl', ('comp', {'fields': (('host', 'it:host'), ('url', 'inet:url'))}), {
'doc': 'A url hosted on or served by a host or system.',
}),
('it:sec:cve', ('str', {'lower': True, 'regex': r'(?i)^CVE-[0-9]{4}-[0-9]{4,}$'}), {
'doc': 'A vulnerability as designated by a Common Vulnerabilities and Exposures (CVE) number.',
'ex': 'cve-2012-0158'
}),
('it:sec:cwe', ('str', {'regex': r'^CWE-[0-9]{1,8}$'}), {
'doc': 'NIST NVD Common Weaknesses Enumeration Specification',
'ex': 'CWE-120',
}),
('it:mitre:attack:status', ('str', {'enums': 'current,deprecated,withdrawn'}), {
'doc': 'A Mitre ATT&CK element status.',
'ex': 'current',
}),
('it:mitre:attack:group', ('str', {'regex': r'^G[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Group ID.',
'ex': 'G0100',
}),
('it:mitre:attack:tactic', ('str', {'regex': r'^TA[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Tactic ID.',
'ex': 'TA0040',
}),
('it:mitre:attack:technique', ('str', {'regex': r'^T[0-9]{4}(.[0-9]{3})?$'}), {
'doc': 'A Mitre ATT&CK Technique ID.',
'ex': 'T1548',
}),
('it:mitre:attack:mitigation', ('str', {'regex': r'^M[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Mitigation ID.',
'ex': 'M1036',
}),
('it:mitre:attack:software', ('str', {'regex': r'^S[0-9]{4}$'}), {
'doc': 'A Mitre ATT&CK Software ID.',
'ex': 'S0154',
}),
('it:dev:str', ('str', {}), {
'doc': 'A developer-selected string.'
}),
('it:dev:pipe', ('str', {}), {
'doc': 'A string representing a named pipe.',
}),
('it:dev:mutex', ('str', {}), {
'doc': 'A string representing a mutex.',
}),
('it:dev:int', ('int', {}), {
'doc': 'A developer selected integer constant.',
}),
('it:dev:regkey', ('str', {}), {
'doc': 'A Windows registry key.',
'ex': 'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Run',
}),
('it:dev:regval', ('guid', {}), {
'doc': 'A Windows registry key/value pair.',
}),
('it:prod:soft', ('guid', {}), {
'doc': 'An arbitrary, unversioned software product.',
}),
('it:adid', ('str', {'lower': True, 'strip': True}), {
'doc': 'An advertising identification string.'}),
('it:os:windows:sid', ('str', {'regex': r'^S-1-[0-59]-\d{2}-\d{8,10}-\d{8,10}-\d{8,10}-[1-9]\d{3}$'}), {
'doc': 'A Microsoft Windows Security Identifier.',
'ex': 'S-1-5-21-1220945662-1202665555-839525555-5555',
}),
('it:os:ios:idfa', ('it:adid', {}), {
'doc': 'An iOS advertising identification string.'}),
('it:os:android:aaid', ('it:adid', {}), {
'doc': 'An android advertising identification string.'}),
('it:os:android:perm', ('str', {}), {
'doc': 'An android permission string.'}),
('it:os:android:intent', ('str', {}), {
'doc': 'An android intent string.'}),
('it:os:android:reqperm', ('comp', {'fields': (
('app', 'it:prod:soft'),
('perm', 'it:os:android:perm'))}), {
'doc': 'The given software requests the android permission.'}),
('it:os:android:ilisten', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent'))}), {
'doc': 'The given software listens for an android intent.'}),
('it:os:android:ibroadcast', ('comp', {'fields': (
('app', 'it:prod:soft'),
('intent', 'it:os:android:intent')
)}), {
'doc': 'The given software broadcasts the given Android intent.'}),
('it:prod:softver', ('guid', {}), {
'doc': 'A specific version of a software product.'}),
('it:prod:softfile', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('file', 'file:bytes'))}), {
'doc': 'A file is distributed by a specific software version.'}),
('it:prod:softlib', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('lib', 'it:prod:softver'))}), {
'doc': 'A software version contains a library software version.'}),
('it:prod:softos', ('comp', {'fields': (
('soft', 'it:prod:softver'),
('os', 'it:prod:softver'))}), {
'doc': 'The software version is known to be compatible with the given os software version.'}),
('it:hostsoft', ('comp', {'fields': (('host', 'it:host'), ('softver', 'it:prod:softver'))}), {
'doc': 'A version of a software product which is present on a given host.',
}),
('it:av:sig', ('comp', {'fields': (('soft', 'it:prod:soft'), ('name', ('str', {'lower': True})))}), {
'doc': 'A signature name within the namespace of an antivirus engine name.'
}),
('it:av:filehit', ('comp', {'fields': (('file', 'file:bytes'), ('sig', 'it:av:sig'))}), {
'doc': 'A file that triggered an alert on a specific antivirus signature.',
}),
('it:av:prochit', ('guid', {}), {
'doc': 'An instance of a process triggering an alert on a specific antivirus signature.'
}),
('it:auth:passwdhash', ('guid', {}), {
'doc': 'An instance of a password hash.',
}),
('it:exec:proc', ('guid', {}), {
'doc': 'A process executing on a host. May be an actual (e.g., endpoint) or virtual (e.g., malware sandbox) host.',
}),
('it:exec:thread', ('guid', {}), {
'doc': 'A thread executing in a process.',
}),
('it:exec:loadlib', ('guid', {}), {
'doc': 'A library load event in a process.',
}),
('it:exec:mmap', ('guid', {}), {
'doc': 'A memory mapped segment located in a process.',
}),
('it:cmd', ('str', {'strip': True}), {
'doc': 'A unique command-line string.',
'ex': 'foo.exe --dostuff bar',
}),
('it:exec:mutex', ('guid', {}), {
'doc': 'A mutex created by a process at runtime.',
}),
('it:exec:pipe', ('guid', {}), {
'doc': 'A named pipe created by a process at runtime.',
}),
('it:exec:url', ('guid', {}), {
'doc': 'An instance of a host requesting a URL.',
}),
('it:exec:bind', ('guid', {}), {
'doc': 'An instance of a host binding a listening port.',
}),
('it:fs:file', ('guid', {}), {
'doc': 'A file on a host.'
}),
('it:exec:file:add', ('guid', {}), {
'doc': 'An instance of a host adding a file to a filesystem.',
}),
('it:exec:file:del', ('guid', {}), {
'doc': 'An instance of a host deleting a file from a filesystem.',
}),
('it:exec:file:read', ('guid', {}), {
'doc': 'An instance of a host reading a file from a filesystem.',
}),
('it:exec:file:write', ('guid', {}), {
'doc': 'An instance of a host writing a file to a filesystem.',
}),
('it:exec:reg:get', ('guid', {}), {
'doc': 'An instance of a host getting a registry key.',
}),
('it:exec:reg:set', ('guid', {}), {
'doc': 'An instance of a host creating or setting a registry key.',
}),
('it:exec:reg:del', ('guid', {}), {
'doc': 'An instance of a host deleting a registry key.',
}),
('it:app:yara:rule', ('guid', {}), {
'doc': 'A YARA rule unique identifier.',
}),
('it:app:yara:match', ('comp', {'fields': (('rule', 'it:app:yara:rule'), ('file', 'file:bytes'))}), {
'doc': 'A YARA rule match to a file.',
}),
('it:app:yara:procmatch', ('guid', {}), {
'doc': 'An instance of a YARA rule match to a process.',
}),
('it:app:snort:rule', ('guid', {}), {
'doc': 'A snort rule unique identifier.',
}),
('it:app:snort:hit', ('guid', {}), {
'doc': 'An instance of a snort rule hit.',
}),
('it:reveng:function', ('guid', {}), {
'doc': 'A function inside an executable.',
}),
('it:reveng:filefunc', ('comp', {'fields': (('file', 'file:bytes'), ('function', 'it:reveng:function'))}), {
'doc': 'An instance of a function in an executable.',
}),
('it:reveng:funcstr', ('comp', {'fields': (('function', 'it:reveng:function'), ('string', 'str'))}), {
'deprecated': True,
'doc': 'A reference to a string inside a function.',
}),
('it:reveng:impfunc', ('str', {'lower': 1}), {
'doc': 'A function from an imported library.',
}),
),
'interfaces': (
('it:host:activity', {
'props': (
('exe', ('file:bytes', {}), {
'doc': 'The executable file which caused the activity.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The host process which caused the activity.'}),
('thread', ('it:exec:thread', {}), {
'doc': 'The host thread which caused the activity.'}),
('host', ('it:host', {}), {
'doc': 'The host on which the activity occurred.'}),
('time', ('time', {}), {
'doc': 'The time that the activity started.'}),
),
}),
),
'forms': (
('it:hostname', {}, ()),
('it:host', {}, (
('name', ('it:hostname', {}), {
'doc': 'The name of the host or system.',
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the host.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain that the host is a member of.',
}),
('ipv4', ('inet:ipv4', {}), {
'doc': 'The last known ipv4 address for the host.'
}),
('latlong', ('geo:latlong', {}), {
'doc': 'The last known location for the host.'
}),
('place', ('geo:place', {}), {
'doc': 'The place where the host resides.',
}),
('loc', ('loc', {}), {
'doc': 'The geo-political location string for the node.',
}),
('os', ('it:prod:softver', {}), {
'doc': 'The operating system of the host.'
}),
('manu', ('str', {}), {
'doc': 'The manufacturer of the host.',
}),
('model', ('str', {}), {
'doc': 'The product model of the host.',
}),
('serial', ('str', {}), {
'doc': 'The serial number of the host.',
}),
('operator', ('ps:contact', {}), {
'doc': 'The operator of the host.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given host.',
}),
)),
('it:log:event', {}, (
('mesg', ('str', {}), {
'doc': 'The log message text.',
}),
('severity', ('int', {'enums': loglevels}), {
'doc': 'A log level integer that increases with severity.',
}),
('data', ('data', {}), {
'doc': 'A raw JSON record of the log event.',
}),
)),
('it:domain', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the domain.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the domain.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that operates the given domain.',
}),
)),
('it:network', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the network.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the network.',
}),
('org', ('ou:org', {}), {
'doc': 'The org that owns/operates the network.',
}),
('net4', ('inet:net4', {}), {
'doc': 'The optional contiguous IPv4 address range of this network.',
}),
('net6', ('inet:net6', {}), {
'doc': 'The optional contiguous IPv6 address range of this network.',
}),
)),
('it:account', {}, (
('user', ('inet:user', {}), {
'doc': 'The username associated with the account',
}),
('contact', ('ps:contact', {}), {
'doc': 'Additional contact information associated with this account.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the account is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the account is registered.',
}),
('posix:uid', ('int', {}), {
'doc': 'The user ID of the account.',
'ex': '1001',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('posix:gecos', ('int', {}), {
'doc': 'The GECOS field for the POSIX account.',
}),
('posix:home', ('file:path', {}), {
'doc': "The path to the POSIX account's home directory.",
'ex': '/home/visi',
}),
('posix:shell', ('file:path', {}), {
'doc': "The path to the POSIX account's default shell.",
'ex': '/bin/bash',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the account.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'An array of groups that the account is a member of.',
}),
)),
('it:group', {}, (
('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
'doc': 'The name of the group.',
}),
('desc', ('str', {}), {
'doc': 'A brief description of the group.',
}),
('host', ('it:host', {}), {
'doc': 'The host where the group is registered.',
}),
('domain', ('it:domain', {}), {
'doc': 'The authentication domain where the group is registered.',
}),
('groups', ('array', {'type': 'it:group'}), {
'doc': 'Groups that are a member of this group.',
}),
('posix:gid', ('int', {}), {
'doc': 'The primary group ID of the account.',
'ex': '1001',
}),
('windows:sid', ('it:os:windows:sid', {}), {
'doc': 'The Microsoft Windows Security Identifier of the group.',
}),
)),
('it:logon', {}, (
('time', ('time', {}), {
'doc': 'The time the logon occurred.',
}),
('success', ('bool', {}), {
'doc': 'Set to false to indicate an unsuccessful logon attempt.',
}),
('logoff:time', ('time', {}), {
'doc': 'The time the logon session ended.',
}),
('host', ('it:host', {}), {
'doc': 'The host that the account logged in to.',
}),
('account', ('it:account', {}), {
'doc': 'The account that logged in.',
}),
('creds', ('auth:creds', {}), {
'doc': 'The credentials that were used for the logon.',
}),
('duration', ('duration', {}), {
'doc': 'The duration of the logon session.',
}),
('client:host', ('it:host', {}), {
'doc': 'The host where the logon originated.',
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 where the logon originated.',
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 where the logon originated.',
}),
)),
('it:hosturl', {}, (
('host', ('it:host', {}), {
'ro': True,
'doc': 'Host serving a url.',
}),
('url', ('inet:url', {}), {
'ro': True,
'doc': 'URL available on the host.',
}),
)),
('it:dev:str', {}, (
('norm', ('str', {'lower': True}), {
'doc': 'Lower case normalized version of the it:dev:str.',
}),
)),
('it:sec:cve', {}, (
('desc', ('str', {}), {
'doc': 'A free-form description of the CVE vulnerability.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CVE to a full description.',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the CVE ID.',
}),
)),
('it:sec:cpe', {}, (
('part', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "part" field from the CPE 2.3 string.'}),
('vendor', ('ou:name', {}), {
'ro': True,
'doc': 'The "vendor" field from the CPE 2.3 string.'}),
('product', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "product" field from the CPE 2.3 string.'}),
('version', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "version" field from the CPE 2.3 string.'}),
('update', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "update" field from the CPE 2.3 string.'}),
('edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "edition" field from the CPE 2.3 string.'}),
('language', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "language" field from the CPE 2.3 string.'}),
('sw_edition', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "sw_edition" field from the CPE 2.3 string.'}),
('target_sw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_sw" field from the CPE 2.3 string.'}),
('target_hw', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "target_hw" field from the CPE 2.3 string.'}),
('other', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The "other" field from the CPE 2.3 string.'}),
)),
('it:sec:cwe', {}, (
('name', ('str', {}), {
'doc': 'The CWE description field.',
'ex': 'Buffer Copy without Checking Size of Input (Classic Buffer Overflow)',
}),
('desc', ('str', {}), {
'doc': 'The CWE description field.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A URL linking this CWE to a full description.',
}),
('parents', ('array', {'type': 'it:sec:cwe',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ChildOf CWE Relationships.'
}),
)),
('it:mitre:attack:group', {}, (
('org', ('ou:org', {}), {
'doc': 'Used to map an ATT&CK group to a synapse ou:org.',
}),
('name', ('ou:name', {}), {
'doc': 'The primary name for the ATT&CK group.',
}),
('names', ('array', {'type': 'ou:name', 'uniq': True, 'sorted': True}), {
'doc': 'An array of alternate names for the ATT&CK group.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK group.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK group.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK group ID.',
'ex': 'cno.mitre.g0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK group.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs used by the group.',
}),
('software', ('array', {'type': 'it:mitre:attack:software',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK software IDs used by the group.',
}),
)),
('it:mitre:attack:tactic', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK tactic.',
}),
('desc', ('str', {}), {
'doc': 'A description of the ATT&CK tactic.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK tactic.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK tactic.',
'ex': 'cno.mitre.ta0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK tactic.',
}),
)),
('it:mitre:attack:technique', {}, (
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK technique.',
}),
('status', ('it:mitre:attack:status', {}), {
'doc': 'The status of this ATT&CK technique.',
}),
('isnow', ('it:mitre:attack:technique', {}), {
'doc': 'If deprecated, this field may contain the current value for the technique.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK technique.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK technique.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK technique.',
'ex': 'cno.mitre.t0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK technique.',
}),
('parent', ('it:mitre:attack:technique', {}), {
'doc': 'The parent ATT&CK technique of this sub-technique.',
}),
('tactics', ('array', {'type': 'it:mitre:attack:tactic',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK tactics that include this technique.',
}),
)),
('it:mitre:attack:software', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Used to map an ATT&CK software to a synapse it:prod:soft.',
}),
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK software.',
}),
('names', ('array', {'type': 'str', 'uniq': True, 'sorted': True}), {
'doc': 'Associated names for the ATT&CK software.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK software.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK software.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK software.',
'ex': 'cno.mitre.s0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK software.',
}),
('techniques', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of techniques used by the software.',
}),
)),
('it:mitre:attack:mitigation', {}, (
# TODO map to an eventual risk:mitigation
('name', ('str', {'strip': True}), {
'doc': 'The primary name for the ATT&CK mitigation.',
}),
('desc', ('str', {'strip': True}), {
'doc': 'A description of the ATT&CK mitigation.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'The URL that documents the ATT&CK mitigation.',
}),
('tag', ('syn:tag', {}), {
'doc': 'The synapse tag used to annotate nodes included in this ATT&CK mitigation.',
'ex': 'cno.mitre.m0100',
}),
('references', ('array', {'type': 'inet:url', 'uniq': True}), {
'doc': 'An array of URLs that document the ATT&CK mitigation.',
}),
('addresses', ('array', {'type': 'it:mitre:attack:technique',
'uniq': True, 'sorted': True, 'split': ','}), {
'doc': 'An array of ATT&CK technique IDs addressed by the mitigation.',
}),
)),
('it:dev:int', {}, ()),
('it:dev:pipe', {}, ()),
('it:dev:mutex', {}, ()),
('it:dev:regkey', {}, ()),
('it:dev:regval', {}, (
('key', ('it:dev:regkey', {}), {
'doc': 'The Windows registry key.',
}),
('str', ('it:dev:str', {}), {
'doc': 'The value of the registry key, if the value is a string.',
}),
('int', ('it:dev:int', {}), {
'doc': 'The value of the registry key, if the value is an integer.',
}),
('bytes', ('file:bytes', {}), {
'doc': 'The file representing the value of the registry key, if the value is binary data.',
}),
)),
('it:prod:soft', {}, (
('name', ('str', {'lower': True, 'strip': True}), {
'doc': 'Name of the software.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software.',
}),
('desc', ('str', {}), {
'doc': 'A description of the software.',
'disp': {'hint': 'text'},
}),
('desc:short', ('str', {'lower': True}), {
'doc': 'A short description of the software.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software.',
}),
('author', ('ps:contact', {}), {
'doc': 'The contact information of the org or person who authored the software.',
}),
('author:org', ('ou:org', {}), {
'deprecated': True,
'doc': 'Organization which authored the software.',
}),
('author:acct', ('inet:web:acct', {}), {
'deprecated': True,
'doc': 'Web account of the software author.',
}),
('author:email', ('inet:email', {}), {
'deprecated': True,
'doc': 'Email address of the software author.',
}),
('author:person', ('ps:person', {}), {
'deprecated': True,
'doc': 'Person who authored the software.',
}),
('url', ('inet:url', {}), {
'doc': 'URL relevant for the software.',
}),
('isos', ('bool', {}), {
'doc': 'Set to True if the software is an operating system.'}),
('islib', ('bool', {}), {
'doc': 'Set to True if the software is a library.'}),
)),
('it:adid', {}, ()),
('it:os:ios:idfa', {}, ()),
('it:os:android:aaid', {}, ()),
('it:os:android:perm', {}, ()),
('it:os:android:intent', {}, ()),
('it:os:android:reqperm', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The android app which requests the permission.'}),
('perm', ('it:os:android:perm', {}), {'ro': True,
'doc': 'The android permission requested by the app.'}),
)),
('it:prod:softos', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which can run on the operating system.'}),
('os', ('it:prod:softver', {}), {'ro': True,
'doc': 'The operating system which the software can run on.'}),
)),
('it:os:android:ilisten', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which listens for the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is listened for by the app.'}),
)),
('it:os:android:ibroadcast', {}, (
('app', ('it:prod:softver', {}), {'ro': True,
'doc': 'The app software which broadcasts the android intent.'}),
('intent', ('it:os:android:intent', {}), {'ro': True,
'doc': 'The android intent which is broadcast by the app.'}),
)),
('it:prod:softver', {}, (
('software', ('it:prod:soft', {}), {
'doc': 'Software associated with this version instance.',
}),
('software:name', ('str', {'lower': True, 'strip': True}), {
'doc': 'The name of the software at a particular version.',
}),
('names', ('array', {'type': 'it:dev:str', 'uniq': True, 'sorted': True}), {
'doc': 'Observed/variant names for this software version.',
}),
('cpe', ('it:sec:cpe', {}), {
'doc': 'The NIST CPE 2.3 string specifying this software version.',
}),
('cves', ('array', {'type': 'it:sec:cve', 'uniq': True, 'sorted': True}), {
'doc': 'A list of CVEs that apply to this software version.',
}),
('vers', ('it:dev:str', {}), {
'doc': 'Version string associated with this version instance.',
}),
('vers:norm', ('str', {'lower': True}), {
'doc': 'Normalized version of the version string.',
}),
('arch', ('it:dev:str', {}), {
'doc': 'Software architecture.',
}),
('released', ('time', {}), {
'doc': 'Timestamp for when this version of the software was released.',
}),
('semver', ('it:semver', {}), {
'doc': 'System normalized semantic version number.',
}),
('semver:major', ('int', {}), {
'doc': 'Version major number.',
}),
('semver:minor', ('int', {}), {
'doc': 'Version minor number.',
}),
('semver:patch', ('int', {}), {
'doc': 'Version patch number.',
}),
('semver:pre', ('str', {}), {
'doc': 'Semver prerelease string.',
}),
('semver:build', ('str', {}), {
'doc': 'Semver build string.',
}),
('url', ('inet:url', {}), {
'doc': 'URL where a specific version of the software is available from.',
}),
)),
('it:prod:softlib', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software version that contains the library.'}),
('lib', ('it:prod:softver', {}), {'ro': True,
'doc': 'The library software version.'}),
)),
('it:prod:softfile', {}, (
('soft', ('it:prod:softver', {}), {'ro': True,
'doc': 'The software which distributes the file.'}),
('file', ('file:bytes', {}), {'ro': True,
'doc': 'The file distributed by the software.'}),
('path', ('file:path', {}), {
'doc': 'The default installation path of the file.'}),
)),
('it:hostsoft', {}, (
('host', ('it:host', {}), {'ro': True,
'doc': 'Host with the software.'}),
('softver', ('it:prod:softver', {}), {'ro': True,
'doc': 'Software on the host.'})
)),
('it:av:sig', {}, (
('soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
('name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.'
}),
('desc', ('str', {}), {
'doc': 'A free-form description of the signature.',
'disp': {'hint': 'text'},
}),
('url', ('inet:url', {}), {
'doc': 'A reference URL for information about the signature.',
})
)),
('it:av:filehit', {}, (
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'ro': True,
'doc': 'The signature that the file triggered on.'
}),
('sig:name', ('str', {'lower': True}), {
'ro': True,
'doc': 'The signature name.',
}),
('sig:soft', ('it:prod:soft', {}), {
'ro': True,
'doc': 'The anti-virus product which contains the signature.',
}),
)),
('it:av:prochit', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process that triggered the signature hit.',
}),
('sig', ('it:av:sig', {}), {
'doc': 'The signature that the process triggered on.'
}),
('time', ('time', {}), {
'doc': 'The time that the AV engine detected the signature.'
}),
)),
('it:auth:passwdhash', {}, (
('salt', ('hex', {}), {
'doc': 'The (optional) hex encoded salt value used to calculate the password hash.',
}),
('hash:md5', ('hash:md5', {}), {
'doc': 'The MD5 password hash value.',
}),
('hash:sha1', ('hash:sha1', {}), {
'doc': 'The SHA1 password hash value.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'The SHA256 password hash value.',
}),
('hash:sha512', ('hash:sha512', {}), {
'doc': 'The SHA512 password hash value.',
}),
('hash:lm', ('hash:lm', {}), {
'doc': 'The LM password hash value.',
}),
('hash:ntlm', ('hash:ntlm', {}), {
'doc': 'The NTLM password hash value.',
}),
('passwd', ('inet:passwd', {}), {
'doc': 'The (optional) clear text password for this password hash.',
}),
)),
('it:cmd', {}, ()),
('it:exec:proc', {}, (
('host', ('it:host', {}), {
'doc': 'The host that executed the process. May be an actual or a virtual / notional host.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The file considered the "main" executable for the process. For example, rundll32.exe may be considered the "main" executable for DLLs loaded by that program.',
}),
('cmd', ('it:cmd', {}), {
'doc': 'The command string used to launch the process, including any command line parameters.',
'disp': {'hint': 'text'},
}),
('pid', ('int', {}), {
'doc': 'The process ID.',
}),
('time', ('time', {}), {
'doc': 'The start time for the process.',
}),
('exited', ('time', {}), {
'doc': 'The time the process exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code for the process.',
}),
('user', ('inet:user', {}), {
'doc': 'The user name of the process owner.',
}),
('path', ('file:path', {}), {
'doc': 'The path to the executable of the process.',
}),
('src:exe', ('file:path', {}), {
'doc': 'The path to the executable which started the process.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'The process which created the process.'
}),
('killedby', ('it:exec:proc', {}), {
'doc': 'The process which killed this process.',
}),
)),
('it:exec:thread', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process which contains the thread.',
}),
('created', ('time', {}), {
'doc': 'The time the thread was created.',
}),
('exited', ('time', {}), {
'doc': 'The time the thread exited.',
}),
('exitcode', ('int', {}), {
'doc': 'The exit code or return value for the thread.',
}),
('src:proc', ('it:exec:proc', {}), {
'doc': 'An external process which created the thread.',
}),
('src:thread', ('it:exec:thread', {}), {
'doc': 'The thread which created this thread.',
}),
)),
('it:exec:loadlib', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the library was loaded.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the library was loaded in the process.',
}),
('loaded', ('time', {}), {
'doc': 'The time the library was loaded.',
}),
('unloaded', ('time', {}), {
'doc': 'The time the library was unloaded.',
}),
('path', ('file:path', {}), {
'doc': 'The path that the library was loaded from.',
}),
('file', ('file:bytes', {}), {
'doc': 'The library file that was loaded.',
}),
)),
('it:exec:mmap', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The process where the memory was mapped.',
}),
('va', ('int', {}), {
'doc': 'The base memory address where the map was created in the process.',
}),
('size', ('int', {}), {
'doc': 'The size of the memory map in bytes.',
}),
('perms:read', ('bool', {}), {
'doc': 'True if the mmap is mapped with read permissions.',
}),
('perms:write', ('bool', {}), {
'doc': 'True if the mmap is mapped with write permissions.',
}),
('perms:execute', ('bool', {}), {
'doc': 'True if the mmap is mapped with execute permissions.',
}),
('created', ('time', {}), {
'doc': 'The time the memory map was created.',
}),
('deleted', ('time', {}), {
'doc': 'The time the memory map was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The file path if the mmap is a mapped view of a file.',
}),
('hash:sha256', ('hash:sha256', {}), {
'doc': 'A SHA256 hash of the memory map. Bytes may optionally be present in the axon.',
}),
)),
('it:exec:mutex', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the mutex.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the mutex. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the mutex. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the mutex was created.',
}),
('name', ('it:dev:mutex', {}), {
'doc': 'The mutex string.',
}),
)),
('it:exec:pipe', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the named pipe.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the named pipe. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the named pipe. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the named pipe was created.',
}),
('name', ('it:dev:pipe', {}), {
'doc': 'The named pipe string.',
}),
)),
('it:exec:url', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that requested the URL.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that requested the URL. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that requested the URL. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the URL was requested.',
}),
('url', ('inet:url', {}), {
'doc': 'The URL that was requested.',
}),
('client', ('inet:client', {}), {
'doc': 'The address of the client during the URL retrieval.'
}),
('client:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address of the client during the URL retrieval.'
}),
('client:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address of the client during the URL retrieval.'
}),
('client:port', ('inet:port', {}), {
'doc': 'The client port during the URL retrieval.'
}),
)),
('it:exec:bind', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that bound the listening port.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that bound the listening port. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that bound the listening port. May or may not be the same :exe specified in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the port was bound.',
}),
('server', ('inet:server', {}), {
'doc': 'The inet:addr of the server when binding the port.'
}),
('server:ipv4', ('inet:ipv4', {}), {
'doc': 'The IPv4 address specified to bind().'
}),
('server:ipv6', ('inet:ipv6', {}), {
'doc': 'The IPv6 address specified to bind().'
}),
('server:port', ('inet:port', {}), {
'doc': 'The bound (listening) TCP port.'
}),
)),
('it:fs:file', {}, (
('host', ('it:host', {}), {
'doc': 'The host containing the file.',
}),
('path', ('file:path', {}), {
'doc': 'The path for the file.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file on the host.',
}),
('ctime', ('time', {}), {
'doc': 'The file creation time.',
}),
('mtime', ('time', {}), {
'doc': 'The file modification time.',
}),
('atime', ('time', {}), {
'doc': 'The file access time.',
}),
('user', ('inet:user', {}), {
'doc': 'The owner of the file.',
}),
('group', ('inet:user', {}), {
'doc': 'The group owner of the file.',
}),
)),
('it:exec:file:add', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that created the new file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that created the new file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that created the new file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was created.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was created.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was created.',
}),
)),
('it:exec:file:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was deleted.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was deleted.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was deleted.',
}),
)),
('it:exec:file:read', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was read.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was read.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was read.',
}),
)),
('it:exec:file:write', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to / modified the existing file.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the file. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the file. May or may not be the same :exe specified in :proc, if present.'}),
('time', ('time', {}), {
'doc': 'The time the file was written to/modified.',
}),
('path', ('file:path', {}), {
'doc': 'The path where the file was written to/modified.',
}),
('path:dir', ('file:path', {}), {
'ro': True,
'doc': 'The parent directory of the file path (parsed from :path).',
}),
('path:ext', ('str', {'lower': True, 'strip': True}), {
'ro': True,
'doc': 'The file extension of the file name (parsed from :path).',
}),
('path:base', ('file:base', {}), {
'ro': True,
'doc': 'The final component of the file path (parsed from :path).',
}),
('file', ('file:bytes', {}), {
'doc': 'The file that was modified.',
}),
)),
('it:exec:reg:get', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that read the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that read the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that read the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was read.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was read.',
}),
)),
('it:exec:reg:set', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that wrote to the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that wrote to the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that wrote to the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the registry was written to.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was written to.',
}),
)),
('it:exec:reg:del', {}, (
('proc', ('it:exec:proc', {}), {
'doc': 'The main process executing code that deleted data from the registry.',
}),
('host', ('it:host', {}), {
'doc': 'The host running the process that deleted data from the registry. Typically the same host referenced in :proc, if present.',
}),
('exe', ('file:bytes', {}), {
'doc': 'The specific file containing code that deleted data from the registry. May or may not be the same :exe referenced in :proc, if present.',
}),
('time', ('time', {}), {
'doc': 'The time the data from the registry was deleted.',
}),
('reg', ('it:dev:regval', {}), {
'doc': 'The registry key or value that was deleted.',
}),
)),
('it:app:snort:rule', {}, (
('text', ('str', {}), {
'doc': 'The snort rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the snort rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
)),
('it:app:snort:hit', {}, (
('rule', ('it:app:snort:rule', {}), {
'doc': 'The snort rule that matched the file.'}),
('flow', ('inet:flow', {}), {
'doc': 'The inet:flow that matched the snort rule.'}),
('src', ('inet:addr', {}), {
'doc': 'The source address of the flow that caused the hit.'}),
('src:ipv4', ('inet:ipv4', {}), {
'doc': 'The source IPv4 address of the flow that caused the hit.'}),
('src:ipv6', ('inet:ipv6', {}), {
'doc': 'The source IPv6 address of the flow that caused the hit.'}),
('src:port', ('inet:port', {}), {
'doc': 'The source port of the flow that caused the hit.'}),
('dst', ('inet:addr', {}), {
'doc': 'The destination address of the flow that caused the hit.'}),
('dst:ipv4', ('inet:ipv4', {}), {
'doc': 'The destination IPv4 address of the flow that caused the hit.'}),
('dst:ipv6', ('inet:ipv6', {}), {
'doc': 'The destination IPv6 address of the flow that caused the hit.'}),
('dst:port', ('inet:port', {}), {
'doc': 'The destination port of the flow that caused the hit.'}),
('time', ('time', {}), {
'doc': 'The time of the network flow that caused the hit.'}),
('sensor', ('it:host', {}), {
'doc': 'The sensor host node that produced the hit.'}),
('version', ('it:semver', {}), {
'doc': 'The version of the rule at the time of match.'}),
)),
('it:app:yara:rule', {}, (
('text', ('str', {}), {
'doc': 'The YARA rule text.',
'disp': {'hint': 'text'},
}),
('name', ('str', {}), {
'doc': 'The name of the YARA rule.'}),
('author', ('ps:contact', {}), {
'doc': 'Contact info for the author of the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The current version of the rule.'}),
('enabled', ('bool', {}), {
'doc': 'The rule enabled status to be used for YARA evaluation engines.'}),
)),
('it:app:yara:match', {}, (
('rule', ('it:app:yara:rule', {}), {
'ro': True,
'doc': 'The YARA rule that matched the file.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that matched the YARA rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:app:yara:procmatch', {}, (
('rule', ('it:app:yara:rule', {}), {
'doc': 'The YARA rule that matched the file.'}),
('proc', ('it:exec:proc', {}), {
'doc': 'The process that matched the YARA rule.'}),
('time', ('time', {}), {
'doc': 'The time that the YARA engine matched the process to the rule.'}),
('version', ('it:semver', {}), {
'doc': 'The most recent version of the rule evaluated as a match.'}),
)),
('it:reveng:function', {}, (
('name', ('str', {}), {
'doc': 'The name of the function.'}),
('description', ('str', {}), {
'doc': 'Notes concerning the function.'}),
('impcalls', ('array', {'type': 'it:reveng:impfunc'}), {
'doc': 'Calls to imported library functions within the scope of the function.',
}),
('strings', ('array', {'type': 'it:dev:str', 'uniq': True}), {
'doc': 'An array of strings referenced within the function.',
}),
)),
('it:reveng:filefunc', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('file', ('file:bytes', {}), {
'ro': True,
'doc': 'The file that contains the function.'}),
('va', ('int', {}), {
'doc': 'The virtual address of the first codeblock of the function.'}),
('rank', ('int', {}), {
'doc': 'The function rank score used to evaluate if it exhibits interesting behavior.'}),
('complexity', ('int', {}), {
'doc': 'The complexity of the function.'}),
('funccalls', ('array', {'type': 'it:reveng:filefunc'}), {
'doc': 'Other function calls within the scope of the function.',
}),
)),
('it:reveng:funcstr', {}, (
('function', ('it:reveng:function', {}), {
'ro': True,
'doc': 'The guid matching the function.'}),
('string', ('str', {}), {
'ro': True,
'doc': 'The string that the function references.'}),
)),
('it:reveng:impfunc', {}, ()),
),
}
name = 'it'
return ((name, modl), )
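# A minimal inspection sketch for the model definition returned above; it is a
# sketch, not part of the original module. Assumptions: the forms are stored
# under a 'forms' key of the modl dict, and each form is a (name, opts, props)
# tuple whose props look like (propname, (typename, typeopts), info), as in
# the definitions above.
def summarize_model(model_defs):
    """Print every declared property with its type and doc string."""
    for modname, modl in model_defs:
        for formname, _formopts, props in modl.get('forms', ()):
            for propname, (typename, _typeopts), info in props:
                print(f"{formname}:{propname} ({typename}) - {info.get('doc', '')}")

# Hypothetical usage: summarize_model(((name, modl),)) with the tuple built above.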
| 48.807215
| 191
| 0.372506
| 7,160
| 82,533
| 4.283659
| 0.105307
| 0.053406
| 0.019236
| 0.019562
| 0.557954
| 0.477422
| 0.432135
| 0.382381
| 0.338235
| 0.309902
| 0
| 0.00761
| 0.463415
| 82,533
| 1,690
| 192
| 48.836095
| 0.684957
| 0.01855
| 0
| 0.475484
| 0
| 0.016129
| 0.382951
| 0.011878
| 0
| 0
| 0
| 0.000592
| 0
| 1
| 0.005806
| false
| 0.007742
| 0.005161
| 0
| 0.01871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b35eaa0ed1f5a899840c77cec0648c4c36f9761
| 1,447
|
py
|
Python
|
test/test.py
|
bciar/ppp-web
|
1afe39a3c8d2197595ad0e2610c612db210cd62e
|
[
"MIT"
] | 2
|
2018-09-27T03:31:42.000Z
|
2018-09-27T11:11:17.000Z
|
test/test.py
|
bciar/ppp-web
|
1afe39a3c8d2197595ad0e2610c612db210cd62e
|
[
"MIT"
] | null | null | null |
test/test.py
|
bciar/ppp-web
|
1afe39a3c8d2197595ad0e2610c612db210cd62e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests."""
import os
import unittest
from copy import copy
from webui.app import create_app
class TestRoutes(unittest.TestCase):
"""Test routes."""
ignore_routes = ('/static/<path:filename>',)
ignore_end_patterns = ('>',)
def setUp(self):
"""Set up: Put Flask app in test mode."""
app = create_app()
self.initial_app = copy(app)
app.testing = True
self.app = app.test_client()
@staticmethod
def valid_route(route):
"""Validate route.
Args:
route (str): Route url pattern.
Returns:
bool: True if valid, else False.
"""
if route in TestRoutes.ignore_routes \
or route.endswith(TestRoutes.ignore_end_patterns):
return False
return True
def test_routes(self):
"""Smoke test routes to ensure no runtime errors.."""
routes = [route.rule for route in self.initial_app.url_map.iter_rules()
if self.valid_route(route.rule)]
for route in routes:
self.app.get(route)
if __name__ == '__main__':
from test.utils.doctest_unittest_runner import doctest_unittest_runner
TEST_DIR = os.path.dirname(os.path.realpath(__file__)) + '/'
doctest_unittest_runner(test_dir=TEST_DIR, relative_path_to_root='../',
package_names=['webui', 'test'])
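# Assumed invocation, not taken from the test module above: from the project
# root, something like `python -m test.test` should trigger the __main__ block,
# which hands the 'webui' and 'test' packages to doctest_unittest_runner so
# that both the doctests and the TestRoutes smoke tests are executed.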
| 27.826923
| 79
| 0.607464
| 177
| 1,447
| 4.734463
| 0.463277
| 0.0358
| 0.075179
| 0.040573
| 0.112172
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001905
| 0.274361
| 1,447
| 51
| 80
| 28.372549
| 0.79619
| 0.1783
| 0
| 0
| 0
| 0
| 0.040143
| 0.020517
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.178571
| 0
| 0.464286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b36c2213b18abb5b4d5cac68e49cf0ee92025a0
| 6,938
|
py
|
Python
|
tests/sources/test_clang_format.py
|
Justin-Fisher/webots
|
8a39e8e4390612919a8d82c7815aa914f4c079a4
|
[
"Apache-2.0"
] | 1,561
|
2019-09-04T11:32:32.000Z
|
2022-03-31T18:00:09.000Z
|
tests/sources/test_clang_format.py
|
Justin-Fisher/webots
|
8a39e8e4390612919a8d82c7815aa914f4c079a4
|
[
"Apache-2.0"
] | 2,184
|
2019-09-03T11:35:02.000Z
|
2022-03-31T10:01:44.000Z
|
tests/sources/test_clang_format.py
|
Justin-Fisher/webots
|
8a39e8e4390612919a8d82c7815aa914f4c079a4
|
[
"Apache-2.0"
] | 1,013
|
2019-09-07T05:09:32.000Z
|
2022-03-31T13:01:28.000Z
|
#!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the C, C++ and shader source code is compliant with ClangFormat."""
import unittest
import difflib
import os
import subprocess
from io import open
from distutils.spawn import find_executable
class TestClangFormat(unittest.TestCase):
"""Unit test for ClangFormat compliance."""
def setUp(self):
"""Set up called before each test."""
self.WEBOTS_HOME = os.environ['WEBOTS_HOME']
def _runClangFormat(self, f):
"""Run clang format on 'f' file."""
return subprocess.check_output(['clang-format', '-style=file', f])
def test_clang_format_is_correctly_installed(self):
"""Test ClangFormat is correctly installed."""
self.assertTrue(
find_executable('clang-format') is not None,
msg='ClangFormat is not installed on this computer.'
)
clangFormatConfigFile = self.WEBOTS_HOME + os.sep + '.clang-format'
self.assertTrue(
os.path.exists(clangFormatConfigFile),
msg=clangFormatConfigFile + ' not found.'
)
def test_sources_are_clang_format_compliant(self):
"""Test that sources are ClangFormat compliant."""
directories = [
'include/controller',
'projects',
'resources/projects',
'resources/wren/shaders',
'tests',
'include/wren',
'src/controller/c',
'src/controller/cpp',
'src/license/sign',
'src/webots',
'src/wren'
]
skippedPaths = [
'projects/default/controllers/ros/include',
'projects/robots/gctronic/e-puck/transfer',
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba',
'projects/robots/mobsya/thymio/libraries/dashel',
'projects/robots/mobsya/thymio/libraries/dashel-src',
'projects/robots/robotis/darwin-op/libraries/libssh',
'projects/robots/robotis/darwin-op/libraries/libzip',
'projects/robots/robotis/darwin-op/libraries/robotis-op2/robotis',
'projects/robots/robotis/darwin-op/remote_control/libjpeg-turbo',
'projects/vehicles/controllers/ros_automobile/include',
'src/webots/external'
]
skippedFiles = [
'projects/robots/robotis/darwin-op/plugins/remote_controls/robotis-op2_tcpip/stb_image.h'
]
skippedDirectories = [
'build',
'python',
'java'
]
extensions = ['c', 'h', 'cpp', 'hpp', 'cc', 'hh', 'c++', 'h++', 'vert', 'frag']
modified_files = os.path.join(self.WEBOTS_HOME, 'tests', 'sources', 'modified_files.txt')
sources = []
if os.path.isfile(modified_files):
with open(modified_files, 'r') as file:
for line in file:
line = line.strip()
extension = os.path.splitext(line)[1][1:].lower()
if extension not in extensions:
continue
found = False
for directory in directories:
if line.startswith(directory):
found = True
break
if not found:
continue
found = False
for directory in skippedPaths + skippedFiles:
if line.startswith(directory):
found = True
break
if found:
continue
for directory in skippedDirectories:
currentDirectories = line.split(os.sep)
if directory in currentDirectories:
found = True
if found:
continue
sources.append(line.replace('/', os.sep))
else:
for directory in directories:
path = self.WEBOTS_HOME + os.sep + directory.replace('/', os.sep)
for rootPath, dirNames, fileNames in os.walk(path):
shouldContinue = False
for path in skippedPaths:
if rootPath.startswith(self.WEBOTS_HOME + os.sep + path.replace('/', os.sep)):
shouldContinue = True
break
for directory in skippedDirectories:
currentDirectories = rootPath.replace(self.WEBOTS_HOME, '').split(os.sep)
if directory in currentDirectories:
shouldContinue = True
break
if shouldContinue:
continue
for fileName in fileNames:
extension = os.path.splitext(fileName)[1][1:].lower()
if extension not in extensions:
continue
path = os.path.normpath(os.path.join(rootPath, fileName))
skipFile = False
for file in skippedFiles:
if os.path.normpath((self.WEBOTS_HOME + os.sep + file.replace('/', os.sep))) == path:
skipFile = True
break
if not skipFile:
sources.append(path)
curdir = os.getcwd()
os.chdir(self.WEBOTS_HOME)
for source in sources:
diff = ''
with open(source, encoding='utf8') as file:
try:
for line in difflib.context_diff(self._runClangFormat(source).decode('utf-8').splitlines(),
file.read().splitlines()):
diff += line + '\n'
except UnicodeDecodeError:
self.assertTrue(False, msg='utf-8 decode problem in %s' % source)
self.assertTrue(
len(diff) == 0,
msg='Source file "%s" is not compliant with ClangFormat:\n\nDIFF:%s' % (source, diff)
)
os.chdir(curdir)
if __name__ == '__main__':
unittest.main()
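# A standalone sketch of the core compliance check used above (the helper name
# is an assumption, not from the original file): clang-format's output for a
# file is diffed against the file's current contents, and an empty diff means
# the file is compliant.
def is_clang_format_compliant(path):
    formatted = subprocess.check_output(['clang-format', '-style=file', path]).decode('utf-8')
    with open(path, encoding='utf8') as source:
        current = source.read()
    return not any(difflib.context_diff(formatted.splitlines(), current.splitlines()))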
| 41.54491
| 113
| 0.528971
| 670
| 6,938
| 5.413433
| 0.347761
| 0.013786
| 0.03088
| 0.022057
| 0.20182
| 0.137304
| 0.067825
| 0.045216
| 0.022608
| 0
| 0
| 0.005314
| 0.376189
| 6,938
| 166
| 114
| 41.795181
| 0.832717
| 0.121072
| 0
| 0.24812
| 0
| 0.007519
| 0.176743
| 0.106706
| 0
| 0
| 0
| 0
| 0.030075
| 1
| 0.030075
| false
| 0
| 0.045113
| 0
| 0.090226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b37d79a6bcd2f11e42ccf5ea2b0694fffb12722
| 10,361
|
py
|
Python
|
src/python/tests/core/system/shell_test.py
|
sanketsaurav/clusterfuzz
|
9f7efba7781614d50cdc6ab136b9bcf19607731c
|
[
"Apache-2.0"
] | 1
|
2019-04-09T06:40:55.000Z
|
2019-04-09T06:40:55.000Z
|
src/python/tests/core/system/shell_test.py
|
Delaney6/clusterfuzz
|
9eeb08a85869b32733dd54c69b098688ff3b1bf5
|
[
"Apache-2.0"
] | null | null | null |
src/python/tests/core/system/shell_test.py
|
Delaney6/clusterfuzz
|
9eeb08a85869b32733dd54c69b098688ff3b1bf5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""shell tests."""
import mock
import os
import unittest
from pyfakefs import fake_filesystem_unittest
from system import environment
from system import shell
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
class RemoveEmptyFilesTest(fake_filesystem_unittest.TestCase):
"""Tests for remove_empty_files."""
def setUp(self):
# FIXME: Add support for Windows.
if not environment.is_posix():
self.skipTest('Process tests are only applicable for posix platforms.')
test_utils.set_up_pyfakefs(self)
def test_remove(self):
"""Test remove."""
self.fs.CreateFile('/test/aa/bb.txt', contents='s')
self.fs.CreateFile('/test/aa/cc.txt', contents='')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='s')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='')
shell.remove_empty_files('/test')
self.assertTrue(os.path.exists('/test/aa/bb.txt'))
self.assertTrue(os.path.exists('/test/aa/aa/dd.txt'))
self.assertFalse(os.path.exists('/test/aa/cc.txt'))
self.assertFalse(os.path.exists('/test/aa/aa/aa.txt'))
def test_ignore_file(self):
self.fs.CreateFile('/test/aa/cc.txt', contents='')
shell.remove_empty_files('/test/aa/cc.txt')
self.assertTrue(os.path.exists('/test/aa/cc.txt'))
@mock.patch('os.remove', autospec=True)
def test_exception(self, mock_remove):
# bypass pyfakefs's os.remove.
os.remove = mock_remove
mock_remove.side_effect = OSError()
self.fs.CreateFile('/test/aa/cc.txt', contents='')
shell.remove_empty_files('/test')
self.assertTrue(os.path.exists('/test/aa/cc.txt'))
class RemoveDirectoryTest(unittest.TestCase):
"""Tests for remove_directory."""
def setUp(self):
test_helpers.patch(self, [
'os.chmod',
'os.mkdir',
'os.path.exists',
'os.system',
'system.environment.platform',
'metrics.logs.log_error',
'metrics.logs.log_warn',
'shutil.rmtree',
])
def _test_remove_os_specific(self, platform, recreate, raise_mkdir_error):
"""Helper for testing removing dir with os-specific command."""
self.mock.platform.return_value = platform
self.mock.exists.side_effect = [True, False, False]
if raise_mkdir_error:
self.mock.mkdir.side_effect = OSError()
result = shell.remove_directory('dir', recreate=recreate)
if recreate:
self.assertEqual(not raise_mkdir_error, result)
else:
self.assertTrue(result)
self.mock.rmtree.assert_has_calls([])
if recreate:
self.mock.mkdir.assert_has_calls([mock.call('dir')])
else:
self.mock.mkdir.assert_has_calls([])
def test_remove_os_specific_windows(self):
"""Test remove with os-specific command on windows."""
self._test_remove_os_specific('WINDOWS', True, False)
self.mock.system.assert_has_calls([mock.call('rd /s /q "dir" > nul 2>&1')])
def test_remove_os_specific_non_windows(self):
"""Test remove with os-specific command on non-windows."""
self._test_remove_os_specific('LINUX', True, False)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
def test_remove_without_recreate(self):
"""Test remove without recreate."""
self._test_remove_os_specific('LINUX', False, True)
def test_remove_with_mkdir_error(self):
"""Test remove when mkdir errors."""
self._test_remove_os_specific('LINUX', True, True)
def test_remove_shutil_success(self):
"""Test remove with shutil."""
self.mock.exists.side_effect = [True, True, False]
self.assertTrue(shell.remove_directory('dir'))
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_failure(self):
"""Test remove with shutil but fails."""
self.mock.exists.side_effect = [True, True, True]
self.assertFalse(shell.remove_directory('dir'))
self.mock.log_error.assert_has_calls(
[mock.call('Failed to clear directory dir.')])
self.assertEqual(0, self.mock.log_warn.call_count)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_failure_ignore_errors(self):
self.mock.exists.side_effect = [True, True, True]
self.assertFalse(shell.remove_directory('dir', ignore_errors=True))
self.mock.log_warn.assert_has_calls(
[mock.call('Failed to clear directory dir.')])
self.assertEqual(0, self.mock.log_error.call_count)
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
def test_remove_shutil_onerror(self):
"""Test shutil invoking onerror."""
self.mock.exists.side_effect = [True, True, False]
self.assertTrue(shell.remove_directory('dir'))
self.mock.system.assert_has_calls(
[mock.call('rm -rf "dir" > /dev/null 2>&1')])
self.mock.rmtree.assert_has_calls([mock.call('dir', onerror=mock.ANY)])
onerror = self.mock.rmtree.call_args[1]['onerror']
fake_fn = mock.MagicMock()
fake_fn.side_effect = OSError()
onerror(fake_fn, 'dir/child', ImportError())
self.mock.chmod.assert_has_calls([mock.call('dir/child', 0o750)])
fake_fn.assert_has_calls([mock.call('dir/child')])
class GetDirectoryFileCount(fake_filesystem_unittest.TestCase):
"""Tests for get_directory_file_count."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test get_directory_file_count."""
self.fs.CreateFile('/test/aa/bb.txt', contents='abc')
self.fs.CreateFile('/test/aa/cc.txt', contents='def')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='ghi')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='t')
self.assertEqual(shell.get_directory_file_count('/test/aa'), 4)
class GetDirectorySizeTest(fake_filesystem_unittest.TestCase):
"""Tests for get_directory_size."""
def setUp(self):
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test get_directory_size."""
self.fs.CreateFile('/test/aa/bb.txt', contents='abc')
self.fs.CreateFile('/test/aa/cc.txt', contents='def')
self.fs.CreateFile('/test/aa/aa/aa.txt', contents='ghi')
self.fs.CreateFile('/test/aa/aa/dd.txt', contents='t')
self.assertEqual(shell.get_directory_size('/test/aa'), 10)
class WhichTest(fake_filesystem_unittest.TestCase):
"""Tests for which (shutil.which)."""
def setUp(self):
# FIXME: Add support for Windows.
if not environment.is_posix():
self.skipTest('Which test is only supported on posix platforms.')
def test(self):
self.assertEqual('/bin/ls', shell.which('ls'))
class ClearSystemTempDirectoryTest(fake_filesystem_unittest.TestCase):
"""Tests for clear_system_temp_directory."""
def setUp(self):
test_helpers.patch(self, [
'tempfile.gettempdir',
])
self.mock.gettempdir.return_value = '/tmp'
test_utils.set_up_pyfakefs(self)
def test(self):
"""Test clear_system_temp_directory works as expected."""
self.fs.CreateFile('/tmp/aa/bb.txt', contents='abc')
self.fs.CreateFile('/tmp/cc/dd/ee.txt', contents='def')
self.fs.CreateDirectory('/tmp/ff/gg')
self.fs.CreateDirectory('/tmp/hh')
self.fs.CreateDirectory('/unrelated')
self.fs.CreateFile('/unrelated/zz.txt', contents='zzz')
os.symlink('/unrelated/zz.txt', '/tmp/hh/gg.txt')
os.symlink('/unrelated', '/tmp/ii')
shell.clear_system_temp_directory()
self.assertTrue(os.path.exists('/tmp'))
self.assertTrue(os.path.exists('/unrelated'))
self.assertEqual(shell.get_directory_file_count('/tmp'), 0)
self.assertEqual(shell.get_directory_file_count('/unrelated'), 1)
self.assertFalse(os.path.exists('/tmp/aa/bb.txt'))
self.assertFalse(os.path.exists('/tmp/cc/dd/ee.txt'))
self.assertFalse(os.path.exists('/tmp/ff/gg'))
self.assertFalse(os.path.exists('/tmp/hh'))
class GetExecuteCommand(unittest.TestCase):
"""Test that the correct commands to run files are returned."""
def call_and_assert_helper(self, expected_command, file_to_execute):
"""Call get_execute_command on |file_to_execute| and assert result equal to
|expected_command|."""
self.assertEqual(expected_command,
shell.get_execute_command(file_to_execute))
def test_standard_script(self):
"""Test correct command returned for python script."""
script_name = 'script.py'
expected_command = 'python %s' % script_name
self.call_and_assert_helper(expected_command, script_name)
def test_java(self):
"""Test correct launch command returned for Java class."""
script_name = 'javaclassfile.class'
expected_command = 'java javaclassfile'
self.call_and_assert_helper(expected_command, script_name)
def test_binary(self):
"""Test correct launch command returned for a binary (executable) file."""
executable_name = 'executable'
self.call_and_assert_helper(executable_name, executable_name)
executable_name += '.exe'
self.call_and_assert_helper(executable_name, executable_name)
class GetInterpreter(unittest.TestCase):
"""Test that the correct interpreters to execute a file are returned."""
def test_get_interpreted_file(self):
"""Test correct interpreter is returned for a file that needs one."""
self.assertEqual('python', shell.get_interpreter('run.py'))
def test_get_non_interpreter_file(self):
"""Test that None is returned for a file that doesn't need one. We don't
want an empty string, since None is easier to check for."""
self.assertIsNone(shell.get_interpreter('executable'))
| 36.101045
| 79
| 0.705434
| 1,459
| 10,361
| 4.836189
| 0.169294
| 0.031746
| 0.038549
| 0.038265
| 0.559524
| 0.514881
| 0.457625
| 0.397109
| 0.336026
| 0.294218
| 0
| 0.003633
| 0.149792
| 10,361
| 286
| 80
| 36.227273
| 0.797366
| 0.179423
| 0
| 0.363128
| 0
| 0
| 0.151864
| 0.00839
| 0
| 0
| 0
| 0.003497
| 0.27933
| 1
| 0.156425
| false
| 0
| 0.050279
| 0
| 0.251397
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b3842e1a431bbf5fa9d29f78a1c73a20bb3a410
| 2,735
|
py
|
Python
|
Language Model/birnn/model.py
|
osamaqureshi/NLP-for-Urdu
|
864550dbf27244900c2be86e0bedcfb5bb519cb6
|
[
"MIT"
] | 1
|
2020-10-22T20:18:22.000Z
|
2020-10-22T20:18:22.000Z
|
Language Model/birnn/model.py
|
osamaqureshi/NLP-for-Urdu
|
864550dbf27244900c2be86e0bedcfb5bb519cb6
|
[
"MIT"
] | null | null | null |
Language Model/birnn/model.py
|
osamaqureshi/NLP-for-Urdu
|
864550dbf27244900c2be86e0bedcfb5bb519cb6
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
class Bidirectional(tf.keras.Model):
def __init__(self, units: int,
projection_units: int):
super(Bidirectional, self).__init__()
self.units = units
self.projection_units = projection_units
self.Layers = [tf.keras.layers.Bidirectional(tf.keras.layers.GRU(self.units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform',
name='birnn')),
tf.keras.layers.Dense(self.projection_units, name='projection')]
def call(self, inp):
out, _, _ = self.Layers[0](inp)
out = self.Layers[1](out)
return out
class BiRNN(tf.keras.Model):
def __init__(self, units: int,projection_units: int,max_seq_length: int,
vocab_size: int,embedding_dim: int,embedding_matrix = None):
super(BiRNN, self).__init__()
self.units = units
self.projection_units=projection_units
self.max_seq_length = max_seq_length
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.embeddings = tf.keras.layers.Embedding(self.vocab_size, self.embedding_dim,
weights = [embedding_matrix],
trainable=False, name='embeddings')
self.Layers = [Bidirectional(units=self.units, projection_units=self.projection_units),
tf.keras.layers.Add(),
Bidirectional(units=self.units, projection_units=self.projection_units),
tf.keras.layers.Dense(self.vocab_size, activation='softmax', name='softmax')]
def call(self, inp, predict=False):
inp = self.embeddings(inp)
out1 = self.Layers[0](inp)
out2 = self.Layers[1]([inp, out1])
out3 = self.Layers[2](out2)
if predict is False:
return out3
else:
out4 = self.Layers[3](out3)
return out4
def loss_function(real, pred, loss_object):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
def mask_sequences(seq, t):
mask = np.zeros(seq.shape)
mask[:,:t] = 1
inp = tf.math.multiply(seq, mask)
mask[:,:t+1] = 1
tar = tf.math.multiply(seq, mask)
return inp, tar
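# A minimal usage sketch; the sizes and the random embedding matrix below are
# illustrative assumptions, not taken from the module above. Note that the
# Add() layer sums the raw embeddings with the projected BiRNN output, so
# projection_units must equal embedding_dim for the shapes to match.
if __name__ == '__main__':
    vocab_size, embedding_dim, max_len = 1000, 64, 20
    embedding_matrix = np.random.rand(vocab_size, embedding_dim)
    model = BiRNN(units=128, projection_units=embedding_dim,
                  max_seq_length=max_len, vocab_size=vocab_size,
                  embedding_dim=embedding_dim, embedding_matrix=embedding_matrix)
    batch = np.random.randint(1, vocab_size, size=(8, max_len))
    hidden = model(batch)               # (8, max_len, projection_units)
    probs = model(batch, predict=True)  # (8, max_len, vocab_size)
    print(hidden.shape, probs.shape)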
| 42.734375
| 113
| 0.545521
| 299
| 2,735
| 4.789298
| 0.267559
| 0.115223
| 0.054469
| 0.067039
| 0.34148
| 0.255587
| 0.255587
| 0.255587
| 0.255587
| 0.255587
| 0
| 0.010728
| 0.352468
| 2,735
| 64
| 114
| 42.734375
| 0.797854
| 0
| 0
| 0.070175
| 0
| 0
| 0.019371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.035088
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b3934ad826855dff168d0197fe9075473c458c0
| 20,474
|
py
|
Python
|
viz_utils/eoa_viz.py
|
olmozavala/eoas-pyutils
|
f552a512e250f8aa16e1f3ababf8b4644253918b
|
[
"MIT"
] | null | null | null |
viz_utils/eoa_viz.py
|
olmozavala/eoas-pyutils
|
f552a512e250f8aa16e1f3ababf8b4644253918b
|
[
"MIT"
] | null | null | null |
viz_utils/eoa_viz.py
|
olmozavala/eoas-pyutils
|
f552a512e250f8aa16e1f3ababf8b4644253918b
|
[
"MIT"
] | null | null | null |
import os
from PIL import Image
import cv2
from os import listdir
from os.path import join
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LogNorm
from io_utils.io_common import create_folder
from viz_utils.constants import PlotMode, BackgroundType
import pylab
import numpy as np
import cmocean
import shapely
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy
def select_colormap(field_name):
'''
Based on the name of the field, it chooses a colormap from cmocean.
Args:
field_name:
Returns:
'''
if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]):
# cmaps_fields.append(cmocean.cm.deep_r)
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]):
return cmocean.cm.thermal
elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]):
return cmocean.cm.curl
elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]):
return cmocean.cm.haline
elif field_name.find('error') != -1:
return cmocean.cm.diff
elif field_name.find('binary') != -1:
return cmocean.cm.oxy
elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]):
return cmocean.cm.speed
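# Illustrative mappings (assuming cmocean is installed, as imported above):
#   select_colormap('sst_mean')         -> cmocean.cm.thermal
#   select_colormap('surface_salinity') -> cmocean.cm.haline
#   select_colormap('u_velocity')       -> cmocean.cm.speed
# Field names that match none of the patterns fall through and return None.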
class EOAImageVisualizer:
"""This class makes plenty of plots assuming we are plotting Geospatial data (maps).
It is made to read xarrays, numpy arrays, and numpy arrays in dictionaries
vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',
lats=[lats],lons=[lons])
"""
_COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k']
_figsize = 8
_font_size = 30
_units = ''
_max_imgs_per_row = 4
_mincbar = np.nan # User can set a min and max colorbar values to 'force' same color bar to all plots
_maxcbar = np.nan
_flip_data = True
_eoas_pyutils_path = './eoas_pyutils'# This is the path where the eoas_utils folder is stored with respect to the main project
_contourf = False # When plotting non-regular grids and need precision
_background = BackgroundType.BLUE_MARBLE_LR # Select the background to use
_auto_colormap = True # Selects the colormap based on the name of the field
_show_var_names = False # Includes the name of the field name in the titles
_additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In case we want to include additional polygons in the plots (all of them)
# If you want to add a streamplot of a vector field. It must be a dictionary with keys x,y,u,v
# and optional density, color, cmap, arrowsize, arrowstyle, minlength
_vector_field = None
_norm = None # Use to normalize the colormap. For example with LogNorm
# vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',
# lats=[lats],lons=[lons])
def __init__(self, disp_images=True, output_folder='output',
lats=[-90,90], lons =[-180,180],
projection=ccrs.PlateCarree(), **kwargs):
# All the arguments that are passed to the constructor of the class MUST have its name on it.
self._disp_images = disp_images
self._output_folder = output_folder
self._projection = projection
bbox = self.getExtent(lats, lons)
self._extent = bbox
self._lats = lats
self._lons = lons
self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2])
self._contour_labels = False
for arg_name, arg_value in kwargs.items():
self.__dict__["_" + arg_name] = arg_value
print(self.__dict__["_" + arg_name])
def __getattr__(self, attr):
'''Generic getter for all the properties of the class'''
return self.__dict__["_" + attr]
def __setattr__(self, attr, value):
'''Generic setter for all the properties of the class'''
self.__dict__["_" + attr] = value
def add_colorbar(self, fig, im, ax, show_color_bar, label=""):
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html
if show_color_bar:
font_size_cbar = self._font_size * .5
# TODO how to make this automatic and works always
cbar = fig.colorbar(im, ax=ax, shrink=.7)
cbar.ax.tick_params(labelsize=font_size_cbar)
if label != "":
cbar.set_label(label, fontsize=font_size_cbar*1.2)
else:
cbar.set_label(self._units, fontsize=font_size_cbar*1.2)
def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None:
"""
Plots a 2D img for EOA data.
:param c_img: 2D array
:param ax: geoaxes
:return:
"""
c_ax = ax
if self._flip_data:
origin = 'lower'
else:
origin = 'upper'
if self._background == BackgroundType.CARTO_DEF:
c_ax.stock_img()
else:
if self._background == BackgroundType.BLUE_MARBLE_LR:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png'))
if self._background == BackgroundType.BLUE_MARBLE_HR:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg'))
if self._background == BackgroundType.TOPO:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png'))
if self._background == BackgroundType.BATHYMETRY:
img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg'))
c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree())
if mode == PlotMode.RASTER or mode == PlotMode.MERGED:
if self._contourf:
im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent)
else:
if np.isnan(mincbar):
im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm)
else:
im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm)
if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED:
c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))
if mode == PlotMode.CONTOUR:
im = c_ax.contour(c_img, extent=self._extent, transform=self._projection)
if mode == PlotMode.MERGED:
if self._contour_labels:
c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection)
else:
c_ax.contour(c_img, extent=self._extent, transform=self._projection)
if len(self._additional_polygons) > 0:
pol_lats = []
pol_lons = []
for c_polygon in self._additional_polygons:
if isinstance(c_polygon, shapely.geometry.linestring.LineString):
x,y = c_polygon.xy
elif isinstance(c_polygon, shapely.geometry.polygon.Polygon):
x, y = c_polygon.exterior.xy
pol_lats += y
pol_lons += x
c_ax.plot(x,y, transform=self._projection, c='r')
# Adds a threshold to the plot to see the polygons
c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5))
if self._vector_field is not None:
try:
u = self._vector_field['u']
v = self._vector_field['v']
x = self._vector_field['x']
y = self._vector_field['y']
vec_keys = self._vector_field.keys()
c = 'r'
density = 1
linewidth = 3
vec_cmap = cmocean.cm.solar
if 'color' in vec_keys:
c = self._vector_field['color']
if 'density' in vec_keys:
density = self._vector_field['density']
if 'linewidth' in vec_keys:
linewidth = self._vector_field['linewidth']
if 'cmap' in vec_keys:
vec_cmap = self._vector_field['cmap']
c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))
c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c,
cmap=vec_cmap, linewidth=linewidth)
except Exception as e:
print(F"Couldn't add vector field e:{e}")
gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--')
# gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'}
font_coords = {'size': self._font_size*.6}
gl.xlabel_style = font_coords
gl.ylabel_style = font_coords
gl.top_labels = False
gl.right_labels = False
return im
def get_proper_size(self, rows, cols):
"""
Obtains the proper size for a figure.
:param rows: how many rows will the figure have
        :param cols: how many cols will the figure have
        (the width/height proportion is taken from self._fig_prop)
:return:
"""
if rows == 1:
return self._figsize * cols * self._fig_prop, self._figsize
else:
return self._figsize * cols * self._fig_prop, self._figsize * rows
def _close_figure(self):
"""Depending on what is disp_images, the figures are displayed or just closed"""
if self._disp_images:
plt.show()
else:
plt.close()
def getExtent(self, lats, lons, expand_ext=0.0):
'''
        Obtains the bounding box of the coordinates. If expand_ext is given, the bbox is expanded in all directions by that amount.
        Args:
            lats:
            lons:
            expand_ext:
        Returns:
            (minLon, maxLon, minLat, maxLat)
'''
minLat = np.amin(lats) - expand_ext
maxLat = np.amax(lats) + expand_ext
minLon = np.amin(lons) - expand_ext
maxLon = np.amax(lons) + expand_ext
bbox = (minLon, maxLon, minLat, maxLat)
return bbox
def xr_summary(self, ds):
""" Prints a summary of the netcdf (global attributes, variables, etc)
:param ds:
:return:
"""
print("\n========== Global attributes =========")
for name in ds.attrs:
print(F"{name} = {getattr(ds, name)}")
print("\n========== Dimensions =========")
for name in ds.dims:
print(F"{name}: {ds[name].shape}")
print("\n========== Coordinates =========")
for name in ds.coords:
print(F"{name}: {ds[name].shape}")
print("\n========== Variables =========")
for cur_variable_name in ds.variables:
cur_var = ds[cur_variable_name]
print(F"{cur_variable_name}: {cur_var.dims} {cur_var.shape}")
def nc_summary(self, ds):
""" Prints a summary of the netcdf (global attributes, variables, etc)
:param ds:
:return:
"""
print("\n========== Global attributes =========")
for name in ds.ncattrs():
print(F"{name} = {getattr(ds, name)}")
print("\n========== Variables =========")
netCDFvars = ds.variables
for cur_variable_name in netCDFvars.keys():
cur_var = ds.variables[cur_variable_name]
print(F"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}")
def add_roads(self, ax):
# Names come from: https://www.naturalearthdata.com/features/
# -- Add states
roads = cfeature.NaturalEarthFeature(
category='cultural',
name='roads',
scale='10m',
facecolor='none')
ax.add_feature(roads, edgecolor='black')
return ax
def add_states(self, ax):
# Names come from: https://www.naturalearthdata.com/features/
# -- Add states
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
ax.add_feature(states_provinces, edgecolor='gray')
return ax
def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''):
'''
This function plots points in a map
:param bbox:
:return:
'''
if bbox is None:
bbox = (-180, 180, -90, 90)
if lats is None:
lats = self.lats
if lons is None:
lons = self.lons
fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()})
        ax.set_extent(bbox)  # If we do not set this, it will crop the map to the limits of the plotted locations
ax.gridlines()
im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap)
fig.colorbar(im, ax=ax, shrink=0.7)
ax.coastlines()
plt.title(title)
plt.show()
def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='',
file_name_prefix='', cmap=None, z_names = [],
show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
"""
Plots multiple z_levels for multiple fields.
It uses rows for each depth, and columns for each variable
"""
create_folder(self._output_folder)
orig_cmap = cmap
        # If the user does not request any z-levels, then all of them are plotted
if len(z_levels) == 0:
z_levels = range(np_variables[var_names[0]].shape[0])
cols = np.min((self._max_imgs_per_row, len(var_names)))
if cols == len(var_names):
rows = len(z_levels)
else:
rows = int(len(z_levels) * np.ceil(len(var_names)/cols))
fig, _axs = plt.subplots(rows, cols,
figsize=self.get_proper_size(rows, cols),
subplot_kw={'projection': self._projection})
for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels
# Verify the index of the z_levels are the original ones.
if len(z_names) != 0:
c_slice_txt = z_names[c_slice]
else:
c_slice_txt = c_slice
c_mincbar = np.nan
c_maxcbar = np.nan
for idx_var, c_var in enumerate(var_names): # Iterate over the fields
if rows*cols == 1: # Single figure
ax = _axs
else:
ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var]
                # Here we choose the min and max colorbar limits for each field
if not(np.all(np.isnan(mincbar))):
if type(mincbar) is list:
c_mincbar = mincbar[idx_var]
else:
c_mincbar = mincbar
if not(np.all(np.isnan(maxcbar))):
                    if type(maxcbar) is list:
c_maxcbar = maxcbar[idx_var]
else:
c_maxcbar = maxcbar
# By default we select the colorbar from the name of the variable
if self._auto_colormap and orig_cmap is None:
cmap = select_colormap(c_var)
else:
# If there is an array of colormaps we select the one for this field
if type(orig_cmap) is list:
cmap = orig_cmap[idx_var]
else:
# If it is just one cmap, then we use it for all the fields
cmap = orig_cmap
im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode,
mincbar=c_mincbar, maxcbar=c_maxcbar)
if self._show_var_names:
c_title = F'{var_names[idx_var]} {title}'
else:
c_title = F'{title}'
if len(z_levels) > 1:
c_title += F"Z - level: {c_slice_txt}"
ax.set_title(c_title, fontsize=self._font_size)
self.add_colorbar(fig, im, ax, show_color_bar)
plt.tight_layout(pad=.5)
file_name = F'{file_name_prefix}'
pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight')
self._close_figure()
def plot_2d_data_xr(self, np_variables:list, var_names:list, title='',
file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
'''
Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting
:param np_variables:
:param var_names:
:param title:
:param file_name_prefix:
:param cmap:
:param flip_data:
:param rot_90:
:param show_color_bar:
:param plot_mode:
:param mincbar:
:param maxcbar:
:return:
'''
npdict_3d = {}
for i, field_name in enumerate(var_names):
npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0)
self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,
file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],
show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)
def plot_2d_data_np(self, np_variables:list, var_names:list, title='',
file_name_prefix='', cmap=None, flip_data=False,
rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):
'''
Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting
:param np_variables: Numpy variables. They can be with shape [fields, x, y] or just a single field with shape [x,y]
:param var_names:
:param title:
:param file_name_prefix:
:param cmap:
:param flip_data:
:param rot_90:
:param show_color_bar:
:param plot_mode:
:param mincbar:
:param maxcbar:
:return:
'''
npdict_3d = {}
for i, field_name in enumerate(var_names):
if len(np_variables.shape) == 3:
c_np_data = np_variables[i, :, :]
else:
c_np_data = np_variables # Single field
if rot_90:
c_np_data = np.rot90(c_np_data)
if flip_data:
c_np_data = np.flip(np.flip(c_np_data), axis=1)
npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0)
self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,
file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],
show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)
def make_video_from_images(self, input_folder, output_file, fps=24):
files = listdir(input_folder)
files.sort()
print(F"Generating video file: {output_file}")
out_video = -1
        for i, file_name in enumerate(files[0:36]):  # NOTE: only the first 36 frames are written to the video
if i % 10 == 0:
print(F"Adding file # {i}: {file_name}")
c_file = join(input_folder, file_name)
im = Image.open(c_file)
np_im = np.asarray(im)[:, :, :3]
if i == 0:
video_size = (np_im.shape[1], np_im.shape[0])
out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True)
out_video.write(np_im[:, :, ::-1])
out_video.release()
cv2.destroyAllWindows()
print("Done! yeah babe!")
| 41.869121
| 163
| 0.575657
| 2,634
| 20,474
| 4.258162
| 0.190205
| 0.012839
| 0.012839
| 0.006241
| 0.339515
| 0.294401
| 0.251605
| 0.234041
| 0.224055
| 0.209344
| 0
| 0.010745
| 0.313617
| 20,474
| 489
| 164
| 41.869121
| 0.787376
| 0.189216
| 0
| 0.149847
| 0
| 0
| 0.070212
| 0.010084
| 0
| 0
| 0
| 0.002045
| 0
| 1
| 0.055046
| false
| 0
| 0.051988
| 0
| 0.201835
| 0.051988
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b39d4fb43437addee89cd08745a9f78f2bca971
| 1,414
|
py
|
Python
|
ade20kScripts/setup.py
|
fcendra/PSPnet18
|
bc4f4292f4ddd09dba7076ca0b587c8f60dfa043
|
[
"MIT"
] | 1
|
2020-08-16T14:27:31.000Z
|
2020-08-16T14:27:31.000Z
|
ade20kScripts/setup.py
|
fcendra/PSPNet.pytorch
|
bc4f4292f4ddd09dba7076ca0b587c8f60dfa043
|
[
"MIT"
] | null | null | null |
ade20kScripts/setup.py
|
fcendra/PSPNet.pytorch
|
bc4f4292f4ddd09dba7076ca0b587c8f60dfa043
|
[
"MIT"
] | null | null | null |
from os import listdir
from os.path import isfile, join
from path import Path
import numpy as np
import cv2
# Dataset path
target_path = Path('target/')
annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()
dataset = [ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))]
images = np.empty(len(dataset), dtype = object)
count = 1
# Iterate all Training Images
for n in range(0, len(dataset)):
# Read image
images[n] = cv2.imread(join(annotation_images_path,dataset[n]))
# Convert it to array
array = np.asarray(images[n],dtype=np.int8)
    # Where the annotation value is less than 1, set it to 255;
    # otherwise decrement it by 1.
    arr = np.where(array < 1, 255, array -1)
    # Save the result to the target folder
if count < 10:
cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + ".png", arr)
elif count < 100 and count > 9:
cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + ".png", arr)
elif count < 1000 and count > 99:
cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + ".png", arr)
elif count < 10000 and count > 999:
cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + ".png", arr)
else:
cv2.imwrite(target_path +'ADE_train_000'+ str(count) + ".png", arr)
print(str(count) + ".png is printed")
count += 1
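The if/elif ladder above only zero-pads the counter to eight digits; a hypothetical, equivalent rewrite using str.zfill is sketched here for clarity rather than as part of the original script.

# Hypothetical equivalent of the if/elif chain above: ADE20K training annotations
# are named ADE_train_XXXXXXXX.png with an 8-digit, zero-padded counter.
def ade_train_name(count):
    return 'ADE_train_' + str(count).zfill(8) + '.png'

# e.g. ade_train_name(7) -> 'ADE_train_00000007.png'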
| 34.487805
| 97
| 0.65983
| 213
| 1,414
| 4.267606
| 0.384977
| 0.066007
| 0.072607
| 0.110011
| 0.229923
| 0.229923
| 0
| 0
| 0
| 0
| 0
| 0.062106
| 0.214286
| 1,414
| 40
| 98
| 35.35
| 0.756076
| 0.136492
| 0
| 0
| 0
| 0
| 0.126238
| 0.029703
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.192308
| 0
| 0.192308
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b3bed4772887b4ca3b8868f07f00b80ff44103a
| 1,503
|
py
|
Python
|
Core/managers/InputPeripherals.py
|
Scoppio/Rogue-EVE
|
a46f1faa9c7835e8c5838f6270fb5d75b349936b
|
[
"MIT"
] | 2
|
2016-11-07T23:43:17.000Z
|
2016-11-08T21:49:57.000Z
|
Core/managers/InputPeripherals.py
|
Scoppio/Rogue-EVE
|
a46f1faa9c7835e8c5838f6270fb5d75b349936b
|
[
"MIT"
] | null | null | null |
Core/managers/InputPeripherals.py
|
Scoppio/Rogue-EVE
|
a46f1faa9c7835e8c5838f6270fb5d75b349936b
|
[
"MIT"
] | null | null | null |
import logging
from models.GenericObjects import Vector2
logger = logging.getLogger('Rogue-EVE')
class MouseController(object):
"""
Mouse controller needs the map, get over it
"""
def __init__(self, map=None, object_pool=None):
self.mouse_coord = (0, 0)
self.map = map
self.object_pool = object_pool
self.camera = None
def set_map(self, map):
self.map = map
def set_object_pool(self, object_pool):
self.object_pool = object_pool
def get_mouse_coord(self):
return self.mouse_coord
def set_mouse_coord(self, new_coord):
self.mouse_coord = new_coord
logger.debug("mouse position {}".format(self.mouse_coord))
def get_names_under_mouse(self):
# return a string with the names of all objects under the mouse
(x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord)
# create a list with the names of all objects at the mouse's coordinates and in FOV
objects = self.object_pool.get_objects_as_list()
names = ""
if self.map and self.object_pool:
if objects and self.map:
names = [obj.name for obj in objects
if obj.coord.X == x and obj.coord.Y == y and (x,y) in self.map.get_visible_tiles()]
names = ', '.join(names) # join the names, separated by commas
else:
logger.warning("map or object pool not initialized!")
return names.capitalize()
| 33.4
| 109
| 0.630739
| 209
| 1,503
| 4.368421
| 0.334928
| 0.109529
| 0.07667
| 0.043812
| 0.131435
| 0.052574
| 0
| 0
| 0
| 0
| 0
| 0.003687
| 0.27811
| 1,503
| 44
| 110
| 34.159091
| 0.837788
| 0.149035
| 0
| 0.133333
| 0
| 0
| 0.04996
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.066667
| 0.033333
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b3c8e618e44b6365d5b13bea7673584e02f77cc
| 1,652
|
py
|
Python
|
the_unsync/thesync.py
|
vromanuk/async_techniques
|
7e1c6efcd4c81c322002eb3002d5bb929c5bc623
|
[
"MIT"
] | null | null | null |
the_unsync/thesync.py
|
vromanuk/async_techniques
|
7e1c6efcd4c81c322002eb3002d5bb929c5bc623
|
[
"MIT"
] | null | null | null |
the_unsync/thesync.py
|
vromanuk/async_techniques
|
7e1c6efcd4c81c322002eb3002d5bb929c5bc623
|
[
"MIT"
] | null | null | null |
from unsync import unsync
import asyncio
import datetime
import math
import aiohttp
import requests
def main():
t0 = datetime.datetime.now()
tasks = [
compute_some(),
compute_some(),
compute_some(),
download_some(),
download_some(),
download_some(),
download_some_more(),
download_some_more(),
wait_some(),
wait_some(),
wait_some(),
wait_some()]
[t.result() for t in tasks]
dt = datetime.datetime.now() - t0
print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds()))
@unsync(cpu_bound=True)
def compute_some():
print('Computing...')
for _ in range(1, 10_000_000):
math.sqrt(25 ** 25 + .01)
@unsync()
async def download_some():
print('Downloading...')
url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:
async with session.get(url) as resp:
resp.raise_for_status()
text = await resp.text()
print('Downloaded (more) {:,} characters'.format(len(text)))
@unsync()
def download_some_more():
print('Downloading more...')
url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled'
resp = requests.get(url)
resp.raise_for_status()
text = resp.text
print('Downloaded (more) {:,} characters'.format(len(text)))
@unsync()
async def wait_some():
print('Waiting...')
for _ in range(1, 1000):
await asyncio.sleep(.001)
if __name__ == '__main__':
main()
| 24.656716
| 99
| 0.626513
| 203
| 1,652
| 4.916256
| 0.423645
| 0.084168
| 0.064128
| 0.072144
| 0.240481
| 0.196393
| 0.164329
| 0.112224
| 0.112224
| 0.112224
| 0
| 0.025
| 0.225182
| 1,652
| 66
| 100
| 25.030303
| 0.754688
| 0
| 0
| 0.339623
| 0
| 0.037736
| 0.196126
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.113208
| 0
| 0.169811
| 0.132075
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b4010a8299e923b75856db3391db03cdf9dc135
| 641
|
py
|
Python
|
app.py
|
rghose/lol3
|
c902e61bd5d69c541b46c834a5183e4da8eec591
|
[
"BSD-2-Clause"
] | null | null | null |
app.py
|
rghose/lol3
|
c902e61bd5d69c541b46c834a5183e4da8eec591
|
[
"BSD-2-Clause"
] | null | null | null |
app.py
|
rghose/lol3
|
c902e61bd5d69c541b46c834a5183e4da8eec591
|
[
"BSD-2-Clause"
] | null | null | null |
from flask import *
app = Flask(__name__)
import botty
# ----------------------------------
@app.route("/", methods=['GET', 'POST'])
def hello():
if request.method == 'POST':
data = request.form["query"]
return render_template("index.html",data=data)
return render_template("main.html")
# -----------------------------------
# -----------------------------------
@app.route("/request", methods=['POST'])
def respond():
data = request.form["data"]
return botty.botty_get_response(data)
# -----------------------------------
if __name__ == "__main__":
app.debug = True
app.run(host="0.0.0.0")
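A quick way to exercise these routes from another process, shown only as a sketch: it assumes the app above is running locally on Flask's default port 5000 and that the botty module is importable on the server side.

import requests

# Render the form page (GET /)
print(requests.get('http://127.0.0.1:5000/').status_code)

# Ask the bot for a response through the /request endpoint (form field 'data')
reply = requests.post('http://127.0.0.1:5000/request', data={'data': 'hello'})
print(reply.text)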
| 23.740741
| 54
| 0.483619
| 65
| 641
| 4.523077
| 0.476923
| 0.020408
| 0.102041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007394
| 0.156006
| 641
| 26
| 55
| 24.653846
| 0.536044
| 0.221529
| 0
| 0
| 0
| 0
| 0.135628
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b40618c90c089307047e8b7e28b599c38d7a399
| 451
|
py
|
Python
|
config.py
|
metarom-quality/gooseberry
|
544503c52edd360a53d09f69ea6b4a0645aa617a
|
[
"MIT"
] | null | null | null |
config.py
|
metarom-quality/gooseberry
|
544503c52edd360a53d09f69ea6b4a0645aa617a
|
[
"MIT"
] | null | null | null |
config.py
|
metarom-quality/gooseberry
|
544503c52edd360a53d09f69ea6b4a0645aa617a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
DATABASE="/home/tomate/Warehouse/syte/meta.db"
XLSDIR = "/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/"
temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith("xlsx") or i.endswith("xls")]
flist = {}
for i in temp:
name = i.split(" ")[0].split("-")[0].split(".")[0]
if name.startswith("~") or name.startswith("PR") or name.startswith("FAB"):
continue
else:
flist[name] = i
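A small illustration, not part of the original script, of how the split chain reduces a workbook filename to its key; the filename used here is made up.

# Illustration only: 'AB123-v2 recipe.xlsx' is a made-up filename
fname = "AB123-v2 recipe.xlsx"
key = fname.split(" ")[0].split("-")[0].split(".")[0]
print(key)  # AB123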
| 26.529412
| 87
| 0.627494
| 69
| 451
| 4.101449
| 0.623188
| 0.063604
| 0.042403
| 0.084806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013228
| 0.161863
| 451
| 16
| 88
| 28.1875
| 0.73545
| 0.046563
| 0
| 0
| 0
| 0
| 0.247086
| 0.20979
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b41d9378cb46e318f4cb6580acecc3d11ab3c3b
| 1,054
|
py
|
Python
|
setup.py
|
markostrajkov/range-requests-proxy
|
74d4bfee93098854c7b9f723c03c2316e729f295
|
[
"BSD-3-Clause"
] | 1
|
2016-08-14T14:12:04.000Z
|
2016-08-14T14:12:04.000Z
|
setup.py
|
markostrajkov/range-requests-proxy
|
74d4bfee93098854c7b9f723c03c2316e729f295
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
markostrajkov/range-requests-proxy
|
74d4bfee93098854c7b9f723c03c2316e729f295
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='range-requests-proxy',
version='0.1',
description='Asynchronous HTTP proxy for HTTP Range Requests',
author='Marko Trajkov',
author_email='markostrajkov@gmail.com',
cmdclass={'test': PyTest},
tests_require=['pytest>=2.8.0', 'mock==2.0.0'],
install_requires=['tornado==4.4.1', 'pycurl==7.43.0'],
packages=['rangerequestsproxy'],
license='BSD',
url='https://github.com/markostrajkov/range-requests-proxy',
)
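The PyTest command above simply hands the suite to pytest; as a rough sketch, the equivalent direct invocation looks like this (assuming pytest and the packages listed in tests_require are installed).

import sys

import pytest

# Equivalent of `python setup.py test`: run pytest and exit with its status code.
# Any extra CLI arguments (the --pytest-args option above) can be placed in this list.
sys.exit(pytest.main([]))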
| 26.35
| 76
| 0.665085
| 132
| 1,054
| 5.212121
| 0.545455
| 0.063953
| 0.061047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017647
| 0.193548
| 1,054
| 39
| 77
| 27.025641
| 0.791765
| 0.018975
| 0
| 0
| 0
| 0
| 0.270087
| 0.022265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0.034483
| 0.137931
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b42790dafdbd5621ed121da922a0750203f73ba
| 918
|
py
|
Python
|
tests/pytorch_pfn_extras_tests/onnx/test_load_model.py
|
kmaehashi/pytorch-pfn-extras
|
70b5db0dad8a8e342cc231e8a18c6f32ce250d1c
|
[
"MIT"
] | 243
|
2020-05-12T01:15:46.000Z
|
2022-03-21T22:07:57.000Z
|
tests/pytorch_pfn_extras_tests/onnx/test_load_model.py
|
kmaehashi/pytorch-pfn-extras
|
70b5db0dad8a8e342cc231e8a18c6f32ce250d1c
|
[
"MIT"
] | 495
|
2020-05-12T06:45:12.000Z
|
2022-03-31T07:14:02.000Z
|
tests/pytorch_pfn_extras_tests/onnx/test_load_model.py
|
kmaehashi/pytorch-pfn-extras
|
70b5db0dad8a8e342cc231e8a18c6f32ce250d1c
|
[
"MIT"
] | 37
|
2020-05-12T02:16:07.000Z
|
2021-08-11T06:00:16.000Z
|
import os
import pytest
import torch
import pytorch_pfn_extras.onnx as tou
from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net
@pytest.mark.filterwarnings("ignore:Named tensors .* experimental:UserWarning")
def test_onnx_load_model():
model = Net()
outdir = "out/load_model_test"
tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
training=True, do_constant_folding=False)
tou.load_model(os.path.join(outdir, "model.onnx"))
@pytest.mark.filterwarnings("ignore:.*ONNX contains stripped .*:UserWarning")
def test_stripped_onnx_load_model():
model = Net()
outdir = "out/stripped_load_model_test"
tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,
strip_large_tensor_data=True, training=True,
do_constant_folding=False)
tou.load_model(os.path.join(outdir, "model.onnx"))
| 34
| 79
| 0.704793
| 124
| 918
| 4.975806
| 0.362903
| 0.08752
| 0.051864
| 0.097245
| 0.508914
| 0.508914
| 0.508914
| 0.411669
| 0.411669
| 0.411669
| 0
| 0.016021
| 0.184096
| 918
| 26
| 80
| 35.307692
| 0.807744
| 0
| 0
| 0.3
| 0
| 0
| 0.175381
| 0.056645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.25
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b433031281aa45b18a53118e3852e760126a4ce
| 867
|
py
|
Python
|
validate/v1/base.py
|
huzidabanzhang/Python
|
7b304290e5be7db4bce253edb069a12dcbc3c998
|
[
"MIT"
] | 4
|
2019-09-04T09:16:24.000Z
|
2019-09-18T08:50:36.000Z
|
validate/v1/base.py
|
huzidabanzhang/Python
|
7b304290e5be7db4bce253edb069a12dcbc3c998
|
[
"MIT"
] | null | null | null |
validate/v1/base.py
|
huzidabanzhang/Python
|
7b304290e5be7db4bce253edb069a12dcbc3c998
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: Database validator
@Author: Zpp
@Date: 2020-05-28 13:44:29
@LastEditors: Zpp
@LastEditTime: 2020-05-28 14:02:02
'''
params = {
    # Fields to validate
'fields': {
'type': {
'name': '导出类型',
'type': 'int',
'between': [1, 2, 3],
'required': True
},
'document': {
'name': '数据库文件',
'type': 'file',
'required': True,
'msg': '请选择上传数据库文件'
},
'admin_id': {
'name': '管理员编号',
'type': 'str',
'required': True
},
'time': {
'name': '查询时间',
'type': 'str',
'required': True
}
},
    # Export database
    'Export': ['type'],
    # Import database
    'Import': ['document'],
    # Home page login
'Login': ['admin_id', 'time']
}
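A hypothetical illustration of how a validator might consume the schema above; the real project's validator is not shown in this file, so the function name validate_request and its behaviour are made up for the sketch.

# Hypothetical consumer of the params schema above (validate_request is not part of the project).
def validate_request(action, data, schema=params):
    """Check that every field required for `action` is present in `data`."""
    errors = []
    for field in schema.get(action, []):
        rule = schema['fields'][field]
        if rule.get('required') and field not in data:
            errors.append(rule.get('msg', '%s is required' % rule['name']))
    return errors

# e.g. validate_request('Export', {}) -> one error for the missing 'type' field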
| 19.704545
| 34
| 0.392157
| 76
| 867
| 4.447368
| 0.684211
| 0.142012
| 0.047337
| 0.112426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062257
| 0.407151
| 867
| 43
| 35
| 20.162791
| 0.595331
| 0.206459
| 0
| 0.172414
| 0
| 0
| 0.275556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.034483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b434ec1049bc3564470ff973bc2f2c30ca659c6
| 329
|
py
|
Python
|
example/speech_recognition/stt_layer_slice.py
|
axbaretto/mxnet
|
5f593885356ff6d14f5519fa18e79b944beb51cd
|
[
"Apache-2.0"
] | 92
|
2017-04-25T15:40:55.000Z
|
2022-03-28T17:54:53.000Z
|
example/speech_recognition/stt_layer_slice.py
|
yanghaojin/BMXNet
|
102f8d0ed59529bbd162c37bf07ae58ad6c4caa1
|
[
"Apache-2.0"
] | 18
|
2017-05-15T05:16:41.000Z
|
2019-06-14T06:02:08.000Z
|
example/speech_recognition/stt_layer_slice.py
|
yanghaojin/BMXNet
|
102f8d0ed59529bbd162c37bf07ae58ad6c4caa1
|
[
"Apache-2.0"
] | 39
|
2017-04-23T12:38:45.000Z
|
2021-04-04T05:01:03.000Z
|
import mxnet as mx
def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True):
net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis)
hidden_all = []
for seq_index in range(seq_len):
hidden_all.append(net[seq_index])
net = hidden_all
return net
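A small usage sketch, not part of the original file: the 'data' symbol stands in for the output of an earlier acoustic-model layer, and seq_len=10 is an arbitrary example value.

import mxnet as mx

net = mx.sym.Variable('data')            # placeholder for a (batch, seq_len, features) input
seq_symbols = slice_symbol_to_seq_symobls(net, seq_len=10, axis=1)
print(len(seq_symbols))                  # -> 10, one symbol per time step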
| 29.909091
| 98
| 0.726444
| 54
| 329
| 4.12963
| 0.537037
| 0.080717
| 0.089686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00369
| 0.176292
| 329
| 10
| 99
| 32.9
| 0.819188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b4469c0d369d163f87c18b571da60869e4d600b
| 8,000
|
py
|
Python
|
api/auth.py
|
fergalmoran/dss.api
|
d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073
|
[
"BSD-2-Clause"
] | null | null | null |
api/auth.py
|
fergalmoran/dss.api
|
d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073
|
[
"BSD-2-Clause"
] | null | null | null |
api/auth.py
|
fergalmoran/dss.api
|
d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073
|
[
"BSD-2-Clause"
] | null | null | null |
import datetime
import json
from calendar import timegm
from urllib.parse import parse_qsl
import requests
from allauth.socialaccount import models as aamodels
from requests_oauthlib import OAuth1
from rest_framework import parsers, renderers
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework_jwt.settings import api_settings
from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler
from dss import settings
from spa.models import UserProfile
from spa.models.socialaccountlink import SocialAccountLink
def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload):
"""
    Do some magic here to find the user account and deprecate psa:
    1. Look for the account in SocialAccountLink
    2. Fall back to allauth's SocialAccount and create the SocialAccountLink from it
"""
user = None
try:
sa = SocialAccountLink.objects.get(social_id=uid)
sa.type = provider
sa.social_id = uid
sa.access_token = access_token
sa.access_token_secret = access_token_secret
sa.provider_data = payload
sa.save()
user = UserProfile.objects.get(id=sa.user.id)
except SocialAccountLink.DoesNotExist:
# try allauth
try:
aa = aamodels.SocialAccount.objects.get(uid=uid)
try:
user = UserProfile.objects.get(user__id=aa.user_id)
except UserProfile.DoesNotExist:
print('Need to create UserProfile')
# we got an allauth, create the SocialAccountLink
sa = SocialAccountLink()
sa.user = user
sa.social_id = aa.uid
sa.type = aa.provider
sa.access_token = access_token
sa.access_token_secret = access_token_secret
sa.provider_data = payload
sa.save()
except aamodels.SocialAccount.DoesNotExist:
print('Need to create social model')
return user if user else None
class SocialLoginHandler(APIView):
"""View to authenticate users through social media."""
permission_classes = (AllowAny,)
def post(self, request):
uid = None
backend = request.query_params.get('backend')
user = None
if backend in ['twitter']:
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
access_token = ""
access_token_secret = ""
if request.data.get('oauth_token') and request.data.get('oauth_verifier'):
auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY,
client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,
resource_owner_key=request.data.get('oauth_token'),
verifier=request.data.get('oauth_verifier'))
r = requests.post(access_token_url, auth=auth)
profile = dict(parse_qsl(r.text))
payload = json.dumps(profile)
uid = profile.get('user_id')
access_token = profile.get('oauth_token')
access_token_secret = profile.get('oauth_token_secret')
user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload)
else:
oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY,
client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,
callback_uri=settings.TWITTER_CALLBACK_URL)
r = requests.post(request_token_url, auth=oauth)
access_token = dict(parse_qsl(r.text))
return Response(access_token)
elif backend in ['facebook']:
access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token'
graph_api_url = 'https://graph.facebook.com/v2.3/me'
access_token = ""
access_token_secret = ""
params = {
'client_id': request.data.get('clientId'),
'redirect_uri': request.data.get('redirectUri'),
'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET,
'code': request.data.get('code')
}
# Step 1. Exchange authorization code for access token.
r = requests.get(access_token_url, params=params)
token = json.loads(r.text)
# Step 2. Retrieve information about the current user.
r = requests.get(graph_api_url, params=token)
profile = json.loads(r.text)
access_token = token.get('access_token')
uid = profile.get('id')
user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text)
elif backend in ['google']:
access_token_url = 'https://accounts.google.com/o/oauth2/token'
people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'
access_token = ""
access_token_secret = ""
payload = dict(client_id=request.data.get('clientId'),
redirect_uri=request.data.get('redirectUri'),
client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET,
code=request.data.get('code'),
grant_type='authorization_code')
# Step 1. Exchange authorization code for access token.
r = requests.post(access_token_url, data=payload)
token = json.loads(r.text)
headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}
# Step 2. Retrieve information about the current user.
r = requests.get(people_api_url, headers=headers)
profile = json.loads(r.text)
uid = profile.get('sub')
user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text)
if uid is not None and user is not None:
if not user.user.is_active:
return Response({
'status': 'Unauthorized',
'message': 'User account disabled'
}, status=status.HTTP_401_UNAUTHORIZED)
payload = jwt_payload_handler(user.user)
if api_settings.JWT_ALLOW_REFRESH:
payload['orig_iat'] = timegm(
datetime.datetime.utcnow().utctimetuple()
)
response_data = {
'token': jwt_encode_handler(payload),
'session': user.get_session_id()
}
return Response(response_data)
return Response({
'status': 'Bad request',
'message': 'Authentication could not be performed with received data.'
}, status=status.HTTP_400_BAD_REQUEST)
class ObtainUser(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
model = Token
def post(self, request):
return self.get(request)
def get(self, request):
if request.user.is_authenticated():
return Response(
status=status.HTTP_200_OK, data={
'id': request.user.id,
'name': request.user.username,
'session': request.user.userprofile.get_session_id(),
                    'slug': request.user.userprofile.slug,
'userRole': 'user',
})
else:
return Response(status=status.HTTP_401_UNAUTHORIZED)
| 42.105263
| 101
| 0.61525
| 876
| 8,000
| 5.406393
| 0.223744
| 0.090583
| 0.043919
| 0.046453
| 0.361486
| 0.25549
| 0.212838
| 0.157095
| 0.157095
| 0.157095
| 0
| 0.004802
| 0.297125
| 8,000
| 189
| 102
| 42.328042
| 0.837453
| 0.0505
| 0
| 0.211538
| 0
| 0
| 0.105054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.128205
| 0.00641
| 0.262821
| 0.012821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b46022f290a59526dcdb44e97324f9e8df677ff
| 11,520
|
py
|
Python
|
nvdbgeotricks.py
|
LtGlahn/estimat_gulstripe
|
8bb93d52131bdda9846810dbd6bac7f872377859
|
[
"MIT"
] | null | null | null |
nvdbgeotricks.py
|
LtGlahn/estimat_gulstripe
|
8bb93d52131bdda9846810dbd6bac7f872377859
|
[
"MIT"
] | null | null | null |
nvdbgeotricks.py
|
LtGlahn/estimat_gulstripe
|
8bb93d52131bdda9846810dbd6bac7f872377859
|
[
"MIT"
] | null | null | null |
"""
A collection of helper functions that use the nvdbapiv3 functions to do useful things, e.g. save geographic datasets.
These helpers assume a working installation of geopandas, shapely and a number of other packages that must be
installed separately. Historically, some of these libraries could occasionally be troublesome to install, or
have version conflicts with each other, especially on Windows. Such trouble is (mostly) a thing of the past.
The recommendation is still to use an (ana)conda installation in a dedicated "environment". This is good code hygiene
and keeps mess to a minimum; not least, any mess can easily be repaired by creating a new "environment"
without affecting your whole Python installation.
"""
import re
import pdb
from shapely import wkt
# from shapely.ops import unary_union
import pandas as pd
import geopandas as gpd
from datetime import datetime
import nvdbapiv3
from apiforbindelse import apiforbindelse
def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True):
"""
    Saves the NVDB road network and the given object types to a geopackage
    ARGUMENTS
        objekttyper: List of the object types you want to save
    KEYWORDS
        mittfilter=None : Dictionary with a filter for the search object in nvdbapiv3.py, for example { 'kommune' : 5001 }
        The same filter is applied to both the road network and the feature data
        vegnett=True : Bool, default=True. Whether to include road network data
        vegsegmenter=False : Bool, default=False. Whether to repeat the object split into road segments
        geometri=True : Bool, default=True. Whether to take the geometry from the object's own geometry (if present)
        If you want to present road objects according to their position along the road, use the combination
        vegsegmenter=True, geometri=False
RETURNS
None
"""
if not '.gpkg' in filnavn:
filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg'
if not isinstance(objekttyper, list ):
objekttyper = [ objekttyper ]
for enObjTypeId in objekttyper:
enObjTypeId = int( enObjTypeId )
sok = nvdbapiv3.nvdbFagdata( enObjTypeId )
if mittfilter:
sok.filter( mittfilter )
stat = sok.statistikk()
objtypenavn = sok.objektTypeDef['navn']
print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn )
lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() )
rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri )
if len( rec ) > 0:
mindf = pd.DataFrame( rec )
            # Need a small trick to avoid column-name collisions
kolonner = list( mindf.columns )
lowerkolonner = [ x.lower() for x in kolonner ]
# Duplicate element indices in list
# Using list comprehension + list slicing
# https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/
res = [idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]]
for ii, dublett in enumerate( res):
mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True )
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
            # must drop the 'vegsegmenter' column when vegsegmenter=False
if 'vegsegmenter' in minGdf.columns:
minGdf.drop( 'vegsegmenter', 1, inplace=True)
minGdf.drop( 'geometri', 1, inplace=True)
minGdf.to_file( filnavn, layer=lagnavn, driver="GPKG")
else:
print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter)
if vegnett:
veg = nvdbapiv3.nvdbVegnett()
if mittfilter:
junk = mittfilter.pop( 'egenskap', None)
junk = mittfilter.pop( 'overlapp', None)
veg.filter( mittfilter )
print( 'Henter vegnett')
rec = veg.to_records()
mindf = pd.DataFrame( rec)
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
mindf.drop( 'geometri', 1, inplace=True)
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
minGdf.to_file( filnavn, layer='vegnett', driver="GPKG")
def dumpkontraktsomr( komr = [] ):
"""
    Dumps a (hard-coded) set of contract areas
"""
if not komr:
komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ]
komr = [ '9253 Agder elektro og veglys 2021-2024']
    objliste = [ 540, # Trafikkmengde (traffic volume)
                 105, # Fartsgrense (speed limit)
                 810, # Vinterdriftsklasse (winter maintenance class)
                 482, # trafikkregistreringsstasjon (traffic registration station)
                 153, # Værstasjon (weather station)
                 64, # Ferjeleie (ferry berth)
                 39, # Rasteplass (rest area)
                 48, # Fortau (sidewalk)
                 199, # Trær (trees)
                 15, # Grasdekker (grass cover)
                 274, # Blomsterbeplanting (flower planting)
                 511, # Busker (shrubs)
                 300 , # Naturområde (natural area; no hits in the Haugesund contract)
                 517, # Artsrik vegkant (species-rich roadside)
                 800, # Fremmede arter (invasive species)
                 67, # Tunnelløp (tunnel bore)
                 846, # Skredsikring, bremsekjegler (landslide protection, braking cones)
                 850 # Skredsikring, forbygning (landslide protection, barrier)
]
    objliste = []  # NOTE: this empties the list above, so only the road network is exported below
for enkontrakt in komr:
filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt )
nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt })
def firefeltrapport( mittfilter={}):
"""
    Finds all four-lane roads in Norway, optionally within the given search criteria
    Uses the search object nvdbapiv3.nvdbVegnett from the library https://github.com/LtGlahn/nvdbapi-V3
    ARGUMENTS
        None
    KEYWORDS:
        mittfilter: Dictionary with search filters
    RETURNS
        geodataframe with the result
"""
v = nvdbapiv3.nvdbVegnett()
    # Adds a filter for phase = V (existing roads) only, unless it conflicts with another filter
if not 'vegsystemreferanse' in mittfilter.keys():
mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv'
if not 'kryssystem' in mittfilter.keys():
mittfilter['kryssystem'] = 'false'
if not 'sideanlegg' in mittfilter.keys():
mittfilter['sideanlegg'] = 'false'
v.filter( mittfilter )
    # Driving lanes only, only the top topology level, and not adskiltelop=MOT
v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } )
data = []
vegsegment = v.nesteForekomst()
while vegsegment:
if sjekkfelt( vegsegment, felttype='firefelt'):
vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] )
vegsegment['geometri'] = vegsegment['geometri']['wkt']
vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform']
vegsegment['vegnr'] = vegsegment['vref'].split()[0]
vegsegment['vegkategori'] = vegsegment['vref'][0]
vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp']
data.append( vegsegment )
vegsegment = v.nesteForekomst()
if len( data ) > 1:
mindf = pd.DataFrame( data )
mindf['geometry'] = mindf['geometri'].apply( wkt.loads )
mindf.drop( 'geometri', 1, inplace=True)
mindf.drop( 'kontraktsområder', 1, inplace=True)
mindf.drop( 'riksvegruter', 1, inplace=True)
mindf.drop( 'href', 1, inplace=True)
mindf.drop( 'metadata', 1, inplace=True)
mindf.drop( 'kortform', 1, inplace=True)
mindf.drop( 'veglenkenummer', 1, inplace=True)
mindf.drop( 'segmentnummer', 1, inplace=True)
mindf.drop( 'startnode', 1, inplace=True)
mindf.drop( 'sluttnode', 1, inplace=True)
mindf.drop( 'referanse', 1, inplace=True)
mindf.drop( 'målemetode', 1, inplace=True)
mindf.drop( 'måledato', 1, inplace=True)
minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )
return minGdf
else:
return None
def sjekkfelt( vegsegment, felttype='firefelt' ):
"""
    Checks what kind of lanes exist on a road segment
    ARGUMENTS:
        vegsegment - dictionary with data about a piece of the road network fetched from https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/
    KEYWORDS:
        felttype - what kind of lane type to check for. Possible values:
            firefelt (default). Assumes that a four-lane road means lane numbers 1-4 are in use and are either ordinary lanes, bus lanes or reversible lanes
            (more variants will be added as they are needed)
    RETURNS
        boolean - True if the lanes are of the requested type
"""
svar = False
vr = 'vegsystemreferanse'
sr = 'strekning'
if felttype == 'firefelt':
if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']:
kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) )
if vr in vegsegment.keys():
if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]:
if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}):
svar = True
                    # The last clause has occurred e.g. on Fv5724, a one-way tunnel by Oldenvatnet.
elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ):
svar = True
return svar
else:
raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype + 'er ikke implementert (ennå)' )
def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]):
"""
    Returns a list of lane numbers filtered by the kind of lane code they carry, if any
    ARGUMENTS
        feltoversikt - List of lane codes for a road segment.
    KEYWORDS
        mittfilter=['vanlig', 'K', 'R' ] - List of codes for the kinds of lanes to count. See handbook v830
            Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for possible values, short version:
                'vanlig' - Ordinary driving lane, the lane number is given as an integer with no letters.
                'K' - bus/transit lane
                'R' - reversible lane
                'S' - cycle lane
                'H' - right-turn lane
                'V' - left-turn lane
                'B' - extra lane for toll collection
    RETURNS
        List of lane numbers where only lanes matching the mittfilter keyword are included
"""
data = [ ]
for felt in feltoversikt:
feltbokstav = re.findall( '[A-Za-z]', felt)
if feltbokstav:
feltbokstav = feltbokstav[0]
else:
feltbokstav = 'vanlig'
if feltbokstav in mittfilter:
feltnummer = int( re.split( '[A-Z]', felt)[0] )
data.append( feltnummer )
return data
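A short usage sketch, not part of the original module; the object type id and the municipality filter are example values only (540 is traffic volume, 105 is speed limit, and kommune 5001 appears in the docstring above).

# Dump speed limits (NVDB object type 105) for one municipality to a geopackage,
# then build the four-lane-road report for the same area. Example values only.
nvdb2gpkg([105], filnavn='fartsgrense_trondheim', mittfilter={'kommune': 5001})
firefelt = firefeltrapport(mittfilter={'kommune': 5001})
if firefelt is not None:
    firefelt.to_file('firefelt.gpkg', layer='firefelt', driver='GPKG')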
| 39.183673
| 157
| 0.615712
| 1,221
| 11,520
| 5.799345
| 0.40131
| 0.019206
| 0.028809
| 0.03121
| 0.123994
| 0.060443
| 0.056207
| 0.050699
| 0.034741
| 0.034741
| 0
| 0.018797
| 0.288802
| 11,520
| 293
| 158
| 39.317406
| 0.845478
| 0.355903
| 0
| 0.148649
| 0
| 0
| 0.144836
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033784
| false
| 0
| 0.054054
| 0
| 0.114865
| 0.02027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b477719b2c91c9e3ee4ff6ba226b115ec30e5ff
| 979
|
py
|
Python
|
019_CountingSundays.py
|
joetache4/project-euler
|
82f9e25b414929d9f62d94905906ba2f57db7935
|
[
"MIT"
] | null | null | null |
019_CountingSundays.py
|
joetache4/project-euler
|
82f9e25b414929d9f62d94905906ba2f57db7935
|
[
"MIT"
] | null | null | null |
019_CountingSundays.py
|
joetache4/project-euler
|
82f9e25b414929d9f62d94905906ba2f57db7935
|
[
"MIT"
] | null | null | null |
"""
You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
ans: 171
"""
# set to day of week for 1 Jan 1901 (Tuesday)
dow = 2
def no_days(month, year):
if month in [0,2,4,6,7,9,11]:
return 31
elif month in [3,5,8,10]:
return 30
elif year % 400 == 0:
return 29
elif year % 100 == 0:
return 28
elif year % 4 == 0:
return 29
else:
return 28
sum = 0
for y in range(1901, 2001):
for m in range(0, 12):
if dow == 0:
sum += 1
dow = (dow + no_days(m, y)) % 7
print(sum)
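The brute-force count above can be cross-checked against the standard library's calendar arithmetic; this independent sketch uses datetime.date, where Monday is weekday 0 and Sunday is 6.

import datetime

count = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if datetime.date(year, month, 1).weekday() == 6  # 6 == Sunday
)
print(count)  # 171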
| 23.878049
| 109
| 0.660878
| 184
| 979
| 3.505435
| 0.581522
| 0.018605
| 0.024806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107629
| 0.250255
| 979
| 41
| 110
| 23.878049
| 0.771117
| 0.608784
| 0
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.333333
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6b4c6ac7304c74c6af0453d81ea3a3dfae8d7b81
| 1,033
|
py
|
Python
|
server/dbcls/api/resources/authenticate.py
|
ripry/umakaviewer
|
e3df32313219d1b9d65edb6d180b2b4799d87e25
|
[
"MIT"
] | 2
|
2017-08-17T02:01:48.000Z
|
2019-12-19T12:11:08.000Z
|
server/dbcls/api/resources/authenticate.py
|
ripry/umakaviewer
|
e3df32313219d1b9d65edb6d180b2b4799d87e25
|
[
"MIT"
] | 3
|
2021-04-04T01:25:07.000Z
|
2021-10-20T06:07:29.000Z
|
server/dbcls/api/resources/authenticate.py
|
ripry/umakaviewer
|
e3df32313219d1b9d65edb6d180b2b4799d87e25
|
[
"MIT"
] | 4
|
2020-12-01T04:20:55.000Z
|
2020-12-04T04:51:54.000Z
|
from flask_restful import Resource, reqparse
from firebase_admin import auth as firebase_auth
from dbcls.models import User
parser = reqparse.RequestParser()
parser.add_argument('token', type=str, required=True, nullable=False)
class Authenticate(Resource):
def post(self):
try:
args = parser.parse_args()
decoded_token = firebase_auth.verify_id_token(args['token'])
except (ValueError, firebase_auth.AuthError) as e:
return {'message': f'{e}'}, 400
firebase_uid = decoded_token['uid']
user = User.query.filter_by(firebase_uid=firebase_uid).first()
if not user:
return {'message': 'user not found. You have to sign up.'}, 400
custom_token = firebase_auth.create_custom_token(firebase_uid)
return {
'custom_token': custom_token.decode(),
'display_name': user.display_name,
'contact_uri': user.contact_uri,
'roles': [role.role_type for role in user.user_roles],
}
| 33.322581
| 75
| 0.653437
| 128
| 1,033
| 5.054688
| 0.523438
| 0.074189
| 0.05255
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007673
| 0.242982
| 1,033
| 30
| 76
| 34.433333
| 0.819693
| 0
| 0
| 0
| 0
| 0
| 0.102614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.130435
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860946d6e7694a280a705683f6a6189d61f153d3
| 475
|
py
|
Python
|
GetJSONData_NLPParser.py
|
Feiyi-Ding/2021A
|
f599f0a21e05964fffce3dcf2d32ef70ddc3c75d
|
[
"Apache-2.0"
] | null | null | null |
GetJSONData_NLPParser.py
|
Feiyi-Ding/2021A
|
f599f0a21e05964fffce3dcf2d32ef70ddc3c75d
|
[
"Apache-2.0"
] | 2
|
2021-03-22T17:57:27.000Z
|
2021-03-22T17:58:01.000Z
|
GetJSONData_NLPParser.py
|
Feiyi-Ding/2021A
|
f599f0a21e05964fffce3dcf2d32ef70ddc3c75d
|
[
"Apache-2.0"
] | 4
|
2021-03-09T16:15:30.000Z
|
2021-03-17T15:04:17.000Z
|
#Import required modules
import requests
import json
# Get json results for the required input
InputString = "kobe is a basketball player"
headers = {
'Content-type': 'application/json',
}
data = '{"text":InputString = '+ InputString + '}'
response = requests.post('http://66.76.242.198:9888/', data=data).json()
#Adding a test comment to check if the automatic git pull is working or not
#print(json.dumps(response, indent=4, sort_keys=True))
| 23.75
| 76
| 0.688421
| 65
| 475
| 5.015385
| 0.753846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03886
| 0.187368
| 475
| 19
| 77
| 25
| 0.805699
| 0.402105
| 0
| 0
| 0
| 0
| 0.398467
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86095983c39bff7a689e2233b004ba39842ac699
| 1,719
|
py
|
Python
|
language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 1,199
|
2018-10-16T01:30:18.000Z
|
2022-03-31T21:05:24.000Z
|
language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 116
|
2018-10-18T03:31:46.000Z
|
2022-03-24T13:40:50.000Z
|
language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 303
|
2018-10-22T12:35:12.000Z
|
2022-03-27T17:38:17.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sentencize the raw wikitext103."""
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("wiki103_raw", None,
"Path to raw wikitext103 train corpus.")
flags.DEFINE_string("output_path", None,
"Path to output the processed dataset.")
FLAGS = flags.FLAGS
def main(_):
with open(FLAGS.wiki103_raw, "r") as f:
data = f.read().strip().split("\n")
data = [x.split(" . ") for x in data if x.strip() and x.strip()[0] != "="]
sentences = []
for para in data:
for sent in para:
sentences.append(sent + ".")
data = "\n".join(sentences)
data = data.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
data = data.replace(" \'", "\'").replace(" )", ")").replace("( ", "(")
data = data.replace(" ;", ";")
data = "\n".join([x for x in data.split("\n") if len(x.split()) > 3])
logging.info("length = %d", len(data.split("\n")))
with open(FLAGS.output_path, "w") as f:
f.write(data)
if __name__ == "__main__":
app.run(main)
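A tiny illustration, not in the original, of what the cleanup chain above does to one made-up raw wikitext line.

# Made-up raw line, only to show the token replacements
raw = "The bridge was 120 @-@ metres long , built in 1890 @.@ 5 ."
cleaned = raw.replace(" @.@ ", ".").replace(" @-@ ", "-").replace(" ,", ",")
print(cleaned)  # The bridge was 120-metres long, built in 1890.5 .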
| 29.135593
| 76
| 0.64107
| 248
| 1,719
| 4.383065
| 0.479839
| 0.055198
| 0.041398
| 0.029439
| 0.067157
| 0.067157
| 0.067157
| 0.067157
| 0
| 0
| 0
| 0.01809
| 0.196044
| 1,719
| 58
| 77
| 29.637931
| 0.768452
| 0.369983
| 0
| 0
| 0
| 0
| 0.151458
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860af185b3aec78bf051659802424a1b61b8f5ba
| 6,742
|
py
|
Python
|
databuilder/loader/file_system_neo4j_csv_loader.py
|
davcamer/amundsendatabuilder
|
1bd6cd5c30413640d4c377dc3c59c283e86347eb
|
[
"Apache-2.0"
] | null | null | null |
databuilder/loader/file_system_neo4j_csv_loader.py
|
davcamer/amundsendatabuilder
|
1bd6cd5c30413640d4c377dc3c59c283e86347eb
|
[
"Apache-2.0"
] | null | null | null |
databuilder/loader/file_system_neo4j_csv_loader.py
|
davcamer/amundsendatabuilder
|
1bd6cd5c30413640d4c377dc3c59c283e86347eb
|
[
"Apache-2.0"
] | 1
|
2019-09-21T23:56:41.000Z
|
2019-09-21T23:56:41.000Z
|
import csv
import logging
import os
import shutil
from csv import DictWriter # noqa: F401
from pyhocon import ConfigTree, ConfigFactory # noqa: F401
from typing import Dict, Any # noqa: F401
from databuilder.job.base_job import Job
from databuilder.loader.base_loader import Loader
from databuilder.models.neo4j_csv_serde import NODE_LABEL, \
RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE
from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401
from databuilder.utils.closer import Closer
LOGGER = logging.getLogger(__name__)
class FsNeo4jCSVLoader(Loader):
"""
Write node and relationship CSV file(s) that can be consumed by
Neo4jCsvPublisher.
It assumes that the record it consumes is instance of Neo4jCsvSerializable
"""
# Config keys
NODE_DIR_PATH = 'node_dir_path'
RELATION_DIR_PATH = 'relationship_dir_path'
FORCE_CREATE_DIR = 'force_create_directory'
SHOULD_DELETE_CREATED_DIR = 'delete_created_directories'
_DEFAULT_CONFIG = ConfigFactory.from_dict({
SHOULD_DELETE_CREATED_DIR: True,
FORCE_CREATE_DIR: False
})
def __init__(self):
# type: () -> None
self._node_file_mapping = {} # type: Dict[Any, DictWriter]
self._relation_file_mapping = {} # type: Dict[Any, DictWriter]
self._closer = Closer()
def init(self, conf):
# type: (ConfigTree) -> None
"""
Initializing FsNeo4jCsvLoader by creating directory for node files
and relationship files. Note that the directory defined in
configuration should not exist.
:param conf:
:return:
"""
conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG)
self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH)
self._relation_dir = \
conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH)
self._delete_created_dir = \
conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR)
self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR)
self._create_directory(self._node_dir)
self._create_directory(self._relation_dir)
def _create_directory(self, path):
# type: (str) -> None
"""
Validate directory does not exist, creates it, register deletion of
created directory function to Job.closer.
:param path:
:return:
"""
if os.path.exists(path):
if self._force_create_dir:
LOGGER.info('Directory exist. Deleting directory {}'.format(path))
shutil.rmtree(path)
else:
raise RuntimeError('Directory should not exist: {}'.format(path))
os.makedirs(path)
def _delete_dir():
# type: () -> None
if not self._delete_created_dir:
LOGGER.warn('Skip Deleting directory {}'.format(path))
return
LOGGER.info('Deleting directory {}'.format(path))
shutil.rmtree(path)
# Directory should be deleted after publish is finished
Job.closer.register(_delete_dir)
def load(self, csv_serializable):
# type: (Neo4jCsvSerializable) -> None
"""
Writes Neo4jCsvSerializable into CSV files.
There are multiple CSV files that this method writes.
        This is because there is not just one node file and one relationship file;
        a record can also produce several different kinds of nodes and relationships.
Common pattern for both nodes and relations:
1. retrieve csv row (a dict where keys represent a header,
values represent a row)
        2. use this dict to get an appropriate csv writer and write to it.
3. repeat 1 and 2
:param csv_serializable:
:return:
"""
node_dict = csv_serializable.next_node()
while node_dict:
key = (node_dict[NODE_LABEL], len(node_dict))
file_suffix = '{}_{}'.format(*key)
node_writer = self._get_writer(node_dict,
self._node_file_mapping,
key,
self._node_dir,
file_suffix)
node_writer.writerow(node_dict)
node_dict = csv_serializable.next_node()
relation_dict = csv_serializable.next_relation()
while relation_dict:
key2 = (relation_dict[RELATION_START_LABEL],
relation_dict[RELATION_END_LABEL],
relation_dict[RELATION_TYPE],
len(relation_dict))
file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2])
relation_writer = self._get_writer(relation_dict,
self._relation_file_mapping,
key2,
self._relation_dir,
file_suffix)
relation_writer.writerow(relation_dict)
relation_dict = csv_serializable.next_relation()
def _get_writer(self,
csv_record_dict, # type: Dict[str, Any]
file_mapping, # type: Dict[Any, DictWriter]
key, # type: Any
dir_path, # type: str
file_suffix # type: str
):
# type: (...) -> DictWriter
"""
Finds a writer based on csv record, key.
If writer does not exist, it's creates a csv writer and update the
mapping.
:param csv_record_dict:
:param file_mapping:
:param key:
:param file_suffix:
:return:
"""
writer = file_mapping.get(key)
if writer:
return writer
LOGGER.info('Creating file for {}'.format(key))
file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w')
def file_out_close():
# type: () -> None
LOGGER.info('Closing file IO {}'.format(file_out))
file_out.close()
self._closer.register(file_out_close)
writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(),
quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
file_mapping[key] = writer
return writer
def close(self):
# type: () -> None
"""
Closes any closeable callable registered in _closer.
:return:
"""
self._closer.close()
def get_scope(self):
# type: () -> str
return "loader.filesystem_csv_neo4j"
| 35.671958
| 82
| 0.590478
| 731
| 6,742
| 5.19015
| 0.247606
| 0.028466
| 0.01845
| 0.024249
| 0.154454
| 0.108065
| 0.062731
| 0
| 0
| 0
| 0
| 0.00883
| 0.328093
| 6,742
| 188
| 83
| 35.861702
| 0.828698
| 0.245328
| 0
| 0.116505
| 0
| 0
| 0.060279
| 0.020305
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087379
| false
| 0
| 0.116505
| 0.009709
| 0.300971
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860b3ffda1922298f17135c358d64932d9e08e95
| 3,015
|
py
|
Python
|
sample_program_04_02_knn.py
|
pepsinal/python_doe_kspub
|
65ae5c2d214f1a34fa242fee7d63453c81d56bfe
|
[
"MIT"
] | 16
|
2021-01-11T17:57:05.000Z
|
2022-03-29T07:04:26.000Z
|
sample_program_04_02_knn.py
|
pepsinal/python_doe_kspub
|
65ae5c2d214f1a34fa242fee7d63453c81d56bfe
|
[
"MIT"
] | 2
|
2021-08-12T03:18:52.000Z
|
2021-08-13T06:31:55.000Z
|
sample_program_04_02_knn.py
|
pepsinal/python_doe_kspub
|
65ae5c2d214f1a34fa242fee7d63453c81d56bfe
|
[
"MIT"
] | 14
|
2021-06-05T11:17:45.000Z
|
2022-03-26T02:56:40.000Z
|
# -*- coding: utf-8 -*-
"""
@author: Hiromasa Kaneko
"""
import pandas as pd
from sklearn.neighbors import NearestNeighbors # k-NN
k_in_knn = 5  # k in k-NN
rate_of_training_samples_inside_ad = 0.96  # fraction of training samples that should fall inside the AD; used to set the AD threshold
dataset = pd.read_csv('resin.csv', index_col=0, header=0)
x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0)
# split the data
y = dataset.iloc[:, 0]  # objective variable
x = dataset.iloc[:, 1:]  # explanatory variables
# drop features with zero standard deviation
deleting_variables = x.columns[x.std() == 0]
x = x.drop(deleting_variables, axis=1)
x_prediction = x_prediction.drop(deleting_variables, axis=1)
# autoscaling (standardization)
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()
# applicability domain (AD) based on k-NN
ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean')  # declare the AD model
ad_model.fit(autoscaled_x)  # for a k-NN-based AD this corresponds to storing the training x in the AD model
# kneighbors() returns both the distances to the k nearest neighbors and their index numbers, so two output variables are used
# in the training data each sample is its own nearest neighbor (distance 0), so k_in_knn + 1 neighbors are requested and the self-distance is excluded
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)
knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index)  # convert to a DataFrame
mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1),
columns=['mean_of_knn_distance'])  # mean of the k_in_knn distances excluding the sample itself
mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv')  # save to a csv file; note that an existing file with the same name is overwritten
# set the threshold so that rate_of_training_samples_inside_ad * 100 % of the training samples fall inside the AD
sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True)  # sort by mean distance in ascending order
ad_threshold = sorted_mean_of_knn_distance_train.iloc[
round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1]
# judge whether each training sample is inside or outside the AD
inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold  # TRUE only for samples inside the AD
inside_ad_flag_train.columns = ['inside_ad_flag']
inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv')  # save to a csv file; note that an existing file with the same name is overwritten
# compute the k-NN distances for the prediction data
knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)
knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index)  # convert to a DataFrame
mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1),
columns=['mean_of_knn_distance'])  # mean of the k_in_knn distances
mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv')  # save to a csv file; note that an existing file with the same name is overwritten
# judge whether each prediction sample is inside or outside the AD
inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold  # TRUE only for samples inside the AD
inside_ad_flag_prediction.columns = ['inside_ad_flag']
inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv')  # save to a csv file; note that an existing file with the same name is overwritten
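# Illustrative sketch (hedged example, not from the original script). It reruns
# the same k-NN applicability-domain idea on synthetic data, assuming only numpy
# and scikit-learn, to make the threshold rule above concrete: rank the mean
# k-NN distances and keep ~96 % of the training samples inside the AD.
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
x_demo = rng.normal(size=(100, 3))                    # toy, already-scaled "training" matrix
nn = NearestNeighbors(n_neighbors=5 + 1).fit(x_demo)  # +1 because each sample is its own nearest neighbor
dist, _ = nn.kneighbors(x_demo)
mean_dist = dist[:, 1:].mean(axis=1)                  # drop the self-distance column
threshold = np.sort(mean_dist)[round(len(mean_dist) * 0.96) - 1]
print((mean_dist <= threshold).mean())                # ~0.96 of the samples fall inside the AD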
| 49.42623
| 121
| 0.769818
| 432
| 3,015
| 4.988426
| 0.247685
| 0.107193
| 0.054292
| 0.102552
| 0.529466
| 0.363805
| 0.210673
| 0.181903
| 0
| 0
| 0
| 0.010757
| 0.13665
| 3,015
| 60
| 122
| 50.25
| 0.817134
| 0.258043
| 0
| 0.058824
| 0
| 0
| 0.108361
| 0.058851
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860d27b54af610b3354ec914d17139eb593aede5
| 1,127
|
py
|
Python
|
lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py
|
sneumann/galaxy
|
f6011bab5b8adbabae4986a45849bb9158ffc8bb
|
[
"CC-BY-3.0"
] | 1
|
2019-07-27T19:30:55.000Z
|
2019-07-27T19:30:55.000Z
|
lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py
|
sneumann/galaxy
|
f6011bab5b8adbabae4986a45849bb9158ffc8bb
|
[
"CC-BY-3.0"
] | 4
|
2021-02-08T20:28:34.000Z
|
2022-03-02T02:52:55.000Z
|
lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py
|
sneumann/galaxy
|
f6011bab5b8adbabae4986a45849bb9158ffc8bb
|
[
"CC-BY-3.0"
] | 1
|
2018-05-30T07:38:54.000Z
|
2018-05-30T07:38:54.000Z
|
"""
Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table.
"""
from __future__ import print_function
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData
)
from galaxy.model.migrate.versions.util import (
add_column,
drop_column
)
log = logging.getLogger(__name__)
metadata = MetaData()
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
# SQLAlchemy Migrate has a bug when adding a column with both a ForeignKey and an index in SQLite
if migrate_engine.name != 'sqlite':
c = Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True)
else:
c = Column("ldda_id", Integer, index=True, nullable=True)
add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id')
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
drop_column('ldda_id', 'implicitly_converted_dataset_association', metadata)
| 26.833333
| 126
| 0.747116
| 141
| 1,127
| 5.652482
| 0.41844
| 0.037641
| 0.097867
| 0.139272
| 0.263488
| 0.100376
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165927
| 1,127
| 41
| 127
| 27.487805
| 0.847872
| 0.169476
| 0
| 0.148148
| 0
| 0
| 0.199353
| 0.170259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.148148
| 0
| 0.222222
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860e80203a82d7ffdb492d80f10371c72ae4d44a
| 8,231
|
py
|
Python
|
scripts/adam/cc100_baselines.py
|
TimDettmers/sched
|
e16735f2c2eb6a51f5cf29ead534041574034e2e
|
[
"MIT"
] | 1
|
2020-04-22T17:49:48.000Z
|
2020-04-22T17:49:48.000Z
|
scripts/adam/cc100_baselines.py
|
TimDettmers/sched
|
e16735f2c2eb6a51f5cf29ead534041574034e2e
|
[
"MIT"
] | null | null | null |
scripts/adam/cc100_baselines.py
|
TimDettmers/sched
|
e16735f2c2eb6a51f5cf29ead534041574034e2e
|
[
"MIT"
] | null | null | null |
import numpy as np
import itertools
import gpuscheduler
import argparse
import os
import uuid
import hashlib
import glob
import math
from itertools import product
from torch.optim.lr_scheduler import OneCycleLR
from os.path import join
parser = argparse.ArgumentParser(description='Compute script.')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')
args = parser.parse_args()
gpus = 128
cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus)
args2 = {}
name = 'blockwise5'
constraint = 'volta32gb'
# 1024 tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch size 3460
# model sizes: 1.92bn, 2.43bn, 1.41bn
logfolder = 'adam/cc100/{0}'.format(name)
ckp_name = logfolder
#time_hours = 24*2
cores_per_job = 5
mem = 56*(8 if gpus > 8 else gpus)
num_seeds = 1
seed_offset = 5
time_hours = 72
time_minutes = 0
#partition = 'learnlab,learnfair,scavenge'
partition = 'learnfair,learnlab'
#partition = 'learnfair'
#partition = 'uninterruptible'
change_dir = 'fairseq_private'
repo = 'fairseq_private'
exclude = ''
s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)
fp16 = True
args3 = {}
args2['lr-scheduler'] = 'polynomial_decay'
args2['warmup-updates'] = 2000
args2['max-update'] = 56250
args2['total-num-update'] = 56250
#args2['lr-scheduler'] = 'cosine'
#args2['warmup-updates'] = 3000
#args2['max-update'] = 56250*4
args2['fp16-scale-window'] = 250
args2['clip-norm'] = 0.4
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
#args3['adam8bits-offset'] = [1/512]
#args3['prob-quant'] = [False]
#args3['dist-scale'] = [1.0]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3['decoder-embed-dim'] = [2048+256]
#args3['decoder-ffn-embed-dim'] = [8192+2048]
#args3['max-tokens'] = [3072]
#args3['update-freq'] = [2]
key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr')
#key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq')
args3[key] = []
#lrkey = ('lr', 'warmup-init-lr')
#args3[lrkey] = []
# 32-bit baseline
#args3['optimizer'] = ['adam']
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.41e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
# adafactor
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)]
#args2['optimizer'] = 'adafactor'
#args2['beta1'] = 0.9
#args2['decay-rate'] = 0.999
##args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048+256,8192+2048,2))
##args3[key].append((2048,2688,10752,2))
#
#lr = 0.003239 + (-0.0001395*math.log(1.92e9))
#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))
# 8-bit
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]
#args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)]
#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)]
args3['optimizer'] = ['adam']
args3[('use-bnb', 'optim-bits')] = [(True, 8)]
args3[('stable-emb', 'no-scale-embedding')] = [(True, True)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)]
#args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)]
args3[key].append((2048,2048,8192,8, 0.00075))
#args3[key].append((2048,2048,8192,8, 0.00045))
#args3[key].append((2048,2688,10752,2))
#args3['use-emb-norm'] = [True]
#lr = 0.003239 + (-0.0001395*math.log(2.43e9))
#args3[lrkey].append((lr, 0.0))
#args2['train-subset'] = 'train11'
args4 = []
args5 = {}
args6 = {}
rdm = np.random.RandomState(5345)
for key, value in args2.items():
cmd = cmd + ' --{0} {1}'.format(key, value)
args_prod = []
for key, values in args3.items():
if isinstance(key, tuple):
keyvalues = []
for tups in values:
arg = ''
for i, v in enumerate(tups):
if v is True: v = ''
if v is False: continue
if len(key[i]) == 0:
arg += '{0} '.format(v)
else:
arg += '--{0} {1} '.format(key[i], v)
keyvalues.append(arg)
elif isinstance(key, str):
keyvalues = []
for v in values:
if v is True: v = ''
if v is False:
keyvalues.append('')
else:
keyvalues.append(' --{0} {1}'.format(key, v))
args_prod.append(keyvalues)
if len(args_prod) >= 2:
args_prod = list(product(*args_prod))
else:
new_args = []
if len(args_prod) > 0:
for arg in args_prod[0]:
new_args.append([arg])
args_prod = new_args
jobs = []
if len(args4) == 0: args4.append('')
for seed in range(num_seeds):
seed = seed + seed_offset
for arg4 in args4:
if len(args_prod) == 0: args_prod.append(('', ''))
for i, values in enumerate(args_prod):
job_cmd = cmd + arg4
for val in values:
job_cmd += ' {0}'.format(val)
#job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ')
job_cmd = job_cmd + ' --seed {0}'.format(seed)
checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)
save_dir = ' --save-dir {0}'.format(checkpoint_dir)
job_cmd = job_cmd + save_dir
cmds = [job_cmd]
if rdm.rand(1) <= args.p:
jobs.append(job_cmd)
s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)
if args.dry:
for i, job in enumerate(jobs):
print(i, job)
print('')
print('Total jobs', len(jobs))
print('Time hours: {0}'.format(time_hours))
print('GPUs: {0}'.format(gpus))
print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))
print('Jobs will be run on: {0}'.format(partition))
print('Run in folder: {0}'.format(change_dir))
if not args.dry:
s.run_jobs()
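# Illustrative sketch (hedged example, not from the original script). It mimics,
# in isolation, how the tuple-keyed args3 entries above expand into command-line
# fragments: a tuple key pairs flag names with one value tuple, True becomes a
# bare flag, and False drops the flag. The demo values below are made up.
from itertools import product

demo_args = {('use-bnb', 'optim-bits'): [(True, 8)], 'clip-norm': [0.4]}
fragments = []
for demo_key, demo_values in demo_args.items():
    if isinstance(demo_key, tuple):
        fragments.append([' '.join('--{0}'.format(k) if v is True else '--{0} {1}'.format(k, v)
                                   for k, v in zip(demo_key, tup) if v is not False)
                          for tup in demo_values])
    else:
        fragments.append(['--{0} {1}'.format(demo_key, v) for v in demo_values])
for combo in product(*fragments):
    print(' '.join(combo))  # --use-bnb --optim-bits 8 --clip-norm 0.4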
| 37.756881
| 773
| 0.628721
| 1,156
| 8,231
| 4.40917
| 0.257785
| 0.015303
| 0.021974
| 0.028252
| 0.345301
| 0.330783
| 0.318226
| 0.294683
| 0.284481
| 0.258387
| 0
| 0.082858
| 0.168631
| 8,231
| 217
| 774
| 37.930876
| 0.66199
| 0.390718
| 0
| 0.059829
| 0
| 0.008547
| 0.281124
| 0.086095
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102564
| 0
| 0.102564
| 0.068376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860f02df53bc5c8189989d03588264d399ebda12
| 2,086
|
py
|
Python
|
neurodocker/reprozip/tests/test_merge.py
|
sulantha2006/neurodocker
|
d03fe865ae05fea2f7ce9a8b417717dae7bd640f
|
[
"Apache-2.0"
] | null | null | null |
neurodocker/reprozip/tests/test_merge.py
|
sulantha2006/neurodocker
|
d03fe865ae05fea2f7ce9a8b417717dae7bd640f
|
[
"Apache-2.0"
] | null | null | null |
neurodocker/reprozip/tests/test_merge.py
|
sulantha2006/neurodocker
|
d03fe865ae05fea2f7ce9a8b417717dae7bd640f
|
[
"Apache-2.0"
] | 1
|
2020-01-17T17:30:16.000Z
|
2020-01-17T17:30:16.000Z
|
"""Tests for merge.py."""
from __future__ import absolute_import, division, print_function
from glob import glob
import os
import tarfile
import tempfile
from neurodocker.docker import client
from neurodocker.reprozip.trace import ReproZipMinimizer
from neurodocker.reprozip.merge import merge_pack_files
def _create_packfile(commands, dir):
"""Create packfile from list `commands` in debian:stretch container."""
container = client.containers.run('debian:stretch', detach=True, tty=True,
security_opt=['seccomp:unconfined'])
try:
minimizer = ReproZipMinimizer(container.id, commands,
packfile_save_dir=dir)
packfile_path = minimizer.run()
except:
raise
finally:
container.stop()
container.remove()
return packfile_path
def test_merge_pack_files():
tmpdir = tempfile.mkdtemp()
cmd = ["du -sh /usr", "rm --help"]
packpath = _create_packfile(cmd, tmpdir)
new_name = "first-pack.rpz"
os.rename(packpath, os.path.join(tmpdir, new_name))
cmd = ["ls -l /", "grep --help"]
_create_packfile(cmd, tmpdir)
pattern = os.path.join(tmpdir, '*.rpz')
packfiles = glob(pattern)
assert packfiles, "packfiles not found"
outfile = os.path.join(tmpdir, 'merged.rpz')
merge_pack_files(outfile=outfile, packfiles=packfiles)
with tarfile.open(outfile) as tar:
tar.extractall(path=tmpdir)
datafile = os.path.join(tmpdir, 'DATA.tar.gz')
with tarfile.open(datafile) as tardata:
tardata.extractall(path=tmpdir)
usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin')
assert os.path.isfile(os.path.join(usr_path, 'du'))
assert os.path.isfile(os.path.join(usr_path, 'grep'))
assert os.path.isfile(os.path.join(usr_path, 'ls'))
assert os.path.isfile(os.path.join(usr_path, 'rm'))
assert not os.path.isfile(os.path.join(usr_path, 'sed'))
assert not os.path.isfile(os.path.join(usr_path, 'tar'))
| 34.196721
| 78
| 0.64861
| 262
| 2,086
| 5.038168
| 0.347328
| 0.077273
| 0.083333
| 0.063636
| 0.193939
| 0.163636
| 0.163636
| 0.163636
| 0.163636
| 0.057576
| 0
| 0
| 0.228667
| 2,086
| 60
| 79
| 34.766667
| 0.820385
| 0.040748
| 0
| 0
| 0
| 0
| 0.077889
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.23913
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860f4df3a0a1148896e3af7d663a6706e11e5b27
| 2,429
|
py
|
Python
|
build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py
|
dolang/build-kivy-linux
|
bb3e6dce956659d94604b524aa6702e8c390e15a
|
[
"MIT"
] | null | null | null |
build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py
|
dolang/build-kivy-linux
|
bb3e6dce956659d94604b524aa6702e8c390e15a
|
[
"MIT"
] | null | null | null |
build/step-3-kivy-almost-manylinux/scripts/redirect_html5.py
|
dolang/build-kivy-linux
|
bb3e6dce956659d94604b524aa6702e8c390e15a
|
[
"MIT"
] | null | null | null |
"""
HTML5 contexts.
:author: Dominik Lang
:license: MIT
"""
import contextlib
import io
import sys
__all__ = ['create_document', 'tag', 'as_link']
class create_document(contextlib.redirect_stdout):
"""Redirect output to an HTML5 document specified by new_target.
An HTML document title can be specified, but it should not consist of
whitespace only. The default is a dash.
For serialisation, an encoding is included and defaults to UTF-8.
Make sure the output (likely ``new_target``) uses the correct one.
Arguments are not checked for validity.
"""
def __init__(self, new_target, *, title='-', encoding='utf-8'):
super().__init__(new_target)
self._title = str(title)
self._encoding = encoding
def __enter__(self):
new_target = contextlib.redirect_stdout.__enter__(self)
html5 = ('<!DOCTYPE html>\n'
'<html>\n'
'<title>{}</title>\n'
'<meta charset="{}">'.format(self._title, self._encoding))
print(html5)
return new_target
@contextlib.contextmanager
def tag(name):
"""Enclose output in an HTML tag denoted by the name."""
print('<{}>'.format(name))
yield
print('</{}>'.format(name))
class LinkStringIO(io.StringIO):
def __init__(self):
super().__init__()
self._write_text = False # switch between link href="..." and text
def write(self, s):
if not s:
return
# else:
if s.isspace():
return super().write(s)
# else:
if self._write_text:
count = super().write('<a href="')
count += super().write(s)
count += super().write('">')
else:
count = super().write(s)
count += super().write('</a>')
self._write_text = not self._write_text
return count
class write_link(contextlib.redirect_stdout):
"""Combine any two subsequent non-empty writes into an HTML link."""
def __init__(self):
super().__init__(LinkStringIO())
def __exit__(self, exctype, excinst, exctb):
super().__exit__(exctype, excinst, exctb)
with contextlib.closing(self._new_target):
self._new_target.seek(0)
sys.stdout.write(self._new_target.read())
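# Illustrative usage sketch (hedged example, not from the original module). It
# combines the helpers above: output printed inside create_document is redirected
# into an HTML5 skeleton, and tag() wraps a block of output in an element. An
# io.StringIO buffer stands in for a real output file.
demo_buffer = io.StringIO()
with create_document(demo_buffer, title='Demo'):
    with tag('p'):
        print('hello, world')
sys.stdout.write(demo_buffer.getvalue())  # <!DOCTYPE html> ... <p> hello, world </p>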
| 28.244186
| 76
| 0.573075
| 276
| 2,429
| 4.771739
| 0.398551
| 0.061503
| 0.049355
| 0.024298
| 0.077449
| 0.047077
| 0.047077
| 0
| 0
| 0
| 0
| 0.004132
| 0.302594
| 2,429
| 85
| 77
| 28.576471
| 0.773318
| 0.230548
| 0
| 0.041667
| 0
| 0
| 0.068525
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0
| 0.0625
| 0
| 0.354167
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860f856dd45e64104260a9b161c8dc5f275852d1
| 1,454
|
py
|
Python
|
lab/hw03-part-i_nov14.py
|
jzacsh/neuralnets-cmp464
|
de35bbba93b87446b231bf012a8de5acc7896a04
|
[
"Apache-2.0"
] | 1
|
2017-08-30T04:31:00.000Z
|
2017-08-30T04:31:00.000Z
|
lab/hw03-part-i_nov14.py
|
jzacsh/neuralnets-cmp464
|
de35bbba93b87446b231bf012a8de5acc7896a04
|
[
"Apache-2.0"
] | 1
|
2017-10-10T23:30:05.000Z
|
2017-10-16T00:32:09.000Z
|
lab/hw03-part-i_nov14.py
|
jzacsh/neuralnets-cmp464
|
de35bbba93b87446b231bf012a8de5acc7896a04
|
[
"Apache-2.0"
] | null | null | null |
"""
Jonathan Zacsh's solution to homework #3, Nov 14., Part I
"""
# Per homework instructions, following lead from matlab example by professor:
# http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf
import sys
import tensorflow as tf
import tempfile
import os
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# not really doing interesting things in this lab, so just ignore optimization
class Differentiable:
""" encapsulation of a function and its derivative """
def __init__(self, label, f, d):
self.func = f
self.deriv = d
self.func.name = label
self.deriv.name = "%sDeriv" % label
# g(x) = x^4+2x-7 ; per matlab example
# g'(x) = 4x^3+2
fExFourth = Differentiable("fExFourth",
lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]),
lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2]))
tFofTwo = fExFourth.func(2)
tFofDerivTwo = fExFourth.deriv(2)
log_dir = tempfile.mkdtemp(prefix="hw3-nov14-parti")
print(log_dir)
with tf.Session() as sess:
writer = tf.summary.FileWriter(log_dir, sess.graph)
fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo])
sys.stderr.write("results:\n\tf(2)=%s\n\tf'(2)=%s\n" % (fOfTwo, fDerivOfTwo))
# note: only needed when doing a *loop* of sess.run() calls, and want to see
# intermediary results per-loop.
#writer.add_summary(results)
writer.flush()
writer.close()
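# Added sanity note (not from the original lab file): checking the expected output
# by hand, g(2) = 2**4 + 2*2 - 7 = 13 and g'(2) = 4*2**3 + 2 = 34, so the script
# should report f(2)=13 and f'(2)=34.
assert (2 ** 4 + 2 * 2 - 7, 4 * 2 ** 3 + 2) == (13, 34)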
| 31.608696
| 86
| 0.681568
| 221
| 1,454
| 4.420814
| 0.552036
| 0.012283
| 0.018424
| 0.024565
| 0.041965
| 0.030706
| 0
| 0
| 0
| 0
| 0
| 0.024288
| 0.178817
| 1,454
| 45
| 87
| 32.311111
| 0.79397
| 0.363136
| 0
| 0
| 0
| 0
| 0.093923
| 0.036464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.2
| 0
| 0.28
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
860ffd8531729695796f989eadffa27a2953a3a7
| 8,437
|
py
|
Python
|
modules/experiments_bc/set_tp.py
|
GChrysostomou/tasc
|
d943de343d725b99fa1a1ad201b32a21e5970801
|
[
"MIT"
] | 2
|
2021-12-27T12:46:48.000Z
|
2022-03-01T11:43:41.000Z
|
modules/experiments_bc/set_tp.py
|
tbose20/D-Ref
|
eda6170a72838b89637df241dd5619e001f3afdb
|
[
"MIT"
] | null | null | null |
modules/experiments_bc/set_tp.py
|
tbose20/D-Ref
|
eda6170a72838b89637df241dd5619e001f3afdb
|
[
"MIT"
] | 3
|
2021-11-10T15:15:02.000Z
|
2022-03-01T11:44:35.000Z
|
import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import *
from sklearn.metrics import precision_recall_fscore_support as prfs
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def degrading_model_perf(data, model, save_path, data_size, largest = True):
print("\n--- Degrading Model Performance \n")
modulo = round(len(data) / 10) + 1
model.embedding.weight.requires_grad_(True)
actual = []
results = {}
results["random"] = []
results["attention"]= []
results["gradient"] = []
results["grad_attention"] = []
results["grad*attention"] = []
_, _, lengths, _ = next(iter(data))
maximum = max(lengths)
if max(lengths) <= 10 :
maximum = max(lengths) - 1
elif max(lengths) > 10 :
maximum = 10
print(maximum)
grad_set = torch.zeros([data_size, maximum]).long().to(device)
att_set = torch.zeros([data_size, maximum]).long().to(device)
rand_set = torch.zeros([data_size, maximum]).long().to(device)
att_grad_set = torch.zeros([data_size, maximum]).long().to(device)
att_x_grad_set = torch.zeros([data_size, maximum]).long().to(device)
actual_set = torch.zeros([data_size, 1]).long().to(device)
docs = []
for batchi, (doc_id, sentences, lengths, labels) in enumerate(data):
model.train()
torch.cuda.empty_cache()
model.zero_grad()
sentences, lengths, labels = sentences.to(device), lengths.to(device), labels.to(device)
yhat, weights_or = model(sentences, lengths, retain_gradient = True)
masking = yhat.max(-1)[1] == labels
if largest == False:
masking = yhat.max(-1)[1] != labels
yhat.max(-1)[0].sum().backward(retain_graph = True)
maxi = max(lengths)
doc_id = doc_id[masking]
yhat = yhat[masking]
sentences = sentences[masking]
labels = labels[masking]
lengths = lengths[masking]
weights_or = weights_or[masking]
docs.extend(doc_id)
g = model.embed.grad[masking]
weights_def_grad = model.weights.grad[masking]
max_lengths = max(max(lengths), maxi)
model_masks = model.masks[masking]
with torch.no_grad():
weights = weights_or.clone()
weight_mul_grad = weights_or * weights_def_grad
weight_mul_grad[model_masks[:,:max_lengths]] = float("-inf")
weights_def_grad_soft = weights_def_grad.clone()
weights_def_grad_soft[model_masks[:,:max_lengths]] = float("-inf")
em = model.embed[masking]
g1 = (g* em).sum(-1)[:,:max_lengths]
g1[model_masks[:,:max_lengths]] = float("-inf")
sentence_att = sentences.clone()[:,:max_lengths]
sentence_grad = sentences.clone()[:,:max_lengths]
sentence_rand = sentences.clone()[:,:max_lengths]
sentence_att_grad = sentences.clone()[:,:max_lengths]
sentence_att_mul_grad = sentences.clone()[:,:max_lengths]
g1[model_masks[:,:max_lengths]] = float("-inf")
top_grad = torch.topk(g1, k = g1.size(1), largest = largest)[1]
top_att = torch.topk(weights, k = weights.size(1),
largest = largest)[1]
top_rand = torch.randn(top_att.shape)
top_rand = torch.topk(top_rand, k = weights.size(1),
largest = largest)[1]
top_att_grad = torch.topk(weights_def_grad_soft,
k = weights.size(1),
largest = largest)[1]
top_att_mul_grad = torch.topk(weight_mul_grad,
k = weights.size(1),
largest = largest)[1]
temp_pred = []
temp_act = []
temp_act.append(labels.cpu().data.numpy())
temp_pred.append(yhat.max(-1)[1].cpu().data.numpy())
model.eval()
actual_set[doc_id] = labels.unsqueeze(-1)
rand_set[doc_id, 0] = yhat.max(-1)[1]
att_set[doc_id, 0] = yhat.max(-1)[1]
grad_set[doc_id, 0] = yhat.max(-1)[1]
att_grad_set[doc_id, 0] = yhat.max(-1)[1]
att_x_grad_set[doc_id, 0] = yhat.max(-1)[1]
rows = torch.arange(sentences.size(0))
for _j_ in range(1,maximum):
sentence_grad[rows, top_grad[:,_j_]] = 0
sentence_att[rows, top_att[:,_j_]] = 0
sentence_att_grad[rows, top_att_grad[:,_j_]] = 0
sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0
sentence_rand[rows, top_rand[:,_j_]] = 0
yhat_rand, _ = model(sentence_rand,lengths)
rand_set[doc_id, _j_] = yhat_rand.max(-1)[1]
yhat_att, _ = model(sentence_att,lengths)
att_set[doc_id, _j_] = yhat_att.max(-1)[1]
yhat_grad, _ = model(sentence_grad,lengths)
grad_set[doc_id, _j_] = yhat_grad.max(-1)[1]
yhat_att_grad, _ = model(sentence_att_grad,lengths)
att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1]
yhat_att_x_grad, _ = model(sentence_att_mul_grad,lengths)
att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.max(-1)[1]
if batchi % modulo == 0 :
print("Remaining: ", len(data)- batchi)
docs = torch.LongTensor(docs)
rand_set = rand_set[docs]
att_set = att_set[docs]
grad_set = grad_set[docs]
att_grad_set = att_grad_set[docs]
att_x_grad_set = att_x_grad_set[docs]
actual_set = actual_set[docs]
for _k_ in range(0,maximum):
actual = actual_set.flatten().cpu().data.numpy()
rand_pred = classification_report(actual,
rand_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
att_pred = classification_report(actual,
att_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
grad_pred = classification_report(actual,
grad_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
att_grad_pred = classification_report(actual,
att_grad_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
att_x_grad_pred = classification_report(actual,
att_x_grad_set[:,_k_].cpu().data.numpy(),
output_dict = True)["macro avg"]["f1-score"]
results["random"].append(rand_pred)
results["attention"].append(att_pred)
results["gradient"].append(grad_pred)
results["grad_attention"].append(att_grad_pred)
results["grad*attention"].append(att_x_grad_pred)
results = pd.DataFrame.from_dict(results)
results.plot(kind = "line", figsize = (18,10))
ordering = "ascending"
if largest:
ordering = "descending"
plt.savefig(save_path + "_correct_classified_" + ordering + ".png")
results.to_csv(save_path + "_correct_classified_" + ordering + ".csv")
| 34.157895
| 96
| 0.505393
| 922
| 8,437
| 4.327549
| 0.158351
| 0.031579
| 0.016291
| 0.018045
| 0.387218
| 0.320551
| 0.23183
| 0.189724
| 0.177444
| 0.119549
| 0
| 0.015206
| 0.376437
| 8,437
| 247
| 97
| 34.157895
| 0.743205
| 0
| 0
| 0.086667
| 0
| 0
| 0.039464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006667
| false
| 0
| 0.053333
| 0
| 0.06
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8610097182707b2aa40abc68e79c148fa664b19d
| 4,224
|
py
|
Python
|
helios/tasks.py
|
mattmurch/helios-server
|
c4f5409bbf7117fc561774208c07801b9ae61ff2
|
[
"Apache-2.0"
] | null | null | null |
helios/tasks.py
|
mattmurch/helios-server
|
c4f5409bbf7117fc561774208c07801b9ae61ff2
|
[
"Apache-2.0"
] | 2
|
2018-08-20T18:44:57.000Z
|
2019-01-31T17:45:08.000Z
|
helios/tasks.py
|
mattmurch/helios-server
|
c4f5409bbf7117fc561774208c07801b9ae61ff2
|
[
"Apache-2.0"
] | 1
|
2017-12-10T15:33:18.000Z
|
2017-12-10T15:33:18.000Z
|
"""
Celery queued tasks for Helios
2010-08-01
ben@adida.net
"""
import copy
from celery import shared_task
from celery.utils.log import get_logger
import signals
from models import CastVote, Election, Voter, VoterFile
from view_utils import render_template_raw
@shared_task
def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs):
cast_vote = CastVote.objects.get(id=cast_vote_id)
result = cast_vote.verify_and_store()
voter = cast_vote.voter
election = voter.election
user = voter.get_user()
if result:
# send the signal
signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote)
if status_update_message and user.can_update_status():
user.update_status(status_update_message)
else:
logger = get_logger(cast_vote_verify_and_store.__name__)
logger.error("Failed to verify and store %d" % cast_vote_id)
@shared_task
def voters_email(election_id, subject_template, body_template, extra_vars={},
voter_constraints_include=None, voter_constraints_exclude=None):
"""
voter_constraints_include are queryset filter conditions for including voters
voter_constraints_exclude are queryset filter conditions for excluding voters
"""
election = Election.objects.get(id=election_id)
# select the right list of voters
voters = election.voter_set.all()
if voter_constraints_include:
voters = voters.filter(**voter_constraints_include)
if voter_constraints_exclude:
voters = voters.exclude(**voter_constraints_exclude)
for voter in voters:
single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars)
@shared_task
def voters_notify(election_id, notification_template, extra_vars={}):
election = Election.objects.get(id=election_id)
for voter in election.voter_set.all():
single_voter_notify.delay(voter.uuid, notification_template, extra_vars)
@shared_task
def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}):
voter = Voter.objects.get(uuid=voter_uuid)
the_vars = copy.copy(extra_vars)
the_vars.update({'voter': voter})
subject = render_template_raw(None, subject_template, the_vars)
body = render_template_raw(None, body_template, the_vars)
voter.send_message(subject, body)
@shared_task
def single_voter_notify(voter_uuid, notification_template, extra_vars={}):
voter = Voter.objects.get(uuid=voter_uuid)
the_vars = copy.copy(extra_vars)
the_vars.update({'voter': voter})
notification = render_template_raw(None, notification_template, the_vars)
voter.send_notification(notification)
@shared_task
def election_compute_tally(election_id):
election = Election.objects.get(id=election_id)
election.compute_tally()
election_notify_admin.delay(election_id=election_id,
subject="encrypted tally computed",
body="""
The encrypted tally for election %s has been computed.
--
Helios
""" % election.name)
if election.has_helios_trustee():
tally_helios_decrypt.delay(election_id=election.id)
@shared_task
def tally_helios_decrypt(election_id):
election = Election.objects.get(id=election_id)
election.helios_trustee_decrypt()
election_notify_admin.delay(election_id=election_id,
subject='Helios Decrypt',
body="""
Helios has decrypted its portion of the tally
for election %s.
--
Helios
""" % election.name)
@shared_task
def voter_file_process(voter_file_id):
voter_file = VoterFile.objects.get(id=voter_file_id)
voter_file.process()
election_notify_admin.delay(election_id=voter_file.election.id,
subject='voter file processed',
body="""
Your voter file upload for election %s
has been processed.
%s voters have been created.
--
Helios
""" % (voter_file.election.name, voter_file.num_voters))
@shared_task
def election_notify_admin(election_id, subject, body):
election = Election.objects.get(id=election_id)
election.admin.send_message(subject, body)
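# Illustrative usage sketch (hedged example, not from the original module). The
# constraint dicts of voters_email are passed straight to queryset .filter() /
# .exclude(), so any Voter field lookup works. `some_election`, the template paths
# and the `vote_hash__isnull` lookup below are hypothetical, for illustration only.
voters_email.delay(
    election_id=some_election.id,
    subject_template='email/vote_subject.txt',
    body_template='email/vote_body.txt',
    extra_vars={'custom_message': 'Please vote!'},
    voter_constraints_exclude={'vote_hash__isnull': False},  # e.g. skip voters who already cast a vote
)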
| 29.746479
| 111
| 0.722775
| 548
| 4,224
| 5.268248
| 0.191606
| 0.062348
| 0.040527
| 0.045029
| 0.351576
| 0.264288
| 0.223069
| 0.182542
| 0.141323
| 0.105992
| 0
| 0.002335
| 0.18892
| 4,224
| 141
| 112
| 29.957447
| 0.840339
| 0.053741
| 0
| 0.354839
| 0
| 0
| 0.085664
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8612815990d7f299a2f7af8454d7502cc4069e32
| 4,890
|
py
|
Python
|
corehq/apps/fixtures/tests.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | 1
|
2017-02-10T03:14:51.000Z
|
2017-02-10T03:14:51.000Z
|
corehq/apps/fixtures/tests.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/fixtures/tests.py
|
dslowikowski/commcare-hq
|
ad8885cf8dab69dc85cb64f37aeaf06106124797
|
[
"BSD-3-Clause"
] | null | null | null |
from xml.etree import ElementTree
from casexml.apps.case.tests.util import check_xml_line_by_line
from casexml.apps.case.xml import V2
from corehq.apps.fixtures import fixturegenerators
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \
FixtureItemField, FieldList
from corehq.apps.fixtures.views import update_tables
from corehq.apps.fixtures.exceptions import FixtureVersionError
from corehq.apps.users.models import CommCareUser
from django.test import TestCase
class FixtureDataTest(TestCase):
def setUp(self):
self.domain = 'qwerty'
self.tag = "district"
self.data_type = FixtureDataType(
domain=self.domain,
tag=self.tag,
name="Districts",
fields=[
FixtureTypeField(
field_name="state_name",
properties=[]
),
FixtureTypeField(
field_name="district_name",
properties=["lang"]
),
FixtureTypeField(
field_name="district_id",
properties=[]
)
],
item_attributes=[],
)
self.data_type.save()
self.data_item = FixtureDataItem(
domain=self.domain,
data_type_id=self.data_type.get_id,
fields= {
"state_name": FieldList(
field_list=[
FixtureItemField(
field_value="Delhi_state",
properties={}
)
]
),
"district_name": FieldList(
field_list=[
FixtureItemField(
field_value="Delhi_in_HIN",
properties={"lang": "hin"}
),
FixtureItemField(
field_value="Delhi_in_ENG",
properties={"lang": "eng"}
)
]
),
"district_id": FieldList(
field_list=[
FixtureItemField(
field_value="Delhi_id",
properties={}
)
]
)
},
item_attributes={},
)
self.data_item.save()
self.user = CommCareUser.create(self.domain, 'to_delete', '***')
self.fixture_ownership = FixtureOwnership(
domain=self.domain,
owner_id=self.user.get_id,
owner_type='user',
data_item_id=self.data_item.get_id
)
self.fixture_ownership.save()
def tearDown(self):
self.data_type.delete()
self.data_item.delete()
self.user.delete()
self.fixture_ownership.delete()
def test_xml(self):
check_xml_line_by_line(self, """
<district>
<state_name>Delhi_state</state_name>
<district_name lang="hin">Delhi_in_HIN</district_name>
<district_name lang="eng">Delhi_in_ENG</district_name>
<district_id>Delhi_id</district_id>
</district>
""", ElementTree.tostring(self.data_item.to_xml()))
def test_ownership(self):
self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False))
self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False))
fixture, = fixturegenerators.item_lists(self.user, V2)
check_xml_line_by_line(self, """
<fixture id="item-list:district" user_id="%s">
<district_list>
<district>
<state_name>Delhi_state</state_name>
<district_name lang="hin">Delhi_in_HIN</district_name>
<district_name lang="eng">Delhi_in_ENG</district_name>
<district_id>Delhi_id</district_id>
</district>
</district_list>
</fixture>
""" % self.user.user_id, ElementTree.tostring(fixture))
self.data_item.remove_user(self.user)
self.assertItemsEqual([], self.data_item.get_all_users())
self.fixture_ownership = self.data_item.add_user(self.user)
self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False))
def test_get_indexed_items(self):
with self.assertRaises(FixtureVersionError):
fixtures = FixtureDataItem.get_indexed_items(self.domain,
self.tag, 'state_name')
delhi_id = fixtures['Delhi_state']['district_id']
self.assertEqual(delhi_id, 'Delhi_id')
| 36.492537
| 111
| 0.537628
| 459
| 4,890
| 5.455338
| 0.185185
| 0.047923
| 0.052716
| 0.029952
| 0.351038
| 0.321486
| 0.236022
| 0.216454
| 0.174121
| 0.174121
| 0
| 0.000645
| 0.365644
| 4,890
| 133
| 112
| 36.766917
| 0.806576
| 0
| 0
| 0.319328
| 0
| 0
| 0.195746
| 0.065658
| 0
| 0
| 0
| 0
| 0.05042
| 1
| 0.042017
| false
| 0
| 0.07563
| 0
| 0.12605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86160af095ef8d0435d3f1fd7140e93918c54b2c
| 2,685
|
py
|
Python
|
readthedocs/search/signals.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 10
|
2019-05-21T03:00:40.000Z
|
2022-03-12T11:24:39.000Z
|
readthedocs/search/signals.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 12
|
2019-12-05T04:47:01.000Z
|
2022-01-09T00:56:58.000Z
|
readthedocs/search/signals.py
|
agarwalrounak/readthedocs.org
|
4911600c230809bd6fb3585d1903121db2928ad6
|
[
"MIT"
] | 5
|
2019-07-08T23:45:10.000Z
|
2021-02-26T07:29:49.000Z
|
# -*- coding: utf-8 -*-
"""We define custom Django signals to trigger before executing searches."""
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django_elasticsearch_dsl.apps import DEDConfig
from readthedocs.projects.models import HTMLFile, Project
from readthedocs.projects.signals import bulk_post_create, bulk_post_delete
from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es
@receiver(bulk_post_create, sender=HTMLFile)
def index_html_file(instance_list, **_):
"""Handle indexing from the build process."""
from readthedocs.search.documents import PageDocument
kwargs = {
'app_label': HTMLFile._meta.app_label,
'model_name': HTMLFile.__name__,
'document_class': str(PageDocument),
'objects_id': [obj.id for obj in instance_list],
}
# Do not index if autosync is disabled globally
if DEDConfig.autosync_enabled():
index_objects_to_es(**kwargs)
@receiver(bulk_post_delete, sender=HTMLFile)
def remove_html_file(instance_list, **_):
"""Remove deleted files from the build process."""
from readthedocs.search.documents import PageDocument
kwargs = {
'app_label': HTMLFile._meta.app_label,
'model_name': HTMLFile.__name__,
'document_class': str(PageDocument),
'objects_id': [obj.id for obj in instance_list],
}
# Do not index if autosync is disabled globally
if DEDConfig.autosync_enabled():
delete_objects_in_es(**kwargs)
@receiver(post_save, sender=Project)
def index_project_save(instance, *args, **kwargs):
"""
Index a Project instance in response to the post_save signal.
This uses Celery to do it asynchronously, replacing how
django-elasticsearch-dsl does it.
"""
from readthedocs.search.documents import ProjectDocument
kwargs = {
'app_label': Project._meta.app_label,
'model_name': Project.__name__,
'document_class': str(ProjectDocument),
'objects_id': [instance.id],
}
# Do not index if autosync is disabled globally
if DEDConfig.autosync_enabled():
index_objects_to_es.delay(**kwargs)
@receiver(pre_delete, sender=Project)
def remove_project_delete(instance, *args, **kwargs):
from readthedocs.search.documents import ProjectDocument
kwargs = {
'app_label': Project._meta.app_label,
'model_name': Project.__name__,
'document_class': str(ProjectDocument),
'objects_id': [instance.id],
}
# Don't `delay` this because the objects will be deleted already
if DEDConfig.autosync_enabled():
delete_objects_in_es(**kwargs)
| 33.987342
| 80
| 0.714339
| 335
| 2,685
| 5.450746
| 0.286567
| 0.035049
| 0.057503
| 0.065717
| 0.532311
| 0.532311
| 0.532311
| 0.532311
| 0.532311
| 0.492881
| 0
| 0.000459
| 0.188454
| 2,685
| 78
| 81
| 34.423077
| 0.83754
| 0.195531
| 0
| 0.6
| 0
| 0
| 0.081247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.2
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86168b46a8faaf9e6d96f727abd89d459b3f8564
| 8,837
|
py
|
Python
|
TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
TimeWrapper_JE/venv/Lib/site-packages/pip/_internal/cli/progress_bars.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | 1
|
2021-06-20T19:28:37.000Z
|
2021-06-20T19:28:37.000Z
|
import itertools
import sys
from signal import SIGINT, default_int_handler, signal
from typing import Any, Dict, List
from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
from pip._vendor.progress.spinner import Spinner
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.misc import format_size
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
def _select_progress_class(preferred, fallback):
# type: (Bar, Bar) -> Bar
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", ""),
getattr(preferred, "fill", ""),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
"".join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any
class InterruptibleMixin:
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
"""
Save the original SIGINT handler for later.
"""
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
# type: () -> None
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super().finish() # type: ignore
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame): # type: ignore
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class SilentBar(Bar):
def update(self):
# type: () -> None
pass
class BlueEmojiBar(IncrementalBar):
suffix = "%(percent)d%%"
bar_prefix = " "
bar_suffix = " "
phases = ("\U0001F539", "\U0001F537", "\U0001F535")
class DownloadProgressMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
self.message = (" " * (get_indentation() + 2)) + self.message # type: str
@property
def downloaded(self):
# type: () -> str
return format_size(self.index) # type: ignore
@property
def download_speed(self):
# type: () -> str
# Avoid zero division errors...
if self.avg == 0.0: # type: ignore
return "..."
return format_size(1 / self.avg) + "/s" # type: ignore
@property
def pretty_eta(self):
# type: () -> str
if self.eta: # type: ignore
return f"eta {self.eta_td}" # type: ignore
return ""
def iter(self, it): # type: ignore
for x in it:
yield x
# B305 is incorrectly raised here
# https://github.com/PyCQA/flake8-bugbear/issues/59
self.next(len(x)) # noqa: B305
self.finish()
class WindowsMixin:
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor: # type: ignore
self.hide_cursor = False
# https://github.com/python/mypy/issues/5887
super().__init__(*args, **kwargs) # type: ignore
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file) # type: ignore
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar):
pass
class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):
pass
class DownloadBar(BaseDownloadProgressBar, Bar):
pass
class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar):
pass
class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar):
pass
class DownloadProgressSpinner(
WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner
):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
# type: () -> str
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
# type: () -> None
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = "".join(
[
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
]
)
self.writeln(line)
BAR_TYPES = {
"off": (DownloadSilentBar, DownloadSilentBar),
"on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
"ascii": (DownloadBar, DownloadProgressSpinner),
"pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
"emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner),
}
def DownloadProgressProvider(progress_bar, max=None): # type: ignore
if max is None or max == 0:
return BAR_TYPES[progress_bar][1]().iter
else:
return BAR_TYPES[progress_bar][0](max=max).iter
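# Illustrative usage sketch (hedged example, not from the original module). It
# shows one way the provider above can be consumed: pick a bar style, then wrap an
# iterable of byte chunks so progress is rendered as the chunks are consumed. The
# chunk sizes below are made up for illustration.
demo_chunks = [b'x' * 1024] * 10
demo_progress = DownloadProgressProvider('ascii', max=len(demo_chunks) * 1024)
for demo_chunk in demo_progress(demo_chunks):
    pass  # consume the chunk (e.g. write it to disk)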
| 33.729008
| 88
| 0.625099
| 1,026
| 8,837
| 5.307992
| 0.305068
| 0.025707
| 0.015424
| 0.011017
| 0.129453
| 0.11256
| 0.101175
| 0.101175
| 0.101175
| 0.101175
| 0
| 0.009479
| 0.283694
| 8,837
| 261
| 89
| 33.858238
| 0.850869
| 0.37343
| 0
| 0.209302
| 0
| 0
| 0.043504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108527
| false
| 0.046512
| 0.077519
| 0.007752
| 0.434109
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86169ad5486623924eba0430b7afc33561fa170a
| 4,012
|
py
|
Python
|
scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 8
|
2021-06-30T06:55:14.000Z
|
2022-03-18T01:57:14.000Z
|
scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 1
|
2021-06-30T03:08:15.000Z
|
2021-06-30T03:08:15.000Z
|
scripts/study_case/ID_5/matchzoo/auto/tuner/tune.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 2
|
2021-11-17T11:19:48.000Z
|
2021-11-18T03:05:58.000Z
|
import typing
import numpy as np
import scripts.study_case.ID_5.matchzoo as mz
from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric
from .tuner import Tuner
def tune(
params: 'mz.ParamTable',
optimizer: str = 'adam',
trainloader: mz.dataloader.DataLoader = None,
validloader: mz.dataloader.DataLoader = None,
embedding: np.ndarray = None,
fit_kwargs: dict = None,
metric: typing.Union[str, BaseMetric] = None,
mode: str = 'maximize',
num_runs: int = 10,
verbose=1
):
"""
Tune model hyper-parameters.
A simple shorthand for using :class:`matchzoo.auto.Tuner`.
`model.params.hyper_space` represents the model's hyper-parameters
search space, which is the cross-product of individual hyper parameter's
hyper space. When a `Tuner` builds a model, for each hyper parameter in
`model.params`, if the hyper-parameter has a hyper-space, then a sample
will be taken in the space. However, if the hyper-parameter does not
have a hyper-space, then the default value of the hyper-parameter will
be used.
See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage.
:param params: A completed parameter table to tune. Usually `model.params`
of the desired model to tune. `params.completed()` should be `True`.
:param optimizer: Str or `Optimizer` class. Optimizer for optimizing model.
:param trainloader: Training data to use. Should be a `DataLoader`.
:param validloader: Testing data to use. Should be a `DataLoader`.
:param embedding: Embedding used by model.
:param fit_kwargs: Extra keyword arguments to pass to `fit`.
(default: `dict(epochs=10, verbose=0)`)
:param metric: Metric to tune upon. Must be one of the metrics in
`model.params['task'].metrics`. (default: the first metric in
`params['task'].metrics`.)
:param mode: Either `maximize` the metric or `minimize` the metric.
(default: 'maximize')
:param num_runs: Number of runs. Each run takes a sample in
`params.hyper_space` and build a model based on the sample.
(default: 10)
:param callbacks: A list of callbacks to handle. Handled sequentially
at every callback point.
:param verbose: Verbosity. (default: 1)
Example:
>>> import scripts.study_case.ID_5.matchzoo as mz
>>> import numpy as np
>>> train = mz.datasets.toy.load_data('train')
>>> valid = mz.datasets.toy.load_data('dev')
>>> prpr = mz.models.DenseBaseline.get_default_preprocessor()
>>> train = prpr.fit_transform(train, verbose=0)
>>> valid = prpr.transform(valid, verbose=0)
>>> trainset = mz.dataloader.Dataset(train)
>>> validset = mz.dataloader.Dataset(valid)
>>> padding = mz.models.DenseBaseline.get_default_padding_callback()
>>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding)
>>> validloader = mz.dataloader.DataLoader(validset, callback=padding)
>>> model = mz.models.DenseBaseline()
>>> model.params['task'] = mz.tasks.Ranking()
>>> optimizer = 'adam'
>>> embedding = np.random.uniform(-0.2, 0.2,
... (prpr.context['vocab_size'], 100))
>>> tuner = mz.auto.Tuner(
... params=model.params,
... optimizer=optimizer,
... trainloader=trainloader,
... validloader=validloader,
... embedding=embedding,
... num_runs=1,
... verbose=0
... )
>>> results = tuner.tune()
>>> sorted(results['best'].keys())
['#', 'params', 'sample', 'score']
"""
tuner = Tuner(
params=params,
optimizer=optimizer,
trainloader=trainloader,
validloader=validloader,
embedding=embedding,
fit_kwargs=fit_kwargs,
metric=metric,
mode=mode,
num_runs=num_runs,
verbose=verbose
)
return tuner.tune()
| 38.951456
| 79
| 0.642323
| 491
| 4,012
| 5.191446
| 0.342159
| 0.028246
| 0.034523
| 0.021185
| 0.173794
| 0.132993
| 0.122401
| 0.122401
| 0.096508
| 0
| 0
| 0.007556
| 0.241276
| 4,012
| 102
| 80
| 39.333333
| 0.829829
| 0.723829
| 0
| 0
| 0
| 0
| 0.029104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.166667
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861799191a7d114eaded88fe6c8c8ba1d448c7c7
| 4,392
|
py
|
Python
|
libs/gym/tests/wrappers/test_pixel_observation.py
|
maxgold/icml22
|
49f026dd2314091639b52f5b8364a29e8000b738
|
[
"MIT"
] | null | null | null |
libs/gym/tests/wrappers/test_pixel_observation.py
|
maxgold/icml22
|
49f026dd2314091639b52f5b8364a29e8000b738
|
[
"MIT"
] | null | null | null |
libs/gym/tests/wrappers/test_pixel_observation.py
|
maxgold/icml22
|
49f026dd2314091639b52f5b8364a29e8000b738
|
[
"MIT"
] | null | null | null |
"""Tests for the pixel observation wrapper."""
from typing import Optional
import pytest
import numpy as np
import gym
from gym import spaces
from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY
class FakeEnvironment(gym.Env):
def __init__(self):
self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32)
def render(self, width=32, height=32, *args, **kwargs):
del args
del kwargs
image_shape = (height, width, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self, seed: Optional[int] = None):
super().reset(seed=seed)
observation = self.observation_space.sample()
return observation
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
class FakeArrayObservationEnvironment(FakeEnvironment):
def __init__(self, *args, **kwargs):
self.observation_space = spaces.Box(
shape=(2,), low=-1, high=1, dtype=np.float32
)
super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs)
class FakeDictObservationEnvironment(FakeEnvironment):
def __init__(self, *args, **kwargs):
self.observation_space = spaces.Dict(
{
"state": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32),
}
)
super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs)
class TestPixelObservationWrapper(object):
@pytest.mark.parametrize("pixels_only", (True, False))
def test_dict_observation(self, pixels_only):
pixel_key = "rgb"
env = FakeDictObservationEnvironment()
# Make sure we are testing the right environment for the test.
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Dict)
width, height = (320, 240)
# The wrapper should only add one observation.
wrapped_env = PixelObservationWrapper(
env,
pixel_keys=(pixel_key,),
pixels_only=pixels_only,
render_kwargs={pixel_key: {"width": width, "height": height}},
)
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if pixels_only:
assert len(wrapped_env.observation_space.spaces) == 1
assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]
else:
assert (
len(wrapped_env.observation_space.spaces)
== len(observation_space.spaces) + 1
)
expected_keys = list(observation_space.spaces.keys()) + [pixel_key]
assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys
# Check that the added space item is consistent with the added observation.
observation = wrapped_env.reset()
rgb_observation = observation[pixel_key]
assert rgb_observation.shape == (height, width, 3)
assert rgb_observation.dtype == np.uint8
@pytest.mark.parametrize("pixels_only", (True, False))
def test_single_array_observation(self, pixels_only):
pixel_key = "depth"
env = FakeArrayObservationEnvironment()
observation_space = env.observation_space
assert isinstance(observation_space, spaces.Box)
wrapped_env = PixelObservationWrapper(
env, pixel_keys=(pixel_key,), pixels_only=pixels_only
)
# Intentional self-assignment: exercises the wrapper's observation_space setter.
wrapped_env.observation_space = wrapped_env.observation_space
assert isinstance(wrapped_env.observation_space, spaces.Dict)
if pixels_only:
assert len(wrapped_env.observation_space.spaces) == 1
assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]
else:
assert len(wrapped_env.observation_space.spaces) == 2
assert list(wrapped_env.observation_space.spaces.keys()) == [
STATE_KEY,
pixel_key,
]
observation = wrapped_env.reset()
depth_observation = observation[pixel_key]
assert depth_observation.shape == (32, 32, 3)
assert depth_observation.dtype == np.uint8
if not pixels_only:
assert isinstance(observation[STATE_KEY], np.ndarray)
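For reference, the wrapper exercised by these tests is normally applied to an environment whose `render()` returns an RGB array. A small sketch reusing the fake environment defined above; treat the key name `"pixels"` and the exact default arguments as assumptions of this sketch rather than guarantees of the wrapper:

```python
# Hypothetical usage sketch mirroring test_single_array_observation above.
env = FakeArrayObservationEnvironment()
wrapped = PixelObservationWrapper(env, pixels_only=False, pixel_keys=("pixels",))

obs = wrapped.reset()
print(list(obs.keys()))      # typically [STATE_KEY, 'pixels']: original obs plus a frame
print(obs["pixels"].shape)   # (32, 32, 3) uint8 image from FakeEnvironment.render()
```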
| 35.136
| 85
| 0.651184
| 481
| 4,392
| 5.727651
| 0.218295
| 0.139383
| 0.127768
| 0.113249
| 0.517604
| 0.441016
| 0.404719
| 0.39637
| 0.362976
| 0.328857
| 0
| 0.01247
| 0.251366
| 4,392
| 124
| 86
| 35.419355
| 0.825426
| 0.050319
| 0
| 0.23913
| 0
| 0
| 0.01105
| 0
| 0
| 0
| 0
| 0
| 0.184783
| 1
| 0.086957
| false
| 0
| 0.065217
| 0
| 0.228261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8618701c5bffe90f48c4363133a7c542c718e23a
| 2,144
|
py
|
Python
|
real_plot_fft_stft_impl.py
|
MuAuan/Scipy-Swan
|
2d79175e8fc2ab8179ea95e1b22918c29d88b7b5
|
[
"MIT"
] | null | null | null |
real_plot_fft_stft_impl.py
|
MuAuan/Scipy-Swan
|
2d79175e8fc2ab8179ea95e1b22918c29d88b7b5
|
[
"MIT"
] | null | null | null |
real_plot_fft_stft_impl.py
|
MuAuan/Scipy-Swan
|
2d79175e8fc2ab8179ea95e1b22918c29d88b7b5
|
[
"MIT"
] | null | null | null |
import pyaudio
import wave
from scipy.fftpack import fft, ifft
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import signal
from swan import pycwt
CHUNK = 1024
FORMAT = pyaudio.paInt16 # 16-bit integer samples
CHANNELS = 1 # 1: monaural, 2: stereo
RATE = 22100 # sampling rate in Hz (22.1 kHz; 44.1 kHz is also common)
RECORD_SECONDS = 5 # record 5 seconds per loop
WAVE_OUTPUT_FILENAME = "output2.wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
s=1
# initialize the figure
fig = plt.figure(figsize=(12, 10))
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
ax2.axis([0, 5, 200,20000])
ax2.set_yscale('log')
while True:
fig.delaxes(ax1)
fig.delaxes(ax3)
ax1 = fig.add_subplot(311)
ax3 = fig.add_subplot(313)
print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("* done recording")
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
wavfile = WAVE_OUTPUT_FILENAME
wr = wave.open(wavfile, "rb")
ch = CHANNELS #wr.getnchannels()
width = p.get_sample_size(FORMAT) #wr.getsampwidth()
fr = RATE #wr.getframerate()
fn = wr.getnframes()
fs = fn / fr
origin = wr.readframes(wr.getnframes())
data = origin[:fn]
wr.close()
sig = np.frombuffer(data, dtype="int16") /32768.0
t = np.linspace(0, fs, fn // 2, endpoint=False)  # the number of samples must be an integer
ax1.axis([0, 5, -0.0075,0.0075])
ax1.plot(t, sig)
nperseg = 256
f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg)
ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv')
freq = fft(sig, int(fn/2))
Pyy = np.sqrt(freq*freq.conj())*2/fn
f = np.arange(int(fn/2))
ax3.axis([200, 20000, 0,0.000075])
ax3.set_xscale('log')
ax3.plot(f,Pyy)
plt.pause(1)
plt.savefig('figure'+str(s)+'.png')
s += 1
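The `signal.stft` call above passes an unusual sampling-rate argument (`fs*fn/50`), which makes the frequency axis hard to interpret. For clarity, here is a self-contained sketch of the same API on a synthetic tone with the real sampling rate, so the frequency bins come out in Hz (the tone and parameter values are illustrative assumptions, not taken from the script above):

```python
# Minimal STFT sketch with scipy.signal: a 1 kHz tone sampled at 22.1 kHz.
import numpy as np
from scipy import signal

fs = 22100                                   # sampling rate in Hz
t = np.arange(0, 1.0, 1.0 / fs)              # 1 second of samples
tone = 0.5 * np.sin(2 * np.pi * 1000 * t)    # 1 kHz sine

f, seg_t, Zxx = signal.stft(tone, fs=fs, nperseg=256)
print(f.shape, seg_t.shape, Zxx.shape)       # frequency bins, time segments, complex STFT
print(f[np.abs(Zxx).mean(axis=1).argmax()])  # strongest bin lands near 1000 Hz
```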
| 24.930233
| 62
| 0.620802
| 316
| 2,144
| 4.14557
| 0.449367
| 0.022901
| 0.049618
| 0.024427
| 0.08855
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073646
| 0.233675
| 2,144
| 85
| 63
| 25.223529
| 0.723676
| 0.049907
| 0
| 0.057143
| 0
| 0
| 0.032544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114286
| 0
| 0.114286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861975d3c36c28b9ba6319750aff575b598fb65c
| 4,147
|
py
|
Python
|
PID/PDControl.py
|
l756302098/ros_practice
|
4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7
|
[
"MIT"
] | null | null | null |
PID/PDControl.py
|
l756302098/ros_practice
|
4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7
|
[
"MIT"
] | null | null | null |
PID/PDControl.py
|
l756302098/ros_practice
|
4da8b4ddb25ada2e6f1adb3c0f8b34576aedf6b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
class Robot(object):
def __init__(self, length=20.0):
"""
Creates a robot and initializes location/orientation to 0, 0, 0.
"""
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.steering_drift = 0.0
def set(self, x, y, orientation):
"""
Sets a robot coordinate.
"""
self.x = x
self.y = y
self.orientation = orientation % (2.0 * np.pi)
def set_noise(self, steering_noise, distance_noise):
"""
Sets the noise parameters.
"""
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.steering_noise = steering_noise
self.distance_noise = distance_noise
def set_steering_drift(self, drift):
"""
Sets the systematic steering drift parameter.
"""
self.steering_drift = drift
def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
"""
steering = front wheel steering angle, limited by max_steering_angle
distance = total distance driven, must be non-negative
"""
if steering > max_steering_angle:
steering = max_steering_angle
if steering < -max_steering_angle:
steering = -max_steering_angle
if distance < 0.0:
distance = 0.0
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# apply steering drift
steering2 += self.steering_drift
# Execute motion
turn = np.tan(steering2) * distance2 / self.length
if abs(turn) < tolerance:
# approximate by straight line motion
self.x += distance2 * np.cos(self.orientation)
self.y += distance2 * np.sin(self.orientation)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
else:
# approximate bicycle model for motion
radius = distance2 / turn
cx = self.x - (np.sin(self.orientation) * radius)
cy = self.y + (np.cos(self.orientation) * radius)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
self.x = cx + (np.sin(self.orientation) * radius)
self.y = cy - (np.cos(self.orientation) * radius)
def __repr__(self):
return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
def run_p(robot, tau, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
for i in range(n):
cte = robot.y
steer = -tau * cte
robot.move(steer, speed)
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
return x_trajectory, y_trajectory
robot = Robot()
robot.set(0, 1, 0)
robot.set_noise(0.1, 0.05)
def run(robot, tau_p, tau_d, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
# steering = -tau_p * CTE - tau_d * diff_CTE
crosstrack_error = []
crosstrack_error.append(0.0)
diff_CTE = 0.0
startX = robot.x
startY = robot.y
startOrientation = robot.orientation
distance = 0.0
for i in range(n):
steering = -tau_p * crosstrack_error[i] - tau_d * diff_CTE
distance = speed
robot.move(steering, distance)
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
# when on the original path, x = robot.x, calculate y.
x1 = robot.x
y1 = startY + (x1 - startX) * np.tan(startOrientation)
crosstrack = (robot.y - y1) * np.cos(startOrientation)
crosstrack_error.append(crosstrack)
diff_CTE = crosstrack_error[i+1] - crosstrack_error[i]
print("{} [{}, {}] {}, {}".format(i, robot.x, robot.y, steering, crosstrack))
return x_trajectory, y_trajectory
x_trajectory, y_trajectory = run(robot, 0.1, 1.0)
n = len(x_trajectory)
fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))
ax1.plot(x_trajectory, y_trajectory, 'g', label='PD controller')
ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')
plt.show()
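The steering command in `run` combines a proportional term on the cross-track error (CTE) with a derivative term on its change per step. A tiny worked example of that law with illustrative numbers (the gains here mirror the call above but should be treated as example values, not tuned constants):

```python
# Worked example of the PD steering law used in run():
#   steering = -tau_p * CTE - tau_d * (CTE_t - CTE_{t-1})
tau_p, tau_d = 0.1, 1.0            # illustrative gains
cte_prev, cte = 0.0, 0.5           # robot drifted 0.5 m off the reference path

diff_cte = cte - cte_prev
steering = -tau_p * cte - tau_d * diff_cte
print(steering)                    # -0.55: steer back toward the path, with an
                                   # extra correction because the error is still growing
```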
| 17.875
| 85
| 0.628647
| 557
| 4,147
| 4.551167
| 0.253142
| 0.010256
| 0.014201
| 0.052071
| 0.206706
| 0.134122
| 0.134122
| 0.134122
| 0.134122
| 0.076529
| 0
| 0.027591
| 0.248372
| 4,147
| 231
| 86
| 17.952381
| 0.785691
| 0.149023
| 0
| 0.183908
| 0
| 0
| 0.019842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091954
| false
| 0
| 0.034483
| 0.011494
| 0.16092
| 0.011494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861c79331c252b7937573a42f8e033c57c978cd9
| 6,138
|
py
|
Python
|
oneflow/python/test/ops/test_l1loss.py
|
wanghongsheng01/framework_enflame
|
debf613e05e3f5ea8084c3e79b60d0dd9e349526
|
[
"Apache-2.0"
] | 2
|
2021-09-10T00:19:49.000Z
|
2021-11-16T11:27:20.000Z
|
oneflow/python/test/ops/test_l1loss.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | 1
|
2021-06-16T08:37:50.000Z
|
2021-06-16T08:37:50.000Z
|
oneflow/python/test/ops/test_l1loss.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | 1
|
2021-11-10T07:57:01.000Z
|
2021-11-10T07:57:01.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as tp
from test_util import GenArgList
import unittest
from collections import OrderedDict
from typing import Dict
import os
def _compare_l1loss_with_np(
input_shape, target_shape, device_type, machine_ids, device_counts
):
input = np.random.random(size=input_shape).astype(np.float32)
target = np.random.random(size=target_shape).astype(np.float32)
assert device_type in ["cpu", "gpu"]
func_config = flow.FunctionConfig()
flow.clear_default_session()
if device_type == "cpu":
flow.config.cpu_device_num(device_counts)
else:
flow.config.gpu_device_num(device_counts)
func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
func_config.default_logical_view(flow.scope.consistent_view())
def np_l1loss(np_input, np_target):
np_l1 = np.abs(np_target - np_input)
np_l1_mean = np.mean(np_l1)
np_l1_sum = np.sum(np_l1)
np_l1_dict = {
"np_l1_loss": np_l1,
"np_l1_loss_mean": np_l1_mean,
"np_l1_loss_sum": np_l1_sum,
}
return np_l1_dict
def np_l1_loss_diff(np_input, np_target):
# Use numpy to compute diff
original_shape = np_target.shape
elemcnt = np_target.size
prediction = np_input.reshape(-1)
label = np_target.reshape(-1)
prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype)
for i in np.arange(elemcnt):
diff = prediction[i] - label[i]
prediction_grad[i] = np.sign(diff)
grad_mean = prediction_grad.reshape(original_shape) / elemcnt
# TODO: if you want to get the grad when the reduction = "sum", you can use the follow code
# grad_sum = prediction_grad.reshape(original_shape)
grad_dict = {
"np_grad_mean": grad_mean,
}
return grad_dict
# Use Numpy to compute l1 loss
np_out_l1loss_dict = np_l1loss(input, target)
# Use Numpy to compute l1 grad
np_grad_dict = np_l1_loss_diff(input, target)
def assert_prediction_grad(blob: tp.Numpy):
# Evaluate the gradient. Here we only test the reduction type == "mean"
assert np.allclose(blob, np_grad_dict["np_grad_mean"])
@flow.global_function(type="train", function_config=func_config)
def oneflow_l1loss(
of_input: tp.Numpy.Placeholder(shape=input.shape),
of_target: tp.Numpy.Placeholder(shape=target.shape),
) -> Dict[str, tp.Numpy]:
with flow.scope.placement(device_type, "0:0"):
v = flow.get_variable(
shape=target.shape,
dtype=flow.float32,
initializer=flow.constant_initializer(0),
name="v",
)
x_var = of_input + v
# watch the diff
flow.watch_diff(x_var, assert_prediction_grad)
l1loss = flow.nn.L1Loss(x_var, of_target, reduction="none", name="of_l1loss")
l1loss_mean = flow.nn.L1Loss(
x_var, of_target, reduction="mean", name="of_l1loss_mean"
)
l1loss_sum = flow.nn.L1Loss(
x_var, of_target, reduction="sum", name="of_l1loss_sum"
)
with flow.scope.placement(device_type, "0:0"):
# We only test reduction="mean" diff
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
).minimize(l1loss_mean)
return {
"of_l1_loss": l1loss,
"of_l1_loss_mean": l1loss_mean,
"of_l1_loss_sum": l1loss_sum,
}
of_out_l1loss_dict = oneflow_l1loss(input, target)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss"], np_out_l1loss_dict["np_l1_loss"]
)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss_mean"][0], np_out_l1loss_dict["np_l1_loss_mean"]
)
assert np.allclose(
of_out_l1loss_dict["of_l1_loss_sum"][0], np_out_l1loss_dict["np_l1_loss_sum"]
)
def _gen_arg_dict(shape, device_type, machine_ids, device_counts):
# Generate a dict to pass parameter to test case
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["target_shape"] = [shape]
arg_dict["device_type"] = [device_type]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testl1loss1n1d(flow.unittest.TestCase):
def test_l1loss_cpu(test_case):
arg_dict = _gen_arg_dict(
shape=(16, 3), device_type="cpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_l1loss_gpu(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 16, 32), device_type="gpu", machine_ids="0:0", device_counts=1
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
@flow.unittest.skip_unless_1n2d()
class Testl1loss1n2d(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_l1loss_gpu_1n2d(test_case):
arg_dict = _gen_arg_dict(
shape=(3, 32, 16), device_type="gpu", machine_ids="0:0-1", device_counts=2
)
for arg in GenArgList(arg_dict):
_compare_l1loss_with_np(*arg)
if __name__ == "__main__":
unittest.main()
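The reference maths embedded in `np_l1loss` and `np_l1_loss_diff` reduces to a mean absolute error and a `sign(diff) / N` gradient for the "mean" reduction. A small standalone NumPy sketch of that relationship, independent of OneFlow and with made-up shapes:

```python
# Standalone NumPy sketch of L1 loss and its mean-reduction gradient.
import numpy as np

pred = np.random.random((4, 3))
target = np.random.random((4, 3))

l1_mean = np.mean(np.abs(pred - target))       # reduction="mean"
grad = np.sign(pred - target) / pred.size      # d(l1_mean) / d(pred)

# Finite-difference check on one element against the analytic gradient.
eps = 1e-3
bumped = pred.copy()
bumped[0, 0] += eps
numeric = (np.mean(np.abs(bumped - target)) - l1_mean) / eps
print(np.isclose(numeric, grad[0, 0], atol=1e-3))
# True, unless pred[0, 0] happens to sit within eps of target[0, 0].
```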
| 33.540984
| 99
| 0.665689
| 863
| 6,138
| 4.431054
| 0.220162
| 0.018828
| 0.016736
| 0.01569
| 0.291318
| 0.239801
| 0.239801
| 0.198483
| 0.134414
| 0.118201
| 0
| 0.025319
| 0.234278
| 6,138
| 182
| 100
| 33.725275
| 0.788298
| 0.158684
| 0
| 0.129032
| 0
| 0
| 0.08042
| 0.008159
| 0
| 0
| 0
| 0.005495
| 0.056452
| 1
| 0.072581
| false
| 0
| 0.064516
| 0
| 0.185484
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861cc7ffb7999a7f4d6f545192eee4e0b87dd394
| 869
|
py
|
Python
|
tests/test_schema.py
|
Dog-Egg/dida
|
17fd8dce0fe198e65effb48816a2339802234974
|
[
"MIT"
] | null | null | null |
tests/test_schema.py
|
Dog-Egg/dida
|
17fd8dce0fe198e65effb48816a2339802234974
|
[
"MIT"
] | 3
|
2021-06-15T19:10:55.000Z
|
2022-02-27T10:30:28.000Z
|
tests/test_schema.py
|
Dog-Egg/dida
|
17fd8dce0fe198e65effb48816a2339802234974
|
[
"MIT"
] | null | null | null |
import unittest
import datetime
from dida import schemas, triggers
from marshmallow import ValidationError
class TestTriggerSchema(unittest.TestCase):
def test_dump_trigger(self):
result = schemas.TriggerSchema().dump(triggers.IntervalTrigger())
print('IntervalTrigger dump:', result)
result = schemas.TriggerSchema().dump(triggers.DateTrigger())
print('DateTrigger dump:', result)
def test_load_trigger(self):
self.assertRaises(ValidationError, schemas.TriggerSchema().load, {"type": "unknown"})
obj = schemas.TriggerSchema().load({'type': "interval"})
self.assertIsInstance(obj, triggers.IntervalTrigger)
obj = schemas.TriggerSchema().load({'type': 'date', "params": {'run_date': "2020-01-01 00:00:00"}})
self.assertEqual(obj.run_date, datetime.datetime(2020, 1, 1).astimezone())
| 36.208333
| 107
| 0.700806
| 92
| 869
| 6.554348
| 0.413043
| 0.165837
| 0.119403
| 0.139303
| 0.228856
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027435
| 0.161105
| 869
| 23
| 108
| 37.782609
| 0.799726
| 0
| 0
| 0
| 0
| 0
| 0.117376
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.4375
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861cdcc494cb3bd3e797fd81fd6a76984fde4f26
| 26,883
|
py
|
Python
|
apps/content/views.py
|
Sunbird-Ed/evolve-api
|
371b39422839762e32401340456c13858cb8e1e9
|
[
"MIT"
] | 1
|
2019-02-27T15:26:11.000Z
|
2019-02-27T15:26:11.000Z
|
apps/content/views.py
|
Sunbird-Ed/evolve-api
|
371b39422839762e32401340456c13858cb8e1e9
|
[
"MIT"
] | 9
|
2019-12-16T10:09:46.000Z
|
2022-03-11T23:42:12.000Z
|
apps/content/views.py
|
Sunbird-Ed/evolve-api
|
371b39422839762e32401340456c13858cb8e1e9
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework import status
from rest_framework.generics import (
ListAPIView,
ListCreateAPIView,
ListAPIView,
RetrieveUpdateAPIView,)
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import permission_classes
from apps.configuration.models import Book
from apps.hardspot.models import HardSpot
from .models import Content,ContentContributors
from .serializers import (
ContentListSerializer,
BookNestedSerializer,
BookListSerializer,
ContentStatusListSerializer,
SectionKeywordSerializer,
SubSectionKeywordSerializer,
SectionKeywordsSerializer,
ChapterKeywordsSerializer,
SubSectionKeywordsSerializer,
KeywordSerializer,
ContentContributorSerializer,
ApprovedContentSerializer,
ContentStatusSerializer,
HardSpotCreateSerializer,
ContentContributorsSerializer,
SubSubSectionKeywordsSerializer,
ContentStatusSerializerFileFormat,
)
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from rest_framework.parsers import MultiPartParser
from apps.dataupload.models import (Chapter,
Section,
SubSection,
ChapterKeyword,
SectionKeyword,
SubSectionKeyword,
SubSubSectionKeyword,
)
import json
import pandas as pd
from evolve import settings
from evolve import settings
from azure.storage.blob import (
BlockBlobService,
ContainerPermissions
)
from datetime import datetime, timedelta
import os
import itertools
from django.db.models import Q
import threading
account_name = settings.AZURE_ACCOUNT_NAME
account_key = settings.AZURE_ACCOUNT_KEY
CONTAINER_NAME= settings.AZURE_CONTAINER
block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)
class ContentList(ListCreateAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
parser_classes = (MultiPartParser,)
def get(self, request):
try:
queryset = self.get_queryset()
serializer = ContentStatusListSerializer(queryset, many=True)
context = {"success": True, "message": "Chapter List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Chapter list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request,format=None):
try:
serializer = ContentListSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Created Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Invalid Input Data to create content"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed to create content.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ContentRetrieveUpdate(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
queryset = self.get_object()
serializer = ContentListSerializer(queryset, many=True)
context = {"success": True, "message": "Chapter List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def put(self, request, pk, format=None):
try:
try:
content_list = self.get_object()
except Exception as error:
context = {'success': "false", 'message': 'content Id does not exist.'}
return Response(context, status=status.HTTP_404_NOT_FOUND)
serializer = ContentListSerializer(content_list, data=request.data, context={"user":request.user}, partial=True)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Updation Successful","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Updation Failed"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed To Update content Details.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BookNestedList(ListAPIView):
queryset = Book.objects.all()
serializer_class = BookNestedSerializer
def get(self, request):
try:
subject = request.query_params.get('subject', None)
if subject is not None:
queryset=self.get_queryset().filter(subject__id=subject, content_only=True)
else:
queryset = self.get_queryset().filter(content_only=True)
serializer = BookNestedSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BookListView(ListAPIView):
queryset = Book.objects.all()
serializer_class = BookListSerializer
def get(self, request):
try:
subject = request.query_params.get('subject', None)
if subject is not None:
queryset=self.get_queryset().filter(subject__id=subject)
else:
queryset = self.get_queryset()
serializer = BookListSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentApprovedList(ListAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=True)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True)
elif sub_sub_section_id is not None:
queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True)
else:
queryset = self.get_queryset().filter(approved=True)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Approved List", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentPendingList(ListAPIView):
queryset = Content.objects.all()
serializer_class = KeywordSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None)
elif sub_sub_section_id is not None:
queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None)
else:
queryset = self.get_queryset().filter(approved=False, approved_by=None)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Pending List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Pending list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentStatusList(ListCreateAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
if request.query_params.get('chapter', None) is not None:
queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None))
elif request.query_params.get('section', None) is not None:
queryset=self.get_queryset().filter(section_id=request.query_params.get('section', None))
elif request.query_params.get('sub_section', None) is not None:
queryset=self.get_queryset().filter(sub_section_id=request.query_params.get('sub_section', None))
else:
queryset = self.get_queryset()
serializer = ContentListSerializer(queryset, many=True)
context = {"success": True, "message": "Content Status List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Status list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentRejectedList(ListAPIView):
queryset = Content.objects.all()
serializer_class = ContentListSerializer
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section',None)
if chapter_id is not None:
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None)
elif section_id is not None:
queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None)
elif sub_section_id is not None:
queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None)
elif sub_sub_section_id is not None:
queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None)
else:
queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None)
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content Rejected List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content Rejected list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class Keywords(ListAPIView):
queryset = Content.objects.all()
def get(self, request):
try:
chapter_id = request.query_params.get('chapter', None)
section_id = request.query_params.get('section', None)
sub_section_id = request.query_params.get('sub_section', None)
sub_sub_section_id = request.query_params.get('sub_sub_section', None)
if chapter_id is not None:
queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id)
serializer = ChapterKeywordsSerializer(queryset, many=True)
elif section_id is not None:
queryset = SectionKeyword.objects.filter(section__id = section_id)
serializer = SectionKeywordsSerializer(queryset, many=True)
elif sub_section_id is not None:
queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id)
serializer = SubSectionKeywordsSerializer(queryset, many=True)
elif sub_sub_section_id is not None:
queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id)
serializer = SubSubSectionKeywordsSerializer(queryset, many=True)
else:
queryset = self.get_queryset()
serializer = KeywordSerializer(queryset, many=True)
context = {"success": True, "message": "Content List","data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Content list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentContributorCreateView(ListCreateAPIView):
queryset = ContentContributors.objects.all()
serializer_class = ContentContributorSerializer
def post(self, request):
try:
queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first()
if queryset is not None:
if str(queryset.email) == "" and request.data['email'] is not None:
ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email'])
queryset.refresh_from_db()
serializer = ContentContributorSerializer(queryset)
context = {"success": True, "message": "Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
else:
serializer = ContentContributorSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
context = {"success": True, "message": "Successful", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
context = {"success": False, "message": "Invalid Input Data to create Personal details"}
return Response(context, status=status.HTTP_400_BAD_REQUEST)
except Exception as error:
context = {'success': "false", 'message': 'Failed to create Personal Details.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ApprovedContentDownloadView(ListAPIView):
queryset = Book.objects.all()
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
book = request.query_params.get('book', None)
chapters=Chapter.objects.filter(book_id=book).order_by('id')
serializer = ApprovedContentSerializer(chapters, many=True)
for data in serializer.data:
for d in data['chapter']:
final_list.append(d)
repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords']
data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5)))))
exists = os.path.isfile('ApprovedContent.csv')
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('ApprovedContent.csv')
data_frame.to_csv(path + 'ApprovedContent.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List", "data": 'media/files/ApprovedContent.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentStatusDownloadView(RetrieveUpdateAPIView):
queryset = HardSpot.objects.all()
serializer_class = HardSpotCreateSerializer
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
book_id = request.query_params.get('book', None)
book_name=""
if book_id is not None:
book_name=Book.objects.get(id=book_id)
chapters=Chapter.objects.filter(book__id=book_id).order_by('id')
serializer = ContentStatusSerializer(chapters, many=True)
for data in serializer.data:
for d in data['chapter']:
final_list.append(d)
data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots'])
exists = os.path.isfile('{}_contentstatus.csv'.format(book_name))
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('{}_contentstatus.csv'.format(book_name))
# data_frame.to_excel(path + 'contentstatus.xlsx')
data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List","data": 'media/files/{}_contentstatus.csv'.format(book_name)}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@permission_classes((IsAuthenticated,))
class ContentContributorsDownloadView(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = HardSpotCreateSerializer
def get(self, request):
try:
final_list = []
import os
from shutil import copyfile
state_id = request.query_params.get('state', None)
if state_id is not None:
queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct()
else:
queryset = self.get_queryset()
serializer = ContentContributorsSerializer(queryset, many=True)
res_list = []
for i in range(len(serializer.data)):
if serializer.data[i] not in serializer.data[i + 1:]:
res_list.append(serializer.data[i])
for data in res_list:
for d in res_list:
final_list.append(d)
data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates()
exists = os.path.isfile('content_contributers.csv')
path = settings.MEDIA_ROOT + '/files/'
if exists:
os.remove('content_contributers.csv')
# data_frame.to_excel(path + 'content_contributers.xlsx')
data_frame.to_csv(path + 'content_contributers.csv', encoding="utf-8-sig", index=False)
context = {"success": True, "message": "Activity List","data": 'media/files/content_contributers.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = { 'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class GetSASView(ListAPIView):
def get(self,request):
try:
sas_url = block_blob_service.generate_container_shared_access_signature(
CONTAINER_NAME,
ContainerPermissions.WRITE,
datetime.utcnow() + timedelta(hours=1),
)
base_url=account_name+".blob.core.windows.net/"+CONTAINER_NAME
context = {"success": True, "message": "url link", "token":sas_url,"base_url":base_url}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class GetSasDownloadView(ListAPIView):
def get(self,request):
from evolve import settings
accountName = settings.AZURE_ACCOUNT_NAME
accountKey = settings.AZURE_ACCOUNT_KEY
containerName= settings.AZURE_CONTAINER
try:
blobService = BlockBlobService(account_name=accountName, account_key=accountKey)
sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10))
context = {"success": True, "token":sas_token}
return Response(context, status=status.HTTP_200_OK)
except:
return None
class ContentListUrlUpdate(ListAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def get(self, request):
try:
queryset = self.get_queryset().filter(approved=True)
serializer = ContentStatusSerializerFileFormat(queryset, many=True)
context = {"success": True, "message": "OtherContent Approved List", "data": serializer.data}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlPutRequest(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def post(self, request):
try:
datalist = request.data
print(datalist)
for data in datalist:
print(data)
Content.objects.filter(pk=data['content_id']).update(video=data['video'])
context = {"success": True, "message": "update successful"}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView):
queryset = Content.objects.all()
serializer_class = ContentStatusSerializer
def post(self, request):
try:
datalist = request.data
print(datalist)
for data in datalist:
Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database'])
context = {"success": True, "message": "update successful"}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get OtherContent Approved list.'}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class BackupContent(ListAPIView):
queryset = Book.objects.all()
def get(self,request):
try:
t = threading.Thread(target=self.index, args=(), kwargs={})
t.setDaemon(True)
t.start()
context = {"success": True, "message": "Activity List", "data": 'media/files/BackupContent.csv'}
return Response(context, status=status.HTTP_200_OK)
except Exception as error:
context = {'success': "false", 'message': 'Failed to get Activity list.' ,"error" :str(error)}
return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def index(self):
final_list,final = [],[]
queryset = Content.objects.filter(approved=True)
for i in queryset:
try:
if i.video is not None :
final=[i.id,i.video]
final_list.append(final)
except Exception as e:
pass
path = settings.MEDIA_ROOT + '/files/'
data_frame = pd.DataFrame(final_list , columns=['id','url'])
data_frame.to_csv(path+ 'BackupContent.csv', encoding="utf-8-sig", index=False)
| 47.246046
| 386
| 0.654986
| 2,889
| 26,883
| 5.893043
| 0.098996
| 0.025374
| 0.05674
| 0.072952
| 0.707372
| 0.674949
| 0.656975
| 0.622496
| 0.606755
| 0.578737
| 0
| 0.007644
| 0.245694
| 26,883
| 568
| 387
| 47.329225
| 0.831936
| 0.003869
| 0
| 0.539112
| 0
| 0
| 0.117324
| 0.009189
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0.002114
| 0.065539
| 0
| 0.319239
| 0.006342
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861eccc43552e108c8eb7cab4531e62034debd26
| 5,446
|
py
|
Python
|
taming/data/ade20k.py
|
ZlodeiBaal/taming
|
b6c0f896992881f154bdfd910a8163ee754df83a
|
[
"MIT"
] | null | null | null |
taming/data/ade20k.py
|
ZlodeiBaal/taming
|
b6c0f896992881f154bdfd910a8163ee754df83a
|
[
"MIT"
] | null | null | null |
taming/data/ade20k.py
|
ZlodeiBaal/taming
|
b6c0f896992881f154bdfd910a8163ee754df83a
|
[
"MIT"
] | 1
|
2022-01-31T15:55:24.000Z
|
2022-01-31T15:55:24.000Z
|
import os
import numpy as np
import cv2
import albumentations
from PIL import Image
from torch.utils.data import Dataset
from taming.data.sflckr import SegmentationBase # for examples included in repo
class Examples(SegmentationBase):
def __init__(self, size=256, random_crop=False, interpolation="bicubic"):
super().__init__(data_csv="data/ade20k_examples.txt",
data_root="data/ade20k_images",
segmentation_root="data/ade20k_segmentations",
size=size, random_crop=random_crop,
interpolation=interpolation,
n_labels=151, shift_segmentation=False)
# With semantic map and scene label
class ADE20kBase(Dataset):
def __init__(self, config=None, size=None, random_crop=False, interpolation="bicubic", crop_size=None):
self.split = self.get_split()
self.n_labels = 151 # unknown + 150
self.data_csv = {"train": "data/ade20k_train.txt",
"validation": "data/ade20k_test.txt"}[self.split]
self.data_root = "./data/ade20k_root"
with open(os.path.join(self.data_root, "sceneCategories.txt"), "r") as f:
self.scene_categories = f.read().splitlines()
self.scene_categories = dict(line.split() for line in self.scene_categories)
with open(self.data_csv, "r") as f:
self.image_paths = f.read().splitlines()
self._length = len(self.image_paths)
ss = self.split
if ss=='train':
ss='training'
self.labels = {
"relative_file_path_": [l for l in self.image_paths],
"file_path_": [os.path.join(self.data_root, "images",ss, l)
for l in self.image_paths],
"relative_segmentation_path_": [l.replace(".jpg", ".png")
for l in self.image_paths],
"segmentation_path_": [os.path.join(self.data_root, "annotations",ss,
l.replace(".jpg", ".png"))
for l in self.image_paths],
"scene_category": [self.scene_categories[l.replace(".jpg", "")]
for l in self.image_paths],
}
size = None if size is not None and size<=0 else size
self.size = size
if crop_size is None:
self.crop_size = size if size is not None else None
else:
self.crop_size = crop_size
if self.size is not None:
self.interpolation = interpolation
self.interpolation = {
"nearest": cv2.INTER_NEAREST,
"bilinear": cv2.INTER_LINEAR,
"bicubic": cv2.INTER_CUBIC,
"area": cv2.INTER_AREA,
"lanczos": cv2.INTER_LANCZOS4}[self.interpolation]
self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=self.interpolation)
self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,
interpolation=cv2.INTER_NEAREST)
if crop_size is not None:
self.center_crop = not random_crop
if self.center_crop:
self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
else:
self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
self.preprocessor = self.cropper
def __len__(self):
return self._length
def __getitem__(self, i):
example = dict((k, self.labels[k][i]) for k in self.labels)
image = Image.open(example["file_path_"])
if not image.mode == "RGB":
image = image.convert("RGB")
image = np.array(image).astype(np.uint8)
if self.size is not None:
image = self.image_rescaler(image=image)["image"]
segmentation = Image.open(example["segmentation_path_"])
segmentation = np.array(segmentation).astype(np.uint8)
if self.size is not None:
segmentation = self.segmentation_rescaler(image=segmentation)["image"]
if self.size is not None:
processed = self.preprocessor(image=image, mask=segmentation)
else:
processed = {"image": image, "mask": segmentation}
example["image"] = (processed["image"]/127.5 - 1.0).astype(np.float32)
segmentation = processed["mask"]
onehot = np.eye(self.n_labels)[segmentation]
example["segmentation"] = onehot
return example
class ADE20kTrain(ADE20kBase):
# default to random_crop=True
def __init__(self, config=None, size=None, random_crop=True, interpolation="bicubic", crop_size=None):
super().__init__(config=config, size=size, random_crop=random_crop,
interpolation=interpolation, crop_size=crop_size)
def get_split(self):
return "train"
class ADE20kValidation(ADE20kBase):
def get_split(self):
return "validation"
if __name__ == "__main__":
dset = ADE20kValidation()
ex = dset[0]
for k in ["image", "scene_category", "segmentation"]:
print(type(ex[k]))
try:
print(ex[k].shape)
except:
print(ex[k])
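The `np.eye(self.n_labels)[segmentation]` expression in `__getitem__` converts an integer label map into a one-hot volume by fancy indexing. A tiny standalone sketch of the same pattern, with toy sizes rather than the real 151 ADE20k labels:

```python
# Minimal sketch of one-hot encoding a segmentation map via fancy indexing.
import numpy as np

n_labels = 4
seg = np.array([[0, 1],
                [3, 2]], dtype=np.uint8)   # H x W integer class ids

onehot = np.eye(n_labels)[seg]             # H x W x n_labels
print(onehot.shape)                        # (2, 2, 4)
print(onehot[1, 0])                        # [0. 0. 0. 1.]  -> class 3
```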
| 42.546875
| 107
| 0.58722
| 617
| 5,446
| 4.983793
| 0.228525
| 0.033821
| 0.03187
| 0.029594
| 0.289106
| 0.225041
| 0.199024
| 0.168455
| 0.068293
| 0.022114
| 0
| 0.014819
| 0.306096
| 5,446
| 127
| 108
| 42.88189
| 0.798889
| 0.01928
| 0
| 0.119266
| 0
| 0
| 0.091267
| 0.018178
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06422
| false
| 0
| 0.06422
| 0.027523
| 0.201835
| 0.027523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
861f13a8761f8f22a82c122d42219d7e56bf820e
| 14,650
|
py
|
Python
|
templates/federated_reporting/distributed_cleanup.py
|
olehermanse/masterfiles
|
bcee0a8c0a925e885ba47ba3300b96c722b91f02
|
[
"MIT"
] | 44
|
2015-01-12T05:26:46.000Z
|
2021-08-24T02:47:19.000Z
|
templates/federated_reporting/distributed_cleanup.py
|
olehermanse/masterfiles
|
bcee0a8c0a925e885ba47ba3300b96c722b91f02
|
[
"MIT"
] | 1,104
|
2015-01-02T08:17:57.000Z
|
2022-03-31T15:58:37.000Z
|
templates/federated_reporting/distributed_cleanup.py
|
Lex-2008/masterfiles
|
b43c44af2c4e544ff7d044e76580ced2168ce5e0
|
[
"MIT"
] | 79
|
2015-01-05T19:13:03.000Z
|
2021-08-25T07:57:31.000Z
|
#!/usr/bin/env python3
"""
fr_distributed_cleanup.py - a script to remove hosts which have migrated to
other feeder hubs. To be run on Federated Reporting superhub
after each import of feeder data.
First, to setup, enable fr_distributed_cleanup by setting a class in augments (def.json).
This enables policy in cfe_internal/enterprise/federation/federation.cf
```json
{
"classes": {
"cfengine_mp_enable_fr_distributed_cleanup": [ "any::" ]
}
}
```
After the policy has run on superhub and feeders, run this script
to setup fr_distributed_cleanup role and account on all feeders and superhubs with
proper RBAC settings for normal operation.
You will be prompted for superhub admin credentials and then
admin credentials on each feeder.
"""
import argparse
import logging
import os
import platform
import string
import random
import subprocess
import sys
from getpass import getpass
from nova_api import NovaApi
from cfsecret import read_secret, write_secret
WORKDIR = None
CFE_FR_TABLES = None
# get WORKDIR and CFE_FR_TABLES from config.sh
config_sh_path = os.path.join(os.path.dirname(__file__), "config.sh")
cmd = "source {}; echo $WORKDIR; echo $CFE_FR_TABLES".format(config_sh_path)
with subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=True, executable="/bin/bash"
) as proc:
lines = proc.stdout.readlines()
WORKDIR = lines[0].decode().strip()
CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()]
if not WORKDIR or not CFE_FR_TABLES:
print("Unable to get WORKDIR and CFE_FR_TABLES values from config.sh")
sys.exit(1)
# Primary dir in which to place various needed files
DISTRIBUTED_CLEANUP_DIR = "/opt/cfengine/federation/cftransport/distributed_cleanup"
# collect cert files from /var/cfengine/httpd/ssl/certs on
# superhub and feeders and cat all together into hubs.cert
CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, "hubs.cert")
# Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything.
# api calls will overwrite fr_distributed_cleanup user and role on superhub and all feeders.
DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, "state/fr_distributed_cleanup.cfsecret")
def interactive_setup():
fr_distributed_cleanup_password = "".join(random.choices(string.printable, k=20))
admin_pass = getpass(
prompt="Enter admin password for superhub {}: ".format(platform.node())
)
api = NovaApi(api_user="admin", api_password=admin_pass)
# first confirm that this host is a superhub
status = api.fr_hub_status()
if (
status["status"] == 200
and status["role"] == "superhub"
and status["configured"]
):
logger.debug("This host is a superhub configured for Federated Reporting.")
else:
if status["status"] == 401:
print("admin credentials are incorrect, try again")
sys.exit(1)
else:
print(
"Check the status to ensure role is superhub and configured is True. {}".format(
status
)
)
sys.exit(1)
feederResponse = api.fr_remote_hubs()
if not feederResponse["hubs"]:
print(
"No attached feeders. Please attach at least one feeder hub before running this script."
)
sys.exit(1)
email = input("Enter email for fr_distributed_cleanup accounts: ")
logger.info("Creating fr_distributed_cleanup role on superhub...")
response = api.put(
"role",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup role",
"includeContext": "cfengine",
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup role on superhub. {}".format(
response
)
)
sys.exit(1)
response = api.put_role_permissions(
"fr_distributed_cleanup", ["query.post", "remoteHub.list", "hubStatus.get"]
)
if response["status"] != 201:
print("Unable to set RBAC permissions on role fr_distributed_cleanup")
sys.exit(1)
logger.info("Creating fr_distributed_cleanup user on superhub")
response = api.put(
"user",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup user",
"email": "{}".format(email),
"password": "{}".format(fr_distributed_cleanup_password),
"roles": ["fr_distributed_cleanup"],
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup user on superhub. {}".format(
response
)
)
sys.exit(1)
for hub in feederResponse["hubs"]:
feeder_credentials = getpass(
prompt="Enter admin credentials for {} at {}: ".format(
hub["ui_name"], hub["api_url"]
)
)
feeder_hostname = hub["ui_name"]
feeder_api = NovaApi(
api_user="admin",
api_password=feeder_credentials,
cert_path=CERT_PATH,
hostname=feeder_hostname,
)
logger.info("Creating fr_distributed_cleanup role on %s", feeder_hostname)
response = feeder_api.put(
"role",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup role",
"includeContext": "cfengine",
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup role on superhub. {}".format(
response
)
)
sys.exit(1)
response = feeder_api.put_role_permissions(
"fr_distributed_cleanup", ["host.delete"]
)
if response["status"] != 201:
print("Unable to set RBAC permissions on role fr_distributed_cleanup")
sys.exit(1)
logger.info("Creating fr_distributed_cleanup user on %s", feeder_hostname)
response = feeder_api.put(
"user",
"fr_distributed_cleanup",
{
"description": "fr_distributed_cleanup Federated Host Cleanup user",
"email": "{}".format(email),
"password": "{}".format(fr_distributed_cleanup_password),
"roles": ["fr_distributed_cleanup"],
},
)
if response["status"] != 201:
print(
"Problem creating fr_distributed_cleanup user on {}. {}".format(
feeder_hostname, response
)
)
sys.exit(1)
write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password)
def main():
if not os.geteuid() == 0:
sys.exit("\n{} must be run as root".format(os.path.basename(__file__)))
parser = argparse.ArgumentParser(
description="Clean up migrating clients in Federated Reporting setup"
)
group = parser.add_mutually_exclusive_group()
group.add_argument("--debug", action="store_true")
group.add_argument("--inform", action="store_true")
args = parser.parse_args()
global logger
logger = logging.getLogger("fr_distributed_cleanup")
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
if args.inform:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
logger.addHandler(ch)
if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH):
if sys.stdout.isatty():
interactive_setup()
else:
print(
"{} requires manual setup, please run as root interactively.".format(
os.path.basename(__file__)
)
)
sys.exit(1)
fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH)
api = NovaApi(
api_user="fr_distributed_cleanup", api_password=fr_distributed_cleanup_password
) # defaults to localhost
response = api.fr_hub_status()
if not (
response["status"] == 200
and response["role"] == "superhub"
and response["configured"]
):
print(
"{} can only be run on a Federated Reporting hub configured to be superhub".format(
os.path.basename(__file__)
)
)
sys.exit(1)
response = api.fr_remote_hubs()
if not response["hubs"]:
print(
"No attached feeders. Please attach at least one feeder hub before running this script."
)
for hub in response["hubs"]:
if hub["role"] != "feeder" or hub["target_state"] != "on":
continue
feeder_hostkey = hub["hostkey"]
feeder_hostname = hub["ui_name"]
feeder_api = NovaApi(
api_user="fr_distributed_cleanup",
api_password=fr_distributed_cleanup_password,
cert_path=CERT_PATH,
hostname=feeder_hostname,
)
response = feeder_api.status()
if response["status"] != 200:
print(
"Unable to get status for feeder {}. Skipping".format(feeder_hostname)
)
continue
sql = "SELECT hub_id FROM __hubs WHERE hostkey = '{}'".format(feeder_hostkey)
response = api.query(sql)
if response["status"] != 200:
print("Unable to query for feeder hub_id. Response was {}".format(response))
continue
# query API should return one row, [0], and one column, [0], in rows value
feeder_hubid = response["rows"][0][0]
sql = """
SELECT DISTINCT hosts.hostkey
FROM hosts
WHERE hub_id = '{0}'
AND EXISTS(
SELECT 1 FROM lastseenhosts ls
JOIN (
SELECT hostkey, max(lastseentimestamp) as newesttimestamp
FROM lastseenhosts
WHERE lastseendirection = 'INCOMING'
GROUP BY hostkey
) as newest
ON ls.hostkey = newest.hostkey
AND ls.lastseentimestamp = newest.newesttimestamp
AND ls.hostkey = hosts.hostkey
AND ls.hub_id != '{0}'
)""".format(
feeder_hubid
)
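# To illustrate what the query above returns (the hostkey below is hypothetical): a host
# that is registered to this feeder (hub_id = feeder_hubid) but whose newest INCOMING
# lastseen record was collected by a different hub has migrated away, so it becomes a
# deletion candidate. The query API hands back one single-column row per such host,
# e.g. response["rows"] == [["SHA=2ab6..."], ...].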
response = api.query(sql)
if response["status"] != 200:
print(
"Unable to query for deletion candidates. Response was {}".format(
response
)
)
sys.exit(1)
logger.debug("Hosts to delete on %s are %s", hub["ui_name"], response["rows"])
hosts_to_delete = response["rows"]
if len(hosts_to_delete) == 0:
logger.info("%s: No hosts to delete. No actions taken.", feeder_hostname)
continue
logger.debug(
"%s host(s) to delete on feeder %s", len(hosts_to_delete), hub["ui_name"]
)
# build up a post-loop SQL statement to delete hosts locally from feeder schemas
# change to feeder schema to make deletions easier/more direct without having to
# specify hub_id in queries
post_sql = "set schema 'hub_{}';\n".format(feeder_hubid)
post_sql += "\\set ON_ERROR_STOP on\n"
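# ON_ERROR_STOP is a psql variable: once set, psql aborts the remaining batch and
# exits non-zero at the first SQL error instead of silently continuing.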
delete_sql = ""
post_hostkeys = []
for row in hosts_to_delete:
# The query API returns rows which are lists of column values.
# We only selected hostkey so will take the first value.
host_to_delete = row[0]
response = feeder_api.delete("host", host_to_delete)
# both 202 Accepted and 404 Not Found are acceptable responses
if response["status"] not in [202, 404]:
logger.warning(
"Delete %s on feeder %s got %s status code",
host_to_delete,
feeder_hostname,
response["status"],
)
continue
# only add the host_to_delete if it was successfully deleted on the feeder
post_hostkeys.append(host_to_delete)
if len(post_hostkeys) == 0:
logger.info(
"No hosts on feeder %s need processing on superhub so skipping post processing",
feeder_hostname,
)
continue
# simulate the host API delete process by setting current_timestamp in the deleted column
# and delete from all federated tables, similar to the clear_hosts_references() plpgsql function.
post_sql += "INSERT INTO __hosts (hostkey,deleted) VALUES "
# multiple VALUES tuples must be comma-separated or postgres rejects the statement
delete_sql += ", ".join(
"('{}', CURRENT_TIMESTAMP)".format(hostkey) for hostkey in post_hostkeys
)
delete_sql += (
" ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\n"
)
clear_sql = "set schema 'public';\n"
for table in CFE_FR_TABLES:
# special case of partitioning: operating on the parent table will work
if "__promiselog_*" in table:
table = "__promiselog"
clear_sql += (
"DELETE FROM {} WHERE hub_id = {} AND hostkey IN ({});\n".format(
table,
feeder_hubid,
",".join(["'{}'".format(hk) for hk in post_hostkeys]),
)
)
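# Rough sketch of the batch piped to psql below (the hub_id/hostkey values are
# hypothetical; one DELETE is emitted per entry in CFE_FR_TABLES):
#   set schema 'hub_2';
#   \set ON_ERROR_STOP on
#   INSERT INTO __hosts (hostkey,deleted) VALUES ('SHA=2ab6...', CURRENT_TIMESTAMP)
#     ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;
#   set schema 'public';
#   DELETE FROM <table> WHERE hub_id = 2 AND hostkey IN ('SHA=2ab6...');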
post_sql += delete_sql + clear_sql
logger.debug("Running SQL:\n%s", post_sql)
with subprocess.Popen(
["/var/cfengine/bin/psql", "cfdb"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
logger.debug("got a proc, sending sql...")
outs, errs = proc.communicate(input=post_sql.encode())
if "ERROR" in errs.decode("utf-8"):
print(
"Problem running post processing SQL. returncode was {}, stderr:\n{}\nstdout:\n{}".format(
proc.returncode, errs.decode("utf-8"), outs.decode("utf-8")
)
)
sys.exit(1)
logger.debug(
"Ran post processing SQL. returncode was %s, stderr:\n%s\nstdout:\n%s",
proc.returncode,
errs.decode("utf-8"),
outs.decode("utf-8"),
)
if len(hosts_to_delete) != 0:
logger.info(
"%s: %s host deletions processed",
hub["ui_name"],
len(hosts_to_delete),
)
if __name__ == "__main__":
main()
else:
raise ImportError("fr_distributed_cleanup.py must only be used as a script!")
avg_line_length: 35.386473 | max_line_length: 110 | alphanum_fraction: 0.597543
(remaining per-file quality-signal columns for this record are unlabeled numeric values and are omitted here)
hexsha: 8626687151185e3140516d592a31a3534739d928 | size: 72,182 | ext: py | lang: Python
max_stars_repo_path: Lib/test/test_urllib.py | max_stars_repo_name: Kshitijkrishnadas/haribol | max_stars_repo_head_hexsha: ca45e633baaabaad3bb923f5633340ccf88d996c | max_stars_repo_licenses: ["bzip2-1.0.6"] | max_stars_count: 4 | stars events: 2020-08-06T04:39:33.000Z to 2020-12-01T08:35:09.000Z
max_issues_repo_path: Lib/test/test_urllib.py | max_issues_repo_name: Kshitijkrishnadas/haribol | max_issues_repo_head_hexsha: ca45e633baaabaad3bb923f5633340ccf88d996c | max_issues_repo_licenses: ["bzip2-1.0.6"] | max_issues_count: 6 | issues events: 2020-07-22T01:19:01.000Z to 2021-04-25T15:03:35.000Z
max_forks_repo_path: Lib/test/test_urllib.py | max_forks_repo_name: Kshitijkrishnadas/haribol | max_forks_repo_head_hexsha: ca45e633baaabaad3bb923f5633340ccf88d996c | max_forks_repo_licenses: ["bzip2-1.0.6"] | max_forks_count: 2 | forks events: 2020-12-02T03:52:33.000Z to 2021-01-20T01:36:09.000Z
content:
"""Regression tests for what was in Python 2's "urllib" module"""
import urllib.parse
import urllib.request
import urllib.error
import http.client
import email.message
import io
import unittest
from unittest.mock import patch
from test import support
import os
try:
import ssl
except ImportError:
ssl = None
import sys
import tempfile
from nturl2path import url2pathname, pathname2url
from base64 import b64encode
import collections
def hexescape(char):
"""Escape char as RFC 2396 specifies"""
hex_repr = hex(ord(char))[2:].upper()
if len(hex_repr) == 1:
hex_repr = "0%s" % hex_repr
return "%" + hex_repr
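# For illustration: hexescape('a') returns '%61' and hexescape('\n') returns '%0A',
# i.e. the RFC 2396 percent-escape of a single character, zero-padded to two hex digits.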
# Shortcut for testing FancyURLopener
_urlopener = None
def urlopen(url, data=None, proxies=None):
"""urlopen(url [, data]) -> open file-like object"""
global _urlopener
if proxies is not None:
opener = urllib.request.FancyURLopener(proxies=proxies)
elif not _urlopener:
opener = FancyURLopener()
_urlopener = opener
else:
opener = _urlopener
if data is None:
return opener.open(url)
else:
return opener.open(url, data)
def FancyURLopener():
with support.check_warnings(
('FancyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
return urllib.request.FancyURLopener()
def fakehttp(fakedata, mock_close=False):
class FakeSocket(io.BytesIO):
io_refs = 1
def sendall(self, data):
FakeHTTPConnection.buf = data
def makefile(self, *args, **kwds):
self.io_refs += 1
return self
def read(self, amt=None):
if self.closed:
return b""
return io.BytesIO.read(self, amt)
def readline(self, length=None):
if self.closed:
return b""
return io.BytesIO.readline(self, length)
def close(self):
self.io_refs -= 1
if self.io_refs == 0:
io.BytesIO.close(self)
class FakeHTTPConnection(http.client.HTTPConnection):
# buffer to store data for verification in urlopen tests.
buf = None
def connect(self):
self.sock = FakeSocket(self.fakedata)
type(self).fakesock = self.sock
if mock_close:
# bpo-36918: HTTPConnection destructor calls close() which calls
# flush(). Problem: flush() calls self.fp.flush() which raises
# "ValueError: I/O operation on closed file" which is logged as an
# "Exception ignored in". Override close() to silence this error.
def close(self):
pass
FakeHTTPConnection.fakedata = fakedata
return FakeHTTPConnection
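# Typical usage of the factory above (a sketch): assign
# http.client.HTTPConnection = fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!") so that
# urlopen() reads the canned bytes instead of touching the network, then restore the
# saved class afterwards; FakeHTTPMixin.fakehttp()/unfakehttp() below automate this.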
class FakeHTTPMixin(object):
def fakehttp(self, fakedata, mock_close=False):
fake_http_class = fakehttp(fakedata, mock_close=mock_close)
self._connection_class = http.client.HTTPConnection
http.client.HTTPConnection = fake_http_class
def unfakehttp(self):
http.client.HTTPConnection = self._connection_class
class FakeFTPMixin(object):
def fakeftp(self):
class FakeFtpWrapper(object):
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
pass
def retrfile(self, file, type):
return io.BytesIO(), 0
def close(self):
pass
self._ftpwrapper_class = urllib.request.ftpwrapper
urllib.request.ftpwrapper = FakeFtpWrapper
def unfakeftp(self):
urllib.request.ftpwrapper = self._ftpwrapper_class
class urlopen_FileTests(unittest.TestCase):
"""Test urlopen() opening a temporary file.
Try to test as much functionality as possible so as to cut down on reliance
on connecting to the Net for testing.
"""
def setUp(self):
# Create a temp file to use for testing
self.text = bytes("test_urllib: %s\n" % self.__class__.__name__,
"ascii")
f = open(support.TESTFN, 'wb')
try:
f.write(self.text)
finally:
f.close()
self.pathname = support.TESTFN
self.returned_obj = urlopen("file:%s" % self.pathname)
def tearDown(self):
"""Shut down the open object"""
self.returned_obj.close()
os.remove(support.TESTFN)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines", "fileno",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.returned_obj, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_read(self):
self.assertEqual(self.text, self.returned_obj.read())
def test_readline(self):
self.assertEqual(self.text, self.returned_obj.readline())
self.assertEqual(b'', self.returned_obj.readline(),
"calling readline() after exhausting the file did not"
" return an empty string")
def test_readlines(self):
lines_list = self.returned_obj.readlines()
self.assertEqual(len(lines_list), 1,
"readlines() returned the wrong number of lines")
self.assertEqual(lines_list[0], self.text,
"readlines() returned improper text")
def test_fileno(self):
file_num = self.returned_obj.fileno()
self.assertIsInstance(file_num, int, "fileno() did not return an int")
self.assertEqual(os.read(file_num, len(self.text)), self.text,
"Reading on the file descriptor returned by fileno() "
"did not return the expected text")
def test_close(self):
# Test close() by calling it here and then having it be called again
# by the tearDown() method for the test
self.returned_obj.close()
def test_info(self):
self.assertIsInstance(self.returned_obj.info(), email.message.Message)
def test_geturl(self):
self.assertEqual(self.returned_obj.geturl(), self.pathname)
def test_getcode(self):
self.assertIsNone(self.returned_obj.getcode())
def test_iter(self):
# Test iterator
# Don't need to count number of iterations since test would fail the
# instant it returned anything beyond the first line from the
# comparison.
# Use the iterator in the usual implicit way to test for ticket #4608.
for line in self.returned_obj:
self.assertEqual(line, self.text)
def test_relativelocalfile(self):
self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)
class ProxyTests(unittest.TestCase):
def setUp(self):
# Records changes to env vars
self.env = support.EnvironmentVarGuard()
# Delete all proxy related env vars
for k in list(os.environ):
if 'proxy' in k.lower():
self.env.unset(k)
def tearDown(self):
# Restore all proxy related env vars
self.env.__exit__()
del self.env
def test_getproxies_environment_keep_no_proxies(self):
self.env.set('NO_PROXY', 'localhost')
proxies = urllib.request.getproxies_environment()
# getproxies_environment() uses lowercase, truncated keys (no '_proxy' suffix)
self.assertEqual('localhost', proxies['no'])
# List of no_proxies with space.
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))
self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))
self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))
def test_proxy_cgi_ignore(self):
try:
self.env.set('HTTP_PROXY', 'http://somewhere:3128')
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
self.env.set('REQUEST_METHOD', 'GET')
proxies = urllib.request.getproxies_environment()
self.assertNotIn('http', proxies)
finally:
self.env.unset('REQUEST_METHOD')
self.env.unset('HTTP_PROXY')
def test_proxy_bypass_environment_host_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')
self.assertTrue(bypass('localhost'))
self.assertTrue(bypass('LocalHost')) # MixedCase
self.assertTrue(bypass('LOCALHOST')) # UPPERCASE
self.assertTrue(bypass('.localhost'))
self.assertTrue(bypass('newdomain.com:1234'))
self.assertTrue(bypass('.newdomain.com:1234'))
self.assertTrue(bypass('foo.d.o.t')) # issue 29142
self.assertTrue(bypass('d.o.t'))
self.assertTrue(bypass('anotherdomain.com:8888'))
self.assertTrue(bypass('.anotherdomain.com:8888'))
self.assertTrue(bypass('www.newdomain.com:1234'))
self.assertFalse(bypass('prelocalhost'))
self.assertFalse(bypass('newdomain.com')) # no port
self.assertFalse(bypass('newdomain.com:1235')) # wrong port
def test_proxy_bypass_environment_always_match(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY', '*')
self.assertTrue(bypass('newdomain.com'))
self.assertTrue(bypass('newdomain.com:1234'))
self.env.set('NO_PROXY', '*, anotherdomain.com')
self.assertTrue(bypass('anotherdomain.com'))
self.assertFalse(bypass('newdomain.com'))
self.assertFalse(bypass('newdomain.com:1234'))
def test_proxy_bypass_environment_newline(self):
bypass = urllib.request.proxy_bypass_environment
self.env.set('NO_PROXY',
'localhost, anotherdomain.com, newdomain.com:1234')
self.assertFalse(bypass('localhost\n'))
self.assertFalse(bypass('anotherdomain.com:8888\n'))
self.assertFalse(bypass('newdomain.com:1234\n'))
class ProxyTests_withOrderedEnv(unittest.TestCase):
def setUp(self):
# We need to test conditions where variable order _is_ significant
self._saved_env = os.environ
# Monkey patch os.environ, start with empty fake environment
os.environ = collections.OrderedDict()
def tearDown(self):
os.environ = self._saved_env
def test_getproxies_environment_prefer_lowercase(self):
# Test lowercase preference with removal
os.environ['no_proxy'] = ''
os.environ['No_Proxy'] = 'localhost'
self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
os.environ['http_proxy'] = ''
os.environ['HTTP_PROXY'] = 'http://somewhere:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual({}, proxies)
# Test lowercase preference of proxy bypass and correct matching including ports
os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'
os.environ['No_Proxy'] = 'xyz.com'
self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))
self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))
self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))
self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))
# Test lowercase preference with replacement
os.environ['http_proxy'] = 'http://somewhere:3128'
os.environ['Http_Proxy'] = 'http://somewhereelse:3128'
proxies = urllib.request.getproxies_environment()
self.assertEqual('http://somewhere:3128', proxies['http'])
class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):
"""Test urlopen() opening a fake http connection."""
def check_read(self, ver):
self.fakehttp(b"HTTP/" + ver + b" 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_url_fragment(self):
# Issue #11703: geturl() omits fragments in the original URL.
url = 'http://docs.python.org/library/urllib.html#OK'
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
fp = urllib.request.urlopen(url)
self.assertEqual(fp.geturl(), url)
finally:
self.unfakehttp()
def test_willclose(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
try:
resp = urlopen("http://www.python.org")
self.assertTrue(resp.fp.will_close)
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_path_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = f"//localhost:7777/test{char}/"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(char, resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_path_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost:7777?a=1 HTTP/1.1\r\nX-injected: header\r\nTEST: 123"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
# We explicitly test urllib.request.urlopen() instead of the top
# level 'def urlopen()' function defined in this... (quite ugly)
# test suite. They use different url opening codepaths. Plain
# urlopen uses FancyURLOpener which goes via a codepath that
# calls urllib.parse.quote() on the URL which makes all of the
# above attempts at injection within the url _path_ safe.
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r.*(found at least . .)"):
urllib.request.urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urllib.request.urlopen(f"https:{schemeless_url}")
# This code path quotes the URL so there is no injection.
resp = urlopen(f"http:{schemeless_url}")
self.assertNotIn(' ', resp.geturl())
self.assertNotIn('\r', resp.geturl())
self.assertNotIn('\n', resp.geturl())
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_host_with_control_char_rejected(self):
for char_no in list(range(0, 0x21)) + [0x7f]:
char = chr(char_no)
schemeless_url = f"//localhost{char}/test/"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
try:
escaped_char_repr = repr(char).replace('\\', r'\\')
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, f"contain control.*{escaped_char_repr}"):
urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, f"contain control.*{escaped_char_repr}"):
urlopen(f"https:{schemeless_url}")
finally:
self.unfakehttp()
@unittest.skipUnless(ssl, "ssl module required")
def test_url_host_with_newline_header_injection_rejected(self):
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello.")
host = "localhost\r\nX-injected: header\r\n"
schemeless_url = "//" + host + ":8080/test/?test=a"
try:
InvalidURL = http.client.InvalidURL
with self.assertRaisesRegex(
InvalidURL, r"contain control.*\\r"):
urlopen(f"http:{schemeless_url}")
with self.assertRaisesRegex(InvalidURL, r"contain control.*\\n"):
urlopen(f"https:{schemeless_url}")
finally:
self.unfakehttp()
def test_read_0_9(self):
# "0.9" response accepted (but not "simple responses" without
# a status line)
self.check_read(b"0.9")
def test_read_1_0(self):
self.check_read(b"1.0")
def test_read_1_1(self):
self.check_read(b"1.1")
def test_read_bogus(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
self.assertRaises(OSError, urlopen, "http://python.org/")
finally:
self.unfakehttp()
def test_invalid_redirect(self):
# urlopen() should raise OSError for many error codes.
self.fakehttp(b'''HTTP/1.1 302 Found
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Location: file://guidocomputer.athome.com:/python/license
Connection: close
Content-Type: text/html; charset=iso-8859-1
''', mock_close=True)
try:
msg = "Redirection to url 'file:"
with self.assertRaisesRegex(urllib.error.HTTPError, msg):
urlopen("http://python.org/")
finally:
self.unfakehttp()
def test_redirect_limit_independent(self):
# Ticket #12923: make sure independent requests each use their
# own retry limit.
for i in range(FancyURLopener().maxtries):
self.fakehttp(b'''HTTP/1.1 302 Found
Location: file://guidocomputer.athome.com:/python/license
Connection: close
''', mock_close=True)
try:
self.assertRaises(urllib.error.HTTPError, urlopen,
"http://something")
finally:
self.unfakehttp()
def test_empty_socket(self):
# urlopen() raises OSError if the underlying socket does not send any
# data. (#1680230)
self.fakehttp(b'')
try:
self.assertRaises(OSError, urlopen, "http://something")
finally:
self.unfakehttp()
def test_missing_localfile(self):
# Test for #10836
with self.assertRaises(urllib.error.URLError) as e:
urlopen('file://localhost/a/file/which/doesnot/exists.py')
self.assertTrue(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_file_notexists(self):
fd, tmp_file = tempfile.mkstemp()
tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')
try:
self.assertTrue(os.path.exists(tmp_file))
with urlopen(tmp_fileurl) as fobj:
self.assertTrue(fobj)
finally:
os.close(fd)
os.unlink(tmp_file)
self.assertFalse(os.path.exists(tmp_file))
with self.assertRaises(urllib.error.URLError):
urlopen(tmp_fileurl)
def test_ftp_nohost(self):
test_ftp_url = 'ftp:///path'
with self.assertRaises(urllib.error.URLError) as e:
urlopen(test_ftp_url)
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
def test_ftp_nonexisting(self):
with self.assertRaises(urllib.error.URLError) as e:
urlopen('ftp://localhost/a/file/which/doesnot/exists.py')
self.assertFalse(e.exception.filename)
self.assertTrue(e.exception.reason)
@patch.object(urllib.request, 'MAXFTPCACHE', 0)
def test_ftp_cache_pruning(self):
self.fakeftp()
try:
urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])
urlopen('ftp://localhost')
finally:
self.unfakeftp()
def test_userpass_inurl(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
fp = urlopen("http://user:pass@python.org/")
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_userpass_inurl_w_spaces(self):
self.fakehttp(b"HTTP/1.0 200 OK\r\n\r\nHello!")
try:
userpass = "a b:c d"
url = "http://{}@python.org/".format(userpass)
fakehttp_wrapper = http.client.HTTPConnection
authorization = ("Authorization: Basic %s\r\n" %
b64encode(userpass.encode("ASCII")).decode("ASCII"))
fp = urlopen(url)
# The authorization header must be in place
self.assertIn(authorization, fakehttp_wrapper.buf.decode("UTF-8"))
self.assertEqual(fp.readline(), b"Hello!")
self.assertEqual(fp.readline(), b"")
# the spaces are quoted in URL so no match
self.assertNotEqual(fp.geturl(), url)
self.assertEqual(fp.getcode(), 200)
finally:
self.unfakehttp()
def test_URLopener_deprecation(self):
with support.check_warnings(('',DeprecationWarning)):
urllib.request.URLopener()
@unittest.skipUnless(ssl, "ssl module required")
def test_cafile_and_context(self):
context = ssl.create_default_context()
with support.check_warnings(('', DeprecationWarning)):
with self.assertRaises(ValueError):
urllib.request.urlopen(
"https://localhost", cafile="/nonexistent/path", context=context
)
class urlopen_DataTests(unittest.TestCase):
"""Test urlopen() opening a data URL."""
def setUp(self):
# text containing URL special- and unicode-characters
self.text = "test data URLs :;,%=& \u00f6 \u00c4 "
# 2x1 pixel RGB PNG image with one black and one white pixel
self.image = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00'
b'\x01\x08\x02\x00\x00\x00{@\xe8\xdd\x00\x00\x00\x01sRGB\x00\xae'
b'\xce\x1c\xe9\x00\x00\x00\x0fIDAT\x08\xd7c```\xf8\xff\xff?\x00'
b'\x06\x01\x02\xfe\no/\x1e\x00\x00\x00\x00IEND\xaeB`\x82')
self.text_url = (
"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3"
"D%26%20%C3%B6%20%C3%84%20")
self.text_url_base64 = (
"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs"
"sJT0mIPYgxCA%3D")
# base64 encoded data URL that contains ignorable spaces,
# such as "\n", " ", "%0A", and "%20".
self.image_url = (
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\n"
"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 "
"vHgAAAABJRU5ErkJggg%3D%3D%0A%20")
self.text_url_resp = urllib.request.urlopen(self.text_url)
self.text_url_base64_resp = urllib.request.urlopen(
self.text_url_base64)
self.image_url_resp = urllib.request.urlopen(self.image_url)
def test_interface(self):
# Make sure object returned by urlopen() has the specified methods
for attr in ("read", "readline", "readlines",
"close", "info", "geturl", "getcode", "__iter__"):
self.assertTrue(hasattr(self.text_url_resp, attr),
"object returned by urlopen() lacks %s attribute" %
attr)
def test_info(self):
self.assertIsInstance(self.text_url_resp.info(), email.message.Message)
self.assertEqual(self.text_url_base64_resp.info().get_params(),
[('text/plain', ''), ('charset', 'ISO-8859-1')])
self.assertEqual(self.image_url_resp.info()['content-length'],
str(len(self.image)))
self.assertEqual(urllib.request.urlopen("data:,").info().get_params(),
[('text/plain', ''), ('charset', 'US-ASCII')])
def test_geturl(self):
self.assertEqual(self.text_url_resp.geturl(), self.text_url)
self.assertEqual(self.text_url_base64_resp.geturl(),
self.text_url_base64)
self.assertEqual(self.image_url_resp.geturl(), self.image_url)
def test_read_text(self):
self.assertEqual(self.text_url_resp.read().decode(
dict(self.text_url_resp.info().get_params())['charset']), self.text)
def test_read_text_base64(self):
self.assertEqual(self.text_url_base64_resp.read().decode(
dict(self.text_url_base64_resp.info().get_params())['charset']),
self.text)
def test_read_image(self):
self.assertEqual(self.image_url_resp.read(), self.image)
def test_missing_comma(self):
self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')
def test_invalid_base64_data(self):
# missing padding character
self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')
class urlretrieve_FileTests(unittest.TestCase):
"""Test urllib.urlretrieve() on local files"""
def setUp(self):
# Create a list of temporary files. Each item in the list is a file
# name (absolute path or relative to the current working directory).
# All files in this list will be deleted in the tearDown method. Note,
# this only helps to make sure temporary files get deleted, but it
# does nothing about trying to close files that may still be open. It
# is the responsibility of the developer to properly close files even
# when exceptional conditions occur.
self.tempFiles = []
# Create a temporary file.
self.registerFileForCleanUp(support.TESTFN)
self.text = b'testing urllib.urlretrieve'
try:
FILE = open(support.TESTFN, 'wb')
FILE.write(self.text)
FILE.close()
finally:
try: FILE.close()
except: pass
def tearDown(self):
# Delete the temporary files.
for each in self.tempFiles:
try: os.remove(each)
except: pass
def constructLocalFileUrl(self, filePath):
filePath = os.path.abspath(filePath)
try:
filePath.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filePath is not encodable to utf8")
return "file://%s" % urllib.request.pathname2url(filePath)
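# Rough example, assuming a POSIX-style path: constructLocalFileUrl('/tmp/some file')
# yields 'file:///tmp/some%20file', because pathname2url() percent-quotes the path.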
def createNewTempFile(self, data=b""):
"""Creates a new temporary file containing the specified data,
registers the file for deletion during the test fixture tear down, and
returns the absolute path of the file."""
newFd, newFilePath = tempfile.mkstemp()
try:
self.registerFileForCleanUp(newFilePath)
newFile = os.fdopen(newFd, "wb")
newFile.write(data)
newFile.close()
finally:
try: newFile.close()
except: pass
return newFilePath
def registerFileForCleanUp(self, fileName):
self.tempFiles.append(fileName)
def test_basic(self):
# Make sure that a local file just gets its own location returned and
# a headers value is returned.
result = urllib.request.urlretrieve("file:%s" % support.TESTFN)
self.assertEqual(result[0], support.TESTFN)
self.assertIsInstance(result[1], email.message.Message,
"did not get an email.message.Message instance "
"as second returned value")
def test_copy(self):
# Test that setting the filename argument works.
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
result = urllib.request.urlretrieve(self.constructLocalFileUrl(
support.TESTFN), second_temp)
self.assertEqual(second_temp, result[0])
self.assertTrue(os.path.exists(second_temp), "copy of the file was not "
"made")
FILE = open(second_temp, 'rb')
try:
text = FILE.read()
FILE.close()
finally:
try: FILE.close()
except: pass
self.assertEqual(self.text, text)
def test_reporthook(self):
# Make sure that the reporthook works.
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):
self.assertIsInstance(block_count, int)
self.assertIsInstance(block_read_size, int)
self.assertIsInstance(file_size, int)
self.assertEqual(block_count, count_holder[0])
count_holder[0] = count_holder[0] + 1
second_temp = "%s.2" % support.TESTFN
self.registerFileForCleanUp(second_temp)
urllib.request.urlretrieve(
self.constructLocalFileUrl(support.TESTFN),
second_temp, hooktester)
def test_reporthook_0_bytes(self):
# Test on zero length file. Should call reporthook only 1 time.
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile()
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 1)
self.assertEqual(report[0][2], 0)
def test_reporthook_5_bytes(self):
# Test on 5 byte file. Should call reporthook only 2 times (once when
# the "network connection" is established and once when the block is
# read).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 5)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 2)
self.assertEqual(report[0][2], 5)
self.assertEqual(report[1][2], 5)
def test_reporthook_8193_bytes(self):
# Test on 8193 byte file. Should call reporthook only 3 times (once
# when the "network connection" is established, once for the next 8192
# bytes, and once for the last byte).
report = []
def hooktester(block_count, block_read_size, file_size, _report=report):
_report.append((block_count, block_read_size, file_size))
srcFileName = self.createNewTempFile(b"x" * 8193)
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),
support.TESTFN, hooktester)
self.assertEqual(len(report), 3)
self.assertEqual(report[0][2], 8193)
self.assertEqual(report[0][1], 8192)
self.assertEqual(report[1][1], 8192)
self.assertEqual(report[2][1], 8192)
class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL,
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL)
finally:
self.unfakehttp()
class QuotingTests(unittest.TestCase):
r"""Tests for urllib.quote() and urllib.quote_plus()
According to RFC 3986 (Uniform Resource Identifiers), to escape a
character you write it as '%' + <2 character US-ASCII hex value>.
The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a
character properly. Case does not matter on the hex letters.
The various character sets specified are:
Reserved characters : ";/?:@&=+$,"
Have special meaning in URIs and must be escaped if not being used for
their special meaning
Data characters : letters, digits, and "-_.!~*'()"
Unreserved and do not need to be escaped; can be, though, if desired
Control characters : 0x00 - 0x1F, 0x7F
Have no use in URIs so must be escaped
space : 0x20
Must be escaped
Delimiters : '<>#%"'
Must be escaped
Unwise : "{}|\^[]`"
Must be escaped
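Examples, for quick reference:
quote('a b/c') == 'a%20b/c' (default safe='/'), while
quote_plus('a b/c') == 'a+b%2Fc' (space becomes '+', '/' is not treated as safe)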
"""
def test_never_quote(self):
# Make sure quote() does not quote letters, digits, and "_,.-"
do_not_quote = '' .join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"abcdefghijklmnopqrstuvwxyz",
"0123456789",
"_.-~"])
result = urllib.parse.quote(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote(): %r != %r" % (do_not_quote, result))
result = urllib.parse.quote_plus(do_not_quote)
self.assertEqual(do_not_quote, result,
"using quote_plus(): %r != %r" % (do_not_quote, result))
def test_default_safe(self):
# Test '/' is default value for 'safe' parameter
self.assertEqual(urllib.parse.quote.__defaults__[0], '/')
def test_safe(self):
# Test setting 'safe' parameter does what it should do
quote_by_default = "<>"
result = urllib.parse.quote(quote_by_default, safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
result = urllib.parse.quote_plus(quote_by_default,
safe=quote_by_default)
self.assertEqual(quote_by_default, result,
"using quote_plus(): %r != %r" %
(quote_by_default, result))
# Safe expressed as bytes rather than str
result = urllib.parse.quote(quote_by_default, safe=b"<>")
self.assertEqual(quote_by_default, result,
"using quote(): %r != %r" % (quote_by_default, result))
# "Safe" non-ASCII characters should have no effect
# (Since URIs are not allowed to have non-ASCII characters)
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
# Same as above, but using a bytes rather than str
result = urllib.parse.quote("a\xfcb", encoding="latin-1", safe=b"\xfc")
expect = urllib.parse.quote("a\xfcb", encoding="latin-1", safe="")
self.assertEqual(expect, result,
"using quote(): %r != %r" %
(expect, result))
def test_default_quoting(self):
# Make sure all characters that should be quoted are by default sans
# space (separate test for that).
should_quote = [chr(num) for num in range(32)] # For 0x00 - 0x1F
should_quote.append(r'<>#%"{}|\^[]`')
should_quote.append(chr(127)) # For 0x7F
should_quote = ''.join(should_quote)
for char in should_quote:
result = urllib.parse.quote(char)
self.assertEqual(hexescape(char), result,
"using quote(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
result = urllib.parse.quote_plus(char)
self.assertEqual(hexescape(char), result,
"using quote_plus(): "
"%s should be escaped to %s, not %s" %
(char, hexescape(char), result))
del should_quote
partial_quote = "ab[]cd"
expected = "ab%5B%5Dcd"
result = urllib.parse.quote(partial_quote)
self.assertEqual(expected, result,
"using quote(): %r != %r" % (expected, result))
result = urllib.parse.quote_plus(partial_quote)
self.assertEqual(expected, result,
"using quote_plus(): %r != %r" % (expected, result))
def test_quoting_space(self):
# Make sure quote() and quote_plus() handle spaces as specified in
# their unique way
result = urllib.parse.quote(' ')
self.assertEqual(result, hexescape(' '),
"using quote(): %r != %r" % (result, hexescape(' ')))
result = urllib.parse.quote_plus(' ')
self.assertEqual(result, '+',
"using quote_plus(): %r != +" % result)
given = "a b cd e f"
expect = given.replace(' ', hexescape(' '))
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
expect = given.replace(' ', '+')
result = urllib.parse.quote_plus(given)
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
def test_quoting_plus(self):
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),
'alpha%2Bbeta+gamma')
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),
'alpha+beta+gamma')
# Test with bytes
self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),
'alpha%2Bbeta+gamma')
# Test with safe bytes
self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),
'alpha+beta+gamma')
def test_quote_bytes(self):
# Bytes should quote directly to percent-encoded values
given = b"\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Encoding argument should raise type error on bytes input
self.assertRaises(TypeError, urllib.parse.quote, given,
encoding="latin-1")
# quote_from_bytes should work the same
result = urllib.parse.quote_from_bytes(given)
self.assertEqual(expect, result,
"using quote_from_bytes(): %r != %r"
% (expect, result))
def test_quote_with_unicode(self):
# Characters in Latin-1 range, encoded by default in UTF-8
given = "\xa2\xd8ab\xff"
expect = "%C2%A2%C3%98ab%C3%BF"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with None (default)
result = urllib.parse.quote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in Latin-1 range, encoded with Latin-1
given = "\xa2\xd8ab\xff"
expect = "%A2%D8ab%FF"
result = urllib.parse.quote(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded by default in UTF-8
given = "\u6f22\u5b57" # "Kanji"
expect = "%E6%BC%A2%E5%AD%97"
result = urllib.parse.quote(given)
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with Latin-1
given = "\u6f22\u5b57"
self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,
encoding="latin-1")
# Characters in BMP, encoded with Latin-1, with replace error handling
given = "\u6f22\u5b57"
expect = "%3F%3F" # "??"
result = urllib.parse.quote(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
# Characters in BMP, Latin-1, with xmlcharref error handling
given = "\u6f22\u5b57"
expect = "%26%2328450%3B%26%2323383%3B" # "漢字"
result = urllib.parse.quote(given, encoding="latin-1",
errors="xmlcharrefreplace")
self.assertEqual(expect, result,
"using quote(): %r != %r" % (expect, result))
def test_quote_plus_with_unicode(self):
# Encoding (latin-1) test for quote_plus
given = "\xa2\xd8 \xff"
expect = "%A2%D8+%FF"
result = urllib.parse.quote_plus(given, encoding="latin-1")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
# Errors test for quote_plus
given = "ab\u6f22\u5b57 cd"
expect = "ab%3F%3F+cd"
result = urllib.parse.quote_plus(given, encoding="latin-1",
errors="replace")
self.assertEqual(expect, result,
"using quote_plus(): %r != %r" % (expect, result))
class UnquotingTests(unittest.TestCase):
"""Tests for unquote() and unquote_plus()
See the doc string for quoting_Tests for details on quoting and such.
"""
def test_unquoting(self):
# Make sure unquoting of all ASCII values works
escape_list = []
for num in range(128):
given = hexescape(chr(num))
expect = chr(num)
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" %
(expect, result))
escape_list.append(given)
escape_string = ''.join(escape_list)
del escape_list
result = urllib.parse.unquote(escape_string)
self.assertEqual(result.count('%'), 1,
"using unquote(): not all characters unescaped: "
"%s" % result)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())
with support.check_warnings(('', BytesWarning), quiet=True):
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')
def test_unquoting_badpercent(self):
# Test unquoting on bad percent-escapes
given = '%xab'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%x'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
given = '%'
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result, "using unquote(): %r != %r"
% (expect, result))
# unquote_to_bytes
given = '%xab'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%x'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
given = '%'
expect = bytes(given, 'ascii')
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result, "using unquote_to_bytes(): %r != %r"
% (expect, result))
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())
def test_unquoting_mixed_case(self):
# Test unquoting on mixed-case hex digits in the percent-escapes
given = '%Ab%eA'
expect = b'\xab\xea'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquoting_parts(self):
# Make sure unquoting works when non-quoted characters are interspersed
given = 'ab%sd' % hexescape('c')
expect = "abcd"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquoting_plus(self):
# Test difference between unquote() and unquote_plus()
given = "are+there+spaces..."
expect = given
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
expect = given.replace('+', ' ')
result = urllib.parse.unquote_plus(given)
self.assertEqual(expect, result,
"using unquote_plus(): %r != %r" % (expect, result))
def test_unquote_to_bytes(self):
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = b'br\xc3\xbcckner_sapporo_20050930.doc'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test on a string with unescaped non-ASCII characters
# (Technically an invalid URI; expect those characters to be UTF-8
# encoded).
result = urllib.parse.unquote_to_bytes("\u6f22%C3%BC")
expect = b'\xe6\xbc\xa2\xc3\xbc' # UTF-8 for "\u6f22\u00fc"
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input
given = b'%A2%D8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
# Test with a bytes as input, with unescaped non-ASCII bytes
# (Technically an invalid URI; expect those bytes to be preserved)
given = b'%A2\xd8ab%FF'
expect = b'\xa2\xd8ab\xff'
result = urllib.parse.unquote_to_bytes(given)
self.assertEqual(expect, result,
"using unquote_to_bytes(): %r != %r"
% (expect, result))
def test_unquote_with_unicode(self):
# Characters in the Latin-1 range, encoded with UTF-8
given = 'br%C3%BCckner_sapporo_20050930.doc'
expect = 'br\u00fcckner_sapporo_20050930.doc'
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with None (default)
result = urllib.parse.unquote(given, encoding=None, errors=None)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in the Latin-1 range, encoded with Latin-1
result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',
encoding="latin-1")
expect = 'br\u00fcckner_sapporo_20050930.doc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Characters in BMP, encoded with UTF-8
given = "%E6%BC%A2%E5%AD%97"
expect = "\u6f22\u5b57" # "Kanji"
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence
given = "%F3%B1"
expect = "\ufffd" # Replacement character
result = urllib.parse.unquote(given)
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, replace errors
result = urllib.parse.unquote(given, errors="replace")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# Decode with UTF-8, invalid sequence, ignoring errors
given = "%F3%B1"
expect = ""
result = urllib.parse.unquote(given, errors="ignore")
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, UTF-8
result = urllib.parse.unquote("\u6f22%C3%BC")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
# A mix of non-ASCII and percent-encoded characters, Latin-1
# (Note, the string contains non-Latin-1-representable characters)
result = urllib.parse.unquote("\u6f22%FC", encoding="latin-1")
expect = '\u6f22\u00fc'
self.assertEqual(expect, result,
"using unquote(): %r != %r" % (expect, result))
def test_unquoting_with_bytes_input(self):
# Bytes not supported yet
with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'):
given = b'bl\xc3\xa5b\xc3\xa6rsyltet\xc3\xb8y'
urllib.parse.unquote(given)
class urlencode_Tests(unittest.TestCase):
"""Tests for urlencode()"""
def help_inputtype(self, given, test_type):
"""Helper method for testing different input types.
'given' must lead to only the pairs:
* 1st, 1
* 2nd, 2
* 3rd, 3
The test cannot assume anything about ordering: the docs make no
guarantee, and the input may be a dictionary.
"""
expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
result = urllib.parse.urlencode(given)
for expected in expect_somewhere:
self.assertIn(expected, result,
"testing %s: %s not found in %s" %
(test_type, expected, result))
self.assertEqual(result.count('&'), 2,
"testing %s: expected 2 '&'s; got %s" %
(test_type, result.count('&')))
amp_location = result.index('&')
on_amp_left = result[amp_location - 1]
on_amp_right = result[amp_location + 1]
self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),
"testing %s: '&' not located in proper place in %s" %
(test_type, result))
self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing and amps
"testing %s: "
"unexpected number of characters: %s != %s" %
(test_type, len(result), (5 * 3) + 2))
def test_using_mapping(self):
# Test passing in a mapping object as an argument.
self.help_inputtype({"1st":'1', "2nd":'2', "3rd":'3'},
"using dict as input type")
def test_using_sequence(self):
# Test passing in a sequence of two-item sequences as an argument.
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
"using sequence of two-item tuples as input")
def test_quoting(self):
# Make sure keys and values are quoted using quote_plus()
given = {"&":"="}
expect = "%s=%s" % (hexescape('&'), hexescape('='))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
given = {"key name":"A bunch of pluses"}
expect = "key+name=A+bunch+of+pluses"
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
def test_doseq(self):
# Test that passing True for 'doseq' parameter works correctly
given = {'sequence':['1', '2', '3']}
expect = "sequence=%s" % urllib.parse.quote_plus(str(['1', '2', '3']))
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
for value in given["sequence"]:
expect = "sequence=%s" % value
self.assertIn(expect, result)
self.assertEqual(result.count('&'), 2,
"Expected 2 '&'s, got %s" % result.count('&'))
def test_empty_sequence(self):
self.assertEqual("", urllib.parse.urlencode({}))
self.assertEqual("", urllib.parse.urlencode([]))
def test_nonstring_values(self):
self.assertEqual("a=1", urllib.parse.urlencode({"a": 1}))
self.assertEqual("a=None", urllib.parse.urlencode({"a": None}))
def test_nonstring_seq_values(self):
self.assertEqual("a=1&a=2", urllib.parse.urlencode({"a": [1, 2]}, True))
self.assertEqual("a=None&a=a",
urllib.parse.urlencode({"a": [None, "a"]}, True))
data = collections.OrderedDict([("a", 1), ("b", 1)])
self.assertEqual("a=a&a=b",
urllib.parse.urlencode({"a": data}, True))
def test_urlencode_encoding(self):
# ASCII encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Default is UTF-8 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
# Latin-1 encoding.
given = (('\u00a0', '\u00c1'),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_encoding_doseq(self):
# ASCII Encoding. Expect %3F with errors="replace"
given = (('\u00a0', '\u00c1'),)
expect = '%3F=%3F'
result = urllib.parse.urlencode(given, doseq=True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# ASCII Encoding. On a sequence of values.
given = (("\u00a0", (1, "\u00c1")),)
expect = '%3F=1&%3F=%3F'
result = urllib.parse.urlencode(given, True,
encoding="ASCII", errors="replace")
self.assertEqual(expect, result)
# Utf-8
given = (("\u00a0", "\u00c1"),)
expect = '%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%C2%A0=42&%C2%A0=%C3%81'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# latin-1
given = (("\u00a0", "\u00c1"),)
expect = '%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
given = (("\u00a0", (42, "\u00c1")),)
expect = '%A0=42&%A0=%C1'
result = urllib.parse.urlencode(given, True, encoding="latin-1")
self.assertEqual(expect, result)
def test_urlencode_bytes(self):
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0%24=%C1%24'
result = urllib.parse.urlencode(given)
self.assertEqual(expect, result)
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
# Sequence of values
given = ((b'\xa0\x24', (42, b'\xc1\x24')),)
expect = '%A0%24=42&%A0%24=%C1%24'
result = urllib.parse.urlencode(given, True)
self.assertEqual(expect, result)
def test_urlencode_encoding_safe_parameter(self):
# Send '$' (\x24) as safe character
# Default utf-8 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, doseq=True, safe=":$")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
# Safe parameter in sequence
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$")
self.assertEqual(expect, result)
# Test all above in latin-1 encoding
given = ((b'\xa0\x24', b'\xc1\x24'),)
result = urllib.parse.urlencode(given, safe=":$",
encoding="latin-1")
expect = '%A0$=%C1$'
self.assertEqual(expect, result)
given = ((b'\xa0\x24', b'\xc1\x24'),)
expect = '%A0$=%C1$'
result = urllib.parse.urlencode(given, doseq=True, safe=":$",
encoding="latin-1")
given = ((b'\xa0\x24', (b'\xc1\x24', 0xd, 42)),)
expect = '%A0$=%C1$&%A0$=13&%A0$=42'
result = urllib.parse.urlencode(given, True, safe=":$",
encoding="latin-1")
self.assertEqual(expect, result)
class Pathname_Tests(unittest.TestCase):
"""Test pathname2url() and url2pathname()"""
def test_basic(self):
# Make sure simple tests pass
expected_path = os.path.join("parts", "of", "a", "path")
expected_url = "parts/of/a/path"
result = urllib.request.pathname2url(expected_path)
self.assertEqual(expected_url, result,
"pathname2url() failed; %s != %s" %
(result, expected_url))
result = urllib.request.url2pathname(expected_url)
self.assertEqual(expected_path, result,
"url2pathname() failed; %s != %s" %
(result, expected_path))
def test_quoting(self):
# Test that automatic quoting and unquoting work for pathname2url() and
# url2pathname() respectively
given = os.path.join("needs", "quot=ing", "here")
expect = "needs/%s/here" % urllib.parse.quote("quot=ing")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
expect = given
result = urllib.request.url2pathname(result)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
given = os.path.join("make sure", "using_quote")
expect = "%s/using_quote" % urllib.parse.quote("make sure")
result = urllib.request.pathname2url(given)
self.assertEqual(expect, result,
"pathname2url() failed; %s != %s" %
(expect, result))
given = "make+sure/using_unquote"
expect = os.path.join("make+sure", "using_unquote")
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
"url2pathname() failed; %s != %s" %
(expect, result))
@unittest.skipUnless(sys.platform == 'win32',
'test specific to the urllib.url2path function.')
def test_ntpath(self):
given = ('/C:/', '///C:/', '/C|//')
expect = 'C:\\'
for url in given:
result = urllib.request.url2pathname(url)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
given = '///C|/path'
expect = 'C:\\path'
result = urllib.request.url2pathname(given)
self.assertEqual(expect, result,
'urllib.request.url2pathname() failed; %s != %s' %
(expect, result))
class Utility_Tests(unittest.TestCase):
"""Testcase to test the various utility functions in the urllib."""
def test_thishost(self):
"""Test the urllib.request.thishost utility function returns a tuple"""
self.assertIsInstance(urllib.request.thishost(), tuple)
class URLopener_Tests(FakeHTTPMixin, unittest.TestCase):
"""Testcase to test the open method of URLopener class."""
def test_quoted_open(self):
class DummyURLopener(urllib.request.URLopener):
def open_spam(self, url):
return url
with support.check_warnings(
('DummyURLopener style of invoking requests is deprecated.',
DeprecationWarning)):
self.assertEqual(DummyURLopener().open(
'spam://example/ /'),'//example/%20/')
# test the safe characters are not quoted by urlopen
self.assertEqual(DummyURLopener().open(
"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/"),
"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/")
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_file(self):
with support.temp_dir() as tmpdir:
fd, tmpfile = tempfile.mkstemp(dir=tmpdir)
os.close(fd)
fileurl = "file:" + urllib.request.pathname2url(tmpfile)
filename, _ = urllib.request.URLopener().retrieve(fileurl)
# Some buildbots have a TEMP folder that uses a lowercase drive letter.
self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile))
@support.ignore_warnings(category=DeprecationWarning)
def test_urlopener_retrieve_remote(self):
url = "http://www.python.org/file.txt"
self.fakehttp(b"HTTP/1.1 200 OK\r\n\r\nHello!")
self.addCleanup(self.unfakehttp)
filename, _ = urllib.request.URLopener().retrieve(url)
self.assertEqual(os.path.splitext(filename)[1], ".txt")
@support.ignore_warnings(category=DeprecationWarning)
def test_local_file_open(self):
# bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
class DummyURLopener(urllib.request.URLopener):
def open_local_file(self, url):
return url
for url in ('local_file://example', 'local-file://example'):
self.assertRaises(OSError, urllib.request.urlopen, url)
self.assertRaises(OSError, urllib.request.URLopener().open, url)
self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)
self.assertRaises(OSError, DummyURLopener().open, url)
self.assertRaises(OSError, DummyURLopener().retrieve, url)
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work fine, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen()
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assertIsNone(socket.getdefaulttimeout())
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
class RequestTests(unittest.TestCase):
"""Unit tests for urllib.request.Request."""
def test_default_values(self):
Request = urllib.request.Request
request = Request("http://www.python.org")
self.assertEqual(request.get_method(), 'GET')
request = Request("http://www.python.org", {})
self.assertEqual(request.get_method(), 'POST')
def test_with_method_arg(self):
Request = urllib.request.Request
request = Request("http://www.python.org", method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", {}, method='HEAD')
self.assertEqual(request.method, 'HEAD')
self.assertEqual(request.get_method(), 'HEAD')
request = Request("http://www.python.org", method='GET')
self.assertEqual(request.get_method(), 'GET')
request.method = 'HEAD'
self.assertEqual(request.get_method(), 'HEAD')
class URL2PathNameTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(url2pathname("///C|"), 'C:')
self.assertEqual(url2pathname("///C:"), 'C:')
self.assertEqual(url2pathname("///C|/"), 'C:\\')
def test_converting_when_no_drive_letter(self):
# cannot end a raw string in \
self.assertEqual(url2pathname("///C/test/"), r'\\\C\test' '\\')
self.assertEqual(url2pathname("////C/test/"), r'\\C\test' '\\')
def test_simple_compare(self):
self.assertEqual(url2pathname("///C|/foo/bar/spam.foo"),
r'C:\foo\bar\spam.foo')
def test_non_ascii_drive_letter(self):
self.assertRaises(IOError, url2pathname, "///\u00e8|/")
def test_roundtrip_url2pathname(self):
list_of_paths = ['C:',
r'\\\C\test\\',
r'C:\foo\bar\spam.foo'
]
for path in list_of_paths:
self.assertEqual(url2pathname(pathname2url(path)), path)
class PathName2URLTests(unittest.TestCase):
def test_converting_drive_letter(self):
self.assertEqual(pathname2url("C:"), '///C:')
self.assertEqual(pathname2url("C:\\"), '///C:')
def test_converting_when_no_drive_letter(self):
self.assertEqual(pathname2url(r"\\\folder\test" "\\"),
'/////folder/test/')
self.assertEqual(pathname2url(r"\\folder\test" "\\"),
'////folder/test/')
self.assertEqual(pathname2url(r"\folder\test" "\\"),
'/folder/test/')
def test_simple_compare(self):
self.assertEqual(pathname2url(r'C:\foo\bar\spam.foo'),
"///C:/foo/bar/spam.foo" )
def test_long_drive_letter(self):
self.assertRaises(IOError, pathname2url, "XX:\\")
def test_roundtrip_pathname2url(self):
list_of_paths = ['///C:',
'/////folder/test/',
'///C:/foo/bar/spam.foo']
for path in list_of_paths:
self.assertEqual(pathname2url(url2pathname(path)), path)
if __name__ == '__main__':
unittest.main()
| 41.796178
| 108
| 0.589759
| 8,191
| 72,182
| 5.099622
| 0.123428
| 0.061406
| 0.030524
| 0.042661
| 0.576692
| 0.524598
| 0.472242
| 0.423021
| 0.37799
| 0.32975
| 0
| 0.025378
| 0.284323
| 72,182
| 1,726
| 109
| 41.820394
| 0.783213
| 0.16403
| 0
| 0.435668
| 0
| 0.008143
| 0.166513
| 0.032898
| 0
| 0
| 0.000368
| 0
| 0.219055
| 1
| 0.115635
| false
| 0.044788
| 0.013844
| 0.002443
| 0.162052
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8627cb215475c4cdba11abae1ef96d79eaf6f75a
| 440
|
py
|
Python
|
modules/dare.py
|
VeNoM-hubs/nyx
|
1d76b3ad50add2e71e70fac40699e0cb513b084e
|
[
"MIT"
] | null | null | null |
modules/dare.py
|
VeNoM-hubs/nyx
|
1d76b3ad50add2e71e70fac40699e0cb513b084e
|
[
"MIT"
] | 3
|
2020-10-16T16:23:02.000Z
|
2021-09-08T02:33:38.000Z
|
modules/dare.py
|
VeNoM-hubs/nyx
|
1d76b3ad50add2e71e70fac40699e0cb513b084e
|
[
"MIT"
] | 5
|
2020-10-14T04:03:27.000Z
|
2020-11-24T04:10:03.000Z
|
from discord.ext import commands
import json
import random
with open("assets/json/questions.json") as data:
data = json.load(data)
dares = data["dares"]
class Dare(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases=["d"])
async def dare(self, ctx):
dare = random.choice(dares)
await ctx.send(dare)
def setup(client):
client.add_cog(Dare(client))
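# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): one way this
# cog might be loaded by a discord.py 1.x bot. The command prefix and the
# token placeholder are assumptions made for the example.
# ---------------------------------------------------------------------------
def _example_run_bot():
    from discord.ext import commands as _commands
    bot = _commands.Bot(command_prefix="!")
    bot.load_extension("modules.dare")  # imports this module and calls setup(bot)
    # bot.run("YOUR_BOT_TOKEN")  # supply a real token before running
    return bot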
| 19.130435
| 48
| 0.659091
| 60
| 440
| 4.75
| 0.533333
| 0.063158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213636
| 440
| 22
| 49
| 20
| 0.823699
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 0.059091
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862895e0beee8139d3bebfdbda1b874ae1ecc23b
| 18,880
|
py
|
Python
|
szh_objects.py
|
ipqhjjybj/bitcoin_trend_strategy
|
0c85055558591574a4171abd68142ebbeb502958
|
[
"MIT"
] | 4
|
2019-10-07T13:24:35.000Z
|
2020-12-03T19:03:15.000Z
|
szh_objects.py
|
ipqhjjybj/bitcoin_trend_strategy
|
0c85055558591574a4171abd68142ebbeb502958
|
[
"MIT"
] | 1
|
2019-10-08T07:11:30.000Z
|
2019-10-08T07:11:30.000Z
|
szh_objects.py
|
ipqhjjybj/bitcoin_trend_strategy
|
0c85055558591574a4171abd68142ebbeb502958
|
[
"MIT"
] | 2
|
2019-12-15T03:50:57.000Z
|
2021-05-25T15:44:05.000Z
|
# encoding: utf-8
import sys
from market_maker import OrderManager
from settings import *
import os
from pymongo import MongoClient, ASCENDING
from pymongo.errors import ConnectionFailure
from datetime import datetime , timedelta
import numpy as np
########################################################################################################################
# constants
EXCHANGE_BITMEX = "BITMEX"
EMPTY_STRING = ""
EMPTY_FLOAT = 0.0
EMPTY_INT = 0
#----------------------------------------------------------------------
class LoggerEngine(object):
LogDir = "LogDir"
#----------------------------------------------------------------------
def __init__(self, logName , in_debug = True , open_md = "w"):
if os.path.exists(self.LogDir) == False:
os.mkdir( self.LogDir )
self.logPath = os.path.join(self.LogDir , logName)
self.now_debug = in_debug
if self.now_debug:
self.f = open( self.logPath , open_md)
#----------------------------------------------------------------------
def error(self, msg , error_id):
if self.now_debug:
self.f.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : " + "Error msg %s: %s " % (str(error_id) , msg) + "\n")
self.f.flush()
#----------------------------------------------------------------------
def info(self, msg):
if self.now_debug:
self.f.write(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " : " + msg + "\n")
self.f.flush()
#----------------------------------------------------------------------
def close(self):
self.f.close()
'''
Format of tick data
'''
class TickData(object):
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(TickData, self).__init__()
# symbol information
self.symbol = EMPTY_STRING # contract symbol
self.exchange = EMPTY_STRING # exchange code
self.vtSymbol = EMPTY_STRING # unique symbol within the vt system, usually symbol.exchange
# trade data
self.lastPrice = EMPTY_FLOAT # latest trade price
self.lastVolume = EMPTY_INT # latest trade volume
self.volume = EMPTY_INT # total volume today
self.openInterest = EMPTY_INT # open interest
self.time = EMPTY_STRING # time, e.g. 11:20:56.5
self.date = EMPTY_STRING # date, e.g. 20151009
self.datetime = None # Python datetime object
# regular market data
self.openPrice = EMPTY_FLOAT # today's open price
self.highPrice = EMPTY_FLOAT # today's high price
self.lowPrice = EMPTY_FLOAT # today's low price
self.preClosePrice = EMPTY_FLOAT
self.upperLimit = EMPTY_FLOAT # limit-up price
self.lowerLimit = EMPTY_FLOAT # limit-down price
# five levels of bid/ask quotes
self.bidPrice1 = EMPTY_FLOAT
self.bidPrice2 = EMPTY_FLOAT
self.bidPrice3 = EMPTY_FLOAT
self.bidPrice4 = EMPTY_FLOAT
self.bidPrice5 = EMPTY_FLOAT
self.askPrice1 = EMPTY_FLOAT
self.askPrice2 = EMPTY_FLOAT
self.askPrice3 = EMPTY_FLOAT
self.askPrice4 = EMPTY_FLOAT
self.askPrice5 = EMPTY_FLOAT
self.bidVolume1 = EMPTY_INT
self.bidVolume2 = EMPTY_INT
self.bidVolume3 = EMPTY_INT
self.bidVolume4 = EMPTY_INT
self.bidVolume5 = EMPTY_INT
self.askVolume1 = EMPTY_INT
self.askVolume2 = EMPTY_INT
self.askVolume3 = EMPTY_INT
self.askVolume4 = EMPTY_INT
self.askVolume5 = EMPTY_INT
########################################################################
class BarData(object):
"""Candlestick (K-line) bar data"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(BarData, self).__init__()
self.vtSymbol = EMPTY_STRING # symbol in the vt system
self.symbol = EMPTY_STRING # contract symbol
self.exchange = EMPTY_STRING # exchange
self.open = EMPTY_FLOAT # OHLC
self.high = EMPTY_FLOAT
self.low = EMPTY_FLOAT
self.close = EMPTY_FLOAT
self.date = EMPTY_STRING # date on which the bar starts
self.time = EMPTY_STRING # time at which the bar starts
self.datetime = None # Python datetime object
self.volume = EMPTY_INT # volume
self.openInterest = EMPTY_INT # open interest
'''
Base class for engines
'''
class EngineBase(object):
#----------------------------------------------------------------------
def writeLog(self, content):
if self.logger:
self.logger.info(content)
#----------------------------------------------------------------------
def writeError(self, content , error_id = 0):
"""
Send an error notification / write it to the log file
:param content:
:return:
"""
if self.logger:
self.logger.error(content , error_id)
'''
Main engine
'''
class DataEngine(EngineBase):
#----------------------------------------------------------------------
def __init__(self , _host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT):
super(DataEngine, self).__init__()
self.host = _host
self.port = _port
# MongoDB related
self.dbClient = None # MongoDB client object
self.logger = LoggerEngine("dataEngine.log")
## init the db
self.dbConnect()
#----------------------------------------------------------------------
def dbConnect(self):
"""Connect to the MongoDB database"""
if not self.dbClient:
# read the MongoDB settings
try:
# set the timeout for MongoDB operations to 0.5 seconds
self.dbClient = MongoClient(self.host , self.port , connectTimeoutMS=500)
# call server_info to check the server status, so an unreachable server is not mistaken for a successful connection
self.dbClient.server_info()
self.writeLog(u'database connected successfully')
except ConnectionFailure:
self.writeLog( u'failed to connect to the database')
#----------------------------------------------------------------------
def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING):
"""Read data from MongoDB; d is the query condition; the matching documents are returned as a list"""
if self.dbClient:
db = self.dbClient[dbName]
collection = db[collectionName]
if sortKey:
cursor = collection.find(d).sort(sortKey, sortDirection) # sort the query results
else:
cursor = collection.find(d)
if cursor:
return list(cursor)
else:
return []
else:
self.writeLog(u'db query failed')
return []
#-----------------------------------------------------------------------
def loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days = 2):
today_datetime = datetime.now()
start_datetime = today_datetime - timedelta( days = days)
d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}}
barData = self.dbQuery(dbName, collectionName, d, 'datetime')
l = []
for d in barData:
bar = BarData()
bar.__dict__ = d
l.append(bar)
return l
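# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): pull the last
# two days of bars from MongoDB using the GLOBAL_* defaults imported from
# settings.py. The helper name is an assumption made for the example.
# ---------------------------------------------------------------------------
def _example_load_recent_bars():
    engine = DataEngine()
    bars = engine.loadBars(days=2)
    engine.writeLog("loaded %d bars" % len(bars))
    return bars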
########################################################################
class BarManager(object):
"""
Bar (K-line) generator, supporting:
1. building 1-minute bars from ticks
2. building X-minute bars from 1-minute bars (X can be 2, 3, 5, 10, 15, 30 or 60)
"""
#----------------------------------------------------------------------
def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None):
"""Constructor"""
self.bar = None # 1-minute bar object
self.onBar = onBar # callback for finished 1-minute bars
self.xsecBar = None # X-second bar object
self.xsec = xsec # value of xsec
self.onXsecBar = onXsecBar # callback for finished X-second bars
self.xminBar = None # X-minute bar object
self.xmin = xmin # value of X
self.onXminBar = onXminBar # callback for finished X-minute bars
self.xhourBar = None # X-hour bar object
self.xhour = xhour # value of x
self.onXhourBar = onXhourBar # callback for finished X-hour bars
self.lastTick = None # cache of the previous tick
self.lastSecondTick = None # cache of the previous tick for second-level bars
self.dayBar = None # bar object for one trading day
self.onDayBar = onDayBar # callback for finished daily bars
self.lastDayBar = None
#----------------------------------------------------------------------
def updateTick(self, tick):
"""Update with a new tick"""
newMinute = False # by default this is not a new minute
# the bar object has not been created yet
if not self.bar:
self.bar = BarData()
newMinute = True
# a new minute has started
elif self.bar.datetime.minute != tick.datetime.minute:
# generate the timestamp of the finished 1-minute bar
self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # set seconds and microseconds to 0
self.bar.date = self.bar.datetime.strftime('%Y%m%d')
self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f')
# push out the finished 1-minute bar
self.onBar(self.bar)
# create a new bar object
self.bar = BarData()
newMinute = True
# initialise the bar data for the new minute
if newMinute:
self.bar.vtSymbol = tick.vtSymbol
self.bar.symbol = tick.symbol
self.bar.exchange = tick.exchange
self.bar.open = tick.lastPrice
self.bar.high = tick.lastPrice
self.bar.low = tick.lastPrice
# otherwise keep accumulating into the current minute's bar
else:
self.bar.high = max(self.bar.high, tick.lastPrice)
self.bar.low = min(self.bar.low, tick.lastPrice)
# fields updated on every tick
self.bar.close = tick.lastPrice
self.bar.datetime = tick.datetime
self.bar.openInterest = tick.openInterest
if self.lastTick:
self.bar.volume += (tick.volume - self.lastTick.volume) # volume traded within the current bar
# cache the tick
self.lastTick = tick
#----------------------------------------------------------------------
def updateSecond(self, tick ):
"""Build second-level bars from tick data"""
newSecond = False
if not self.xsecBar:
self.xsecBar = BarData()
newSecond = True
elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) % self.xsec == 0 ):
self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # set microseconds to 0
self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d')
self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f')
# push out the finished X-second bar
self.onXsecBar(self.xsecBar)
# reset the cached bar object
self.xsecBar = BarData()
newSecond = True
# initialise the bar data for the new X-second window
if newSecond :
self.xsecBar.datetime = tick.datetime
self.xsecBar.vtSymbol = tick.vtSymbol
self.xsecBar.symbol = tick.symbol
self.xsecBar.exchange = tick.exchange
self.xsecBar.open = tick.lastPrice
self.xsecBar.high = tick.lastPrice
self.xsecBar.low = tick.lastPrice
# otherwise keep accumulating into the current X-second bar
else:
self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice)
self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice)
# fields updated on every tick
self.xsecBar.close = tick.lastPrice
self.xsecBar.openInterest = tick.openInterest
if self.lastSecondTick:
self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # volume traded since the previous cached tick
# cache the secondTick object
self.lastSecondTick = tick
#----------------------------------------------------------------------
def updateBar(self, bar):
"""Update with a finished 1-minute bar"""
# the bar object has not been created yet
if not self.xminBar:
self.xminBar = BarData()
self.xminBar.vtSymbol = bar.vtSymbol
self.xminBar.symbol = bar.symbol
self.xminBar.exchange = bar.exchange
self.xminBar.open = bar.open
self.xminBar.high = bar.high
self.xminBar.low = bar.low
self.xminBar.datetime = bar.datetime
# otherwise accumulate into the existing bar
else:
self.xminBar.high = max(self.xminBar.high, bar.high)
self.xminBar.low = min(self.xminBar.low, bar.low)
# fields updated on every bar
self.xminBar.close = bar.close
self.xminBar.openInterest = bar.openInterest
self.xminBar.volume += float(bar.volume)
# the X-minute window is complete
if ( (bar.datetime.minute + 1) % self.xmin ) == 0: # evenly divisible by X
# generate the timestamp of the finished X-minute bar
self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) # set seconds and microseconds to 0
self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d')
self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S')
# push it out
self.onXminBar(self.xminBar)
# reset the cached bar object
self.xminBar = None
#----------------------------------------------------------------------
def updateHourBar(self , bar):
"""Update with a finished 1-hour bar"""
# the bar object has not been created yet
if not self.xhourBar:
self.xhourBar = BarData()
self.xhourBar.vtSymbol = bar.vtSymbol
self.xhourBar.symbol = bar.symbol
self.xhourBar.exchange = bar.exchange
self.xhourBar.open = bar.open
self.xhourBar.high = bar.high
self.xhourBar.low = bar.low
self.xhourBar.datetime = bar.datetime
else:
self.xhourBar.high = max(self.xhourBar.high, bar.high)
self.xhourBar.low = min(self.xhourBar.low, bar.low)
# fields updated on every bar
self.xhourBar.close = bar.close
self.xhourBar.openInterest = bar.openInterest
self.xhourBar.volume += float(bar.volume)
# the X-hour window is complete
if ( (bar.datetime.hour + 1) % self.xhour ) == 0: # evenly divisible by X
# generate the timestamp of the finished X-hour bar
self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # set seconds and microseconds to 0
self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d')
self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S')
# push it out
self.onXhourBar(self.xhourBar)
# reset the cached bar object
self.xhourBar = None
#----------------------------------------------------------------------------
def updateDayBar(self, bar):
# the trading day is complete
# 1. night session, 2. 09:00 on the next day
if self.lastDayBar != None \
and ( (self.lastDayBar.time <= "15:30:00" and bar.time >= "15:30:00") \
or (self.lastDayBar.time <= "15:30:00" and bar.time <= self.lastDayBar.time )):
self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # set seconds and microseconds to 0
self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d')
self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S')
# this means a new trading day has started
# push out yesterday's bar first
self.onDayBar( self.dayBar)
self.dayBar = BarData()
self.dayBar.vtSymbol = bar.vtSymbol
self.dayBar.symbol = bar.symbol
self.dayBar.exchange = bar.exchange
self.dayBar.open = bar.open
self.dayBar.high = bar.high
self.dayBar.low = bar.low
self.dayBar.datetime = bar.datetime
elif not self.dayBar:
self.dayBar = BarData()
self.dayBar.vtSymbol = bar.vtSymbol
self.dayBar.symbol = bar.symbol
self.dayBar.exchange = bar.exchange
self.dayBar.open = bar.open
self.dayBar.high = bar.high
self.dayBar.low = bar.low
self.dayBar.datetime = bar.datetime
else:
self.dayBar.high = max(self.dayBar.high , bar.high)
self.dayBar.low = min(self.dayBar.low , bar.low)
# fields updated on every bar
self.dayBar.close = bar.close
self.dayBar.openInterest = bar.openInterest
self.dayBar.volume += float(bar.volume)
self.lastDayBar = bar
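# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): wiring a
# BarManager so that ticks are aggregated into 1-minute bars and those bars
# into 15-minute bars. The callback names are assumptions made for the example.
# ---------------------------------------------------------------------------
def _example_wire_bar_manager(tick_stream):
    def on_xmin_bar(bar):
        print("15-minute bar closed:", bar.datetime, bar.close)

    def on_bar(bar):
        # every finished 1-minute bar is fed back in to build the X-minute bar
        bm.updateBar(bar)

    bm = BarManager(on_bar, xmin=15, onXminBar=on_xmin_bar)
    for tick in tick_stream:
        bm.updateTick(tick)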
########################################################################
class ArrayManager(object):
"""
Bar series manager, responsible for:
1. maintaining the time series of bars
2. computing commonly used technical indicators
"""
#----------------------------------------------------------------------
def __init__(self, size=100):
"""Constructor"""
self.count = 0 # number of bars cached so far
self.size = size # size of the cache
self.inited = False # True if count>=size
self.openArray = np.zeros(size) # OHLC
self.highArray = np.zeros(size)
self.lowArray = np.zeros(size)
self.closeArray = np.zeros(size)
self.volumeArray = np.zeros(size)
#----------------------------------------------------------------------
def updateBar(self, bar):
"""Update the cached series with a new bar"""
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.openArray[0:self.size-1] = self.openArray[1:self.size]
self.highArray[0:self.size-1] = self.highArray[1:self.size]
self.lowArray[0:self.size-1] = self.lowArray[1:self.size]
self.closeArray[0:self.size-1] = self.closeArray[1:self.size]
self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size]
self.openArray[-1] = bar.open
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.closeArray[-1] = bar.close
self.volumeArray[-1] = bar.volume
#----------------------------------------------------------------------
@property
def open(self):
"""Get the open price series"""
return self.openArray
#----------------------------------------------------------------------
@property
def high(self):
"""Get the high price series"""
return self.highArray
#----------------------------------------------------------------------
@property
def low(self):
"""Get the low price series"""
return self.lowArray
#----------------------------------------------------------------------
@property
def close(self):
"""Get the close price series"""
return self.closeArray
#----------------------------------------------------------------------
@property
def volume(self):
"""Get the volume series"""
return self.volumeArray
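# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): warming up an
# ArrayManager with finished bars and reading the cached price series once the
# rolling window is full. The variable names are assumptions made for the example.
# ---------------------------------------------------------------------------
def _example_fill_array_manager(bars):
    am = ArrayManager(size=100)
    for bar in bars:
        am.updateBar(bar)
    if am.inited:  # True once at least `size` bars have been seen
        print("last close:", am.close[-1])
        print("highest high in the window:", am.high.max())
    return am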
| 33.895871
| 131
| 0.476536
| 1,719
| 18,880
| 5.162885
| 0.191972
| 0.036056
| 0.022085
| 0.008676
| 0.238423
| 0.160225
| 0.136789
| 0.131155
| 0.063775
| 0.056113
| 0
| 0.010472
| 0.317161
| 18,880
| 556
| 132
| 33.956835
| 0.67794
| 0.164089
| 0
| 0.22291
| 0
| 0
| 0.018082
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077399
| false
| 0
| 0.024768
| 0
| 0.154799
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8628a8ccf18c32191b9cace42141414df8e8de89
| 7,864
|
py
|
Python
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
CodeAnalysis/SourceMeter_Interface/SourceMeter-8.2.0-x64-linux/Python/Tools/python/pylint/pyreverse/writer.py
|
ishtjot/susereumutep
|
56e20c1777e0c938ac42bd8056f84af9e0b76e46
|
[
"Apache-2.0"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities for creating VCG and Dot diagrams"""
from logilab.common.vcgutils import VCGPrinter
from logilab.common.graph import DotBackend
from pylint.pyreverse.utils import is_exception
class DiagramWriter(object):
"""base class for writing project diagrams
"""
def __init__(self, config, styles):
self.config = config
self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles
self.printer = None # defined in set_printer
def write(self, diadefs):
"""write files for <project> according to <diadefs>
"""
for diagram in diadefs:
basename = diagram.title.strip().replace(' ', '_')
file_name = '%s.%s' % (basename, self.config.output_format)
self.set_printer(file_name, basename)
if diagram.TYPE == 'class':
self.write_classes(diagram)
else:
self.write_packages(diagram)
self.close_graph()
def write_packages(self, diagram):
"""write a package diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)):
self.printer.emit_node(i, label=self.get_title(obj), shape='box')
obj.fig_id = i
# package dependencies
for rel in diagram.get_relationships('depends'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.pkg_edges)
def write_classes(self, diagram):
"""write a class diagram"""
# sorted to get predictable (hence testable) results
for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)):
self.printer.emit_node(i, **self.get_values(obj))
obj.fig_id = i
# inheritance links
for rel in diagram.get_relationships('specialization'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.inh_edges)
# implementation links
for rel in diagram.get_relationships('implements'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
**self.imp_edges)
# generate associations
for rel in diagram.get_relationships('association'):
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,
label=rel.name, **self.ass_edges)
def set_printer(self, file_name, basename):
"""set printer"""
raise NotImplementedError
def get_title(self, obj):
"""get project title"""
raise NotImplementedError
def get_values(self, obj):
"""get label and shape for classes."""
raise NotImplementedError
def close_graph(self):
"""finalize the graph"""
raise NotImplementedError
class DotWriter(DiagramWriter):
"""write dot graphs from a diagram definition and a project
"""
def __init__(self, config):
styles = [dict(arrowtail='none', arrowhead="open"),
dict(arrowtail='none', arrowhead='empty'),
dict(arrowtail='none', arrowhead='empty', style='dashed'),
dict(fontcolor='green', arrowtail='none',
arrowhead='diamond', style='solid'),
]
DiagramWriter.__init__(self, config, styles)
def set_printer(self, file_name, basename):
"""initialize DotWriter and add options for layout.
"""
layout = dict(rankdir="BT")
self.printer = DotBackend(basename, additionnal_param=layout)
self.file_name = file_name
def get_title(self, obj):
"""get project title"""
return obj.title
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
label = obj.title
if obj.shape == 'interface':
label = u'«interface»\\n%s' % label
if not self.config.only_classnames:
label = r'%s|%s\l|' % (label, r'\l'.join(obj.attrs))
for func in obj.methods:
label = r'%s%s()\l' % (label, func.name)
label = '{%s}' % label
if is_exception(obj.node):
return dict(fontcolor='red', label=label, shape='record')
return dict(label=label, shape='record')
def close_graph(self):
"""print the dot graph into <file_name>"""
self.printer.generate(self.file_name)
class VCGWriter(DiagramWriter):
"""write vcg graphs from a diagram definition and a project
"""
def __init__(self, config):
styles = [dict(arrowstyle='solid', backarrowstyle='none',
backarrowsize=0),
dict(arrowstyle='solid', backarrowstyle='none',
backarrowsize=10),
dict(arrowstyle='solid', backarrowstyle='none',
linestyle='dotted', backarrowsize=10),
dict(arrowstyle='solid', backarrowstyle='none',
textcolor='green'),
]
DiagramWriter.__init__(self, config, styles)
def set_printer(self, file_name, basename):
"""initialize VCGWriter for a UML graph"""
self.graph_file = open(file_name, 'w+')
self.printer = VCGPrinter(self.graph_file)
self.printer.open_graph(title=basename, layoutalgorithm='dfs',
late_edge_labels='yes', port_sharing='no',
manhattan_edges='yes')
self.printer.emit_node = self.printer.node
self.printer.emit_edge = self.printer.edge
def get_title(self, obj):
"""get project title in vcg format"""
return r'\fb%s\fn' % obj.title
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
if is_exception(obj.node):
label = r'\fb\f09%s\fn' % obj.title
else:
label = r'\fb%s\fn' % obj.title
if obj.shape == 'interface':
shape = 'ellipse'
else:
shape = 'box'
if not self.config.only_classnames:
attrs = obj.attrs
methods = [func.name for func in obj.methods]
# box width for UML like diagram
maxlen = max(len(name) for name in [obj.title] + methods + attrs)
line = '_' * (maxlen + 2)
label = r'%s\n\f%s' % (label, line)
for attr in attrs:
label = r'%s\n\f08%s' % (label, attr)
if attrs:
label = r'%s\n\f%s' % (label, line)
for func in methods:
label = r'%s\n\f10%s()' % (label, func)
return dict(label=label, shape=shape)
def close_graph(self):
"""close graph and file"""
self.printer.close_graph()
self.graph_file.close()
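# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the four hooks a new
# backend must provide. DiagramWriter.write() drives set_printer(), the two
# get_* helpers and close_graph(); this toy backend merely records node labels
# instead of producing a file, and its printer class is an assumption.
# ---------------------------------------------------------------------------
class NullWriter(DiagramWriter):
    """Collect node labels in memory instead of emitting VCG or Dot output."""

    def __init__(self, config):
        styles = [{}, {}, {}, {}]  # pkg / inheritance / implements / association edge styles
        DiagramWriter.__init__(self, config, styles)
        self.emitted = []

    def set_printer(self, file_name, basename):
        writer = self

        class _RecordingPrinter(object):
            def emit_node(self, node_id, **props):
                writer.emitted.append(props.get("label"))

            def emit_edge(self, from_id, to_id, **props):
                pass

        self.printer = _RecordingPrinter()

    def get_title(self, obj):
        return obj.title

    def get_values(self, obj):
        return dict(label=obj.title, shape="box")

    def close_graph(self):
        pass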
| 39.32
| 82
| 0.594863
| 956
| 7,864
| 4.783473
| 0.270921
| 0.038487
| 0.026241
| 0.021867
| 0.408703
| 0.366062
| 0.288869
| 0.243166
| 0.221518
| 0.211896
| 0
| 0.00594
| 0.293489
| 7,864
| 199
| 83
| 39.517588
| 0.816775
| 0.229654
| 0
| 0.327869
| 0
| 0
| 0.054201
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147541
| false
| 0
| 0.02459
| 0
| 0.237705
| 0.147541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86291f58eceea662a0595f262f1d06df3c3cd46d
| 1,070
|
py
|
Python
|
graphql-ml-serving/backend/mutations.py
|
philippe-heitzmann/python-apps
|
1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a
|
[
"MIT"
] | 13
|
2021-05-23T15:47:24.000Z
|
2022-03-24T16:22:14.000Z
|
graphql-ml-serving/backend/mutations.py
|
philippe-heitzmann/python-apps
|
1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a
|
[
"MIT"
] | 4
|
2021-11-16T20:44:55.000Z
|
2022-01-13T19:13:38.000Z
|
graphql-ml-serving/backend/mutations.py
|
philippe-heitzmann/python-apps
|
1cc6e5e9b9ac81c81a3d4f0e420ff488fe6b2f0a
|
[
"MIT"
] | 11
|
2021-01-31T06:18:10.000Z
|
2021-11-21T00:02:05.000Z
|
import logging
from ariadne import MutationType, convert_kwargs_to_snake_case
from config import clients, messages, queue
mutation = MutationType()
@mutation.field("createMessage")
@convert_kwargs_to_snake_case
async def resolve_create_message(obj, info, content, client_id):
try:
message = {"content": content, "client_id": client_id}
messages.append(message)
await queue.put(message)
return {"success": True, "message": message}
except Exception as error:
return {"success": False, "errors": [str(error)]}
@mutation.field("createClient")
@convert_kwargs_to_snake_case
async def resolve_create_client(obj, info, client_id):
try:
logging.info(f"Client id: {client_id}")
if not clients.get(client_id):
client = {"client_id": client_id}
clients[client_id] = client
return {"success": True, "client": client}
return {"success": False, "errors": ["Client is taken"]}
except Exception as error:
return {"success": False, "errors": [str(error)]}
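# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): binding the
# resolvers above into an executable schema. The SDL below is an assumed shape
# that matches the resolver signatures; the real project defines its own
# type_defs, so treat this only as a sketch.
# ---------------------------------------------------------------------------
def _example_build_schema():
    from ariadne import gql, make_executable_schema

    type_defs = gql(
        """
        type Query { _unused: Boolean }
        type Message { content: String clientId: String }
        type Client { clientId: String }
        type MessageResult { success: Boolean! errors: [String] message: Message }
        type ClientResult { success: Boolean! errors: [String] client: Client }
        type Mutation {
            createMessage(content: String!, clientId: String!): MessageResult
            createClient(clientId: String!): ClientResult
        }
        """
    )
    # convert_kwargs_to_snake_case maps the camelCase arguments (clientId)
    # onto the snake_case resolver parameters (client_id).
    return make_executable_schema(type_defs, mutation)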
| 32.424242
| 64
| 0.673832
| 129
| 1,070
| 5.395349
| 0.372093
| 0.114943
| 0.100575
| 0.086207
| 0.318966
| 0.284483
| 0.284483
| 0.284483
| 0.284483
| 0.155172
| 0
| 0
| 0.205607
| 1,070
| 32
| 65
| 33.4375
| 0.818824
| 0
| 0
| 0.307692
| 0
| 0
| 0.142991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.115385
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8629a20e8661d77754338b9cfeef38848a59f1c8
| 18,336
|
py
|
Python
|
hc/api/transports.py
|
MaxwellDPS/healthchecks
|
3730c67c803e707ae51b01bacf2929bd053ee22f
|
[
"BSD-3-Clause"
] | 1
|
2020-06-08T12:22:51.000Z
|
2020-06-08T12:22:51.000Z
|
hc/api/transports.py
|
pathcl/healthchecks
|
ffc45f0c74694d06679aefe3b92a0b0778473ca7
|
[
"BSD-3-Clause"
] | 5
|
2021-03-19T11:20:11.000Z
|
2021-09-22T19:36:18.000Z
|
hc/api/transports.py
|
MaxwellDPS/healthchecks
|
3730c67c803e707ae51b01bacf2929bd053ee22f
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
import json
import requests
from urllib.parse import quote, urlencode
from hc.accounts.models import Profile
from hc.lib import emails
from hc.lib.string import replace
try:
import apprise
except ImportError:
# Enforce APPRISE_ENABLED = False when the apprise package is not installed
settings.APPRISE_ENABLED = False
def tmpl(template_name, **ctx):
template_path = "integrations/%s" % template_name
# \xa0 is non-breaking space. It causes SMS messages to use UCS2 encoding
# and cost twice as much.
return render_to_string(template_path, ctx).strip().replace("\xa0", " ")
class Transport(object):
def __init__(self, channel):
self.channel = channel
def notify(self, check):
""" Send notification about current status of the check.
This method returns None on success, and error message
on error.
"""
raise NotImplementedError()
def is_noop(self, check):
""" Return True if transport will ignore check's current status.
This method is overridden in Webhook subclass where the user can
configure webhook urls for "up" and "down" events, and both are
optional.
"""
return False
def checks(self):
return self.channel.project.check_set.order_by("created")
class Email(Transport):
def notify(self, check, bounce_url):
if not self.channel.email_verified:
return "Email not verified"
unsub_link = self.channel.get_unsub_link()
headers = {
"X-Bounce-Url": bounce_url,
"List-Unsubscribe": "<%s>" % unsub_link,
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
}
try:
# Look up the sorting preference for this email address
p = Profile.objects.get(user__email=self.channel.email_value)
sort = p.sort
except Profile.DoesNotExist:
# Default sort order is by check's creation time
sort = "created"
# list() executes the query, to avoid DB access while
# rendering a template
ctx = {
"check": check,
"checks": list(self.checks()),
"sort": sort,
"now": timezone.now(),
"unsub_link": unsub_link,
}
emails.alert(self.channel.email_value, ctx, headers)
def is_noop(self, check):
if not self.channel.email_verified:
return True
if check.status == "down":
return not self.channel.email_notify_down
else:
return not self.channel.email_notify_up
class Shell(Transport):
def prepare(self, template, check):
""" Replace placeholders with actual values. """
ctx = {
"$CODE": str(check.code),
"$STATUS": check.status,
"$NOW": timezone.now().replace(microsecond=0).isoformat(),
"$NAME": check.name,
"$TAGS": check.tags,
}
for i, tag in enumerate(check.tags_list()):
ctx["$TAG%d" % (i + 1)] = tag
return replace(template, ctx)
def is_noop(self, check):
if check.status == "down" and not self.channel.cmd_down:
return True
if check.status == "up" and not self.channel.cmd_up:
return True
return False
def notify(self, check):
if not settings.SHELL_ENABLED:
return "Shell commands are not enabled"
if check.status == "up":
cmd = self.channel.cmd_up
elif check.status == "down":
cmd = self.channel.cmd_down
cmd = self.prepare(cmd, check)
code = os.system(cmd)
if code != 0:
return "Command returned exit code %d" % code
class HttpTransport(Transport):
@classmethod
def get_error(cls, response):
# Override in subclasses: look for a specific error message in the
# response and return it.
return None
@classmethod
def _request(cls, method, url, **kwargs):
try:
options = dict(kwargs)
options["timeout"] = 5
if "headers" not in options:
options["headers"] = {}
if "User-Agent" not in options["headers"]:
options["headers"]["User-Agent"] = "healthchecks.io"
r = requests.request(method, url, **options)
if r.status_code not in (200, 201, 202, 204):
m = cls.get_error(r)
if m:
return f'Received status code {r.status_code} with a message: "{m}"'
return f"Received status code {r.status_code}"
except requests.exceptions.Timeout:
# Well, we tried
return "Connection timed out"
except requests.exceptions.ConnectionError:
return "Connection failed"
@classmethod
def get(cls, url, **kwargs):
# Make up to 3 attempts
for x in range(0, 3):
error = cls._request("get", url, **kwargs)
if error is None:
break
return error
@classmethod
def post(cls, url, **kwargs):
# Make up to 3 attempts
for x in range(0, 3):
error = cls._request("post", url, **kwargs)
if error is None:
break
return error
@classmethod
def put(cls, url, **kwargs):
# Make up to 3 attempts
for x in range(0, 3):
error = cls._request("put", url, **kwargs)
if error is None:
break
return error
class Webhook(HttpTransport):
def prepare(self, template, check, urlencode=False):
""" Replace variables with actual values. """
def safe(s):
return quote(s) if urlencode else s
ctx = {
"$CODE": str(check.code),
"$STATUS": check.status,
"$NOW": safe(timezone.now().replace(microsecond=0).isoformat()),
"$NAME": safe(check.name),
"$TAGS": safe(check.tags),
}
for i, tag in enumerate(check.tags_list()):
ctx["$TAG%d" % (i + 1)] = safe(tag)
return replace(template, ctx)
def is_noop(self, check):
if check.status == "down" and not self.channel.url_down:
return True
if check.status == "up" and not self.channel.url_up:
return True
return False
def notify(self, check):
spec = self.channel.webhook_spec(check.status)
if not spec["url"]:
return "Empty webhook URL"
url = self.prepare(spec["url"], check, urlencode=True)
headers = {}
for key, value in spec["headers"].items():
headers[key] = self.prepare(value, check)
body = spec["body"]
if body:
body = self.prepare(body, check)
if spec["method"] == "GET":
return self.get(url, headers=headers)
elif spec["method"] == "POST":
return self.post(url, data=body.encode(), headers=headers)
elif spec["method"] == "PUT":
return self.put(url, data=body.encode(), headers=headers)
class Slack(HttpTransport):
def notify(self, check):
text = tmpl("slack_message.json", check=check)
payload = json.loads(text)
return self.post(self.channel.slack_webhook_url, json=payload)
class HipChat(HttpTransport):
def is_noop(self, check):
return True
class OpsGenie(HttpTransport):
@classmethod
def get_error(cls, response):
try:
return response.json().get("message")
except ValueError:
pass
def notify(self, check):
headers = {
"Content-Type": "application/json",
"Authorization": "GenieKey %s" % self.channel.opsgenie_key,
}
payload = {"alias": str(check.code), "source": settings.SITE_NAME}
if check.status == "down":
payload["tags"] = check.tags_list()
payload["message"] = tmpl("opsgenie_message.html", check=check)
payload["note"] = tmpl("opsgenie_note.html", check=check)
payload["description"] = tmpl("opsgenie_description.html", check=check)
url = "https://api.opsgenie.com/v2/alerts"
if self.channel.opsgenie_region == "eu":
url = "https://api.eu.opsgenie.com/v2/alerts"
if check.status == "up":
url += "/%s/close?identifierType=alias" % check.code
return self.post(url, json=payload, headers=headers)
class PagerDuty(HttpTransport):
URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
def notify(self, check):
description = tmpl("pd_description.html", check=check)
payload = {
"service_key": self.channel.pd_service_key,
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"description": description,
"client": settings.SITE_NAME,
"client_url": check.details_url(),
}
return self.post(self.URL, json=payload)
class PagerTree(HttpTransport):
def notify(self, check):
url = self.channel.value
headers = {"Content-Type": "application/json"}
payload = {
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"title": tmpl("pagertree_title.html", check=check),
"description": tmpl("pagertree_description.html", check=check),
"client": settings.SITE_NAME,
"client_url": settings.SITE_ROOT,
"tags": ",".join(check.tags_list()),
}
return self.post(url, json=payload, headers=headers)
class PagerTeam(HttpTransport):
def notify(self, check):
url = self.channel.value
headers = {"Content-Type": "application/json"}
payload = {
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"title": tmpl("pagerteam_title.html", check=check),
"description": tmpl("pagerteam_description.html", check=check),
"client": settings.SITE_NAME,
"client_url": settings.SITE_ROOT,
"tags": ",".join(check.tags_list()),
}
return self.post(url, json=payload, headers=headers)
class Pushbullet(HttpTransport):
def notify(self, check):
text = tmpl("pushbullet_message.html", check=check)
url = "https://api.pushbullet.com/v2/pushes"
headers = {
"Access-Token": self.channel.value,
"Content-Type": "application/json",
}
payload = {"type": "note", "title": settings.SITE_NAME, "body": text}
return self.post(url, json=payload, headers=headers)
class Pushover(HttpTransport):
URL = "https://api.pushover.net/1/messages.json"
def notify(self, check):
others = self.checks().filter(status="down").exclude(code=check.code)
# list() executes the query, to avoid DB access while
# rendering a template
ctx = {"check": check, "down_checks": list(others)}
text = tmpl("pushover_message.html", **ctx)
title = tmpl("pushover_title.html", **ctx)
pieces = self.channel.value.split("|")
user_key, prio = pieces[0], pieces[1]
# The third element, if present, is the priority for "up" events
if len(pieces) == 3 and check.status == "up":
prio = pieces[2]
payload = {
"token": settings.PUSHOVER_API_TOKEN,
"user": user_key,
"message": text,
"title": title,
"html": 1,
"priority": int(prio),
}
# Emergency notification
if prio == "2":
payload["retry"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY
payload["expire"] = settings.PUSHOVER_EMERGENCY_EXPIRATION
return self.post(self.URL, data=payload)
class VictorOps(HttpTransport):
def notify(self, check):
description = tmpl("victorops_description.html", check=check)
mtype = "CRITICAL" if check.status == "down" else "RECOVERY"
payload = {
"entity_id": str(check.code),
"message_type": mtype,
"entity_display_name": check.name_then_code(),
"state_message": description,
"monitoring_tool": settings.SITE_NAME,
}
return self.post(self.channel.value, json=payload)
class Matrix(HttpTransport):
def get_url(self):
s = quote(self.channel.value)
url = settings.MATRIX_HOMESERVER
url += "/_matrix/client/r0/rooms/%s/send/m.room.message?" % s
url += urlencode({"access_token": settings.MATRIX_ACCESS_TOKEN})
return url
def notify(self, check):
plain = tmpl("matrix_description.html", check=check)
formatted = tmpl("matrix_description_formatted.html", check=check)
payload = {
"msgtype": "m.text",
"body": plain,
"format": "org.matrix.custom.html",
"formatted_body": formatted,
}
return self.post(self.get_url(), json=payload)
class Discord(HttpTransport):
def notify(self, check):
text = tmpl("slack_message.json", check=check)
payload = json.loads(text)
url = self.channel.discord_webhook_url + "/slack"
return self.post(url, json=payload)
class Telegram(HttpTransport):
SM = "https://api.telegram.org/bot%s/sendMessage" % settings.TELEGRAM_TOKEN
@classmethod
def get_error(cls, response):
try:
return response.json().get("description")
except ValueError:
pass
@classmethod
def send(cls, chat_id, text):
# Telegram.send is a separate method because it is also used in
# hc.front.views.telegram_bot to send invite links.
return cls.post(
cls.SM, json={"chat_id": chat_id, "text": text, "parse_mode": "html"}
)
def notify(self, check):
from hc.api.models import TokenBucket
if not TokenBucket.authorize_telegram(self.channel.telegram_id):
return "Rate limit exceeded"
text = tmpl("telegram_message.html", check=check)
return self.send(self.channel.telegram_id, text)
class Sms(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
def is_noop(self, check):
return check.status != "down"
def notify(self, check):
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_sms():
profile.send_sms_limit_notice("SMS")
return "Monthly SMS limit exceeded"
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
text = tmpl("sms_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": settings.TWILIO_FROM,
"To": self.channel.sms_number,
"Body": text,
}
return self.post(url, data=data, auth=auth)
class WhatsApp(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
def is_noop(self, check):
if check.status == "down":
return not self.channel.whatsapp_notify_down
else:
return not self.channel.whatsapp_notify_up
def notify(self, check):
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_sms():
profile.send_sms_limit_notice("WhatsApp")
return "Monthly message limit exceeded"
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
text = tmpl("whatsapp_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": "whatsapp:%s" % settings.TWILIO_FROM,
"To": "whatsapp:%s" % self.channel.sms_number,
"Body": text,
}
return self.post(url, data=data, auth=auth)
class Trello(HttpTransport):
URL = "https://api.trello.com/1/cards"
def is_noop(self, check):
return check.status != "down"
def notify(self, check):
params = {
"idList": self.channel.trello_list_id,
"name": tmpl("trello_name.html", check=check),
"desc": tmpl("trello_desc.html", check=check),
"key": settings.TRELLO_APP_KEY,
"token": self.channel.trello_token,
}
return self.post(self.URL, params=params)
class Apprise(HttpTransport):
def notify(self, check):
if not settings.APPRISE_ENABLED:
# Not supported and/or enabled
return "Apprise is disabled and/or not installed"
a = apprise.Apprise()
title = tmpl("apprise_title.html", check=check)
body = tmpl("apprise_description.html", check=check)
a.add(self.channel.value)
notify_type = (
apprise.NotifyType.SUCCESS
if check.status == "up"
else apprise.NotifyType.FAILURE
)
return (
"Failed"
if not a.notify(body=body, title=title, notify_type=notify_type)
else None
)
class MsTeams(HttpTransport):
def notify(self, check):
text = tmpl("msteams_message.json", check=check)
payload = json.loads(text)
return self.post(self.channel.value, json=payload)
class Zulip(HttpTransport):
@classmethod
def get_error(cls, response):
try:
return response.json().get("msg")
except ValueError:
pass
def notify(self, check):
_, domain = self.channel.zulip_bot_email.split("@")
url = "https://%s/api/v1/messages" % domain
auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key)
data = {
"type": self.channel.zulip_type,
"to": self.channel.zulip_to,
"topic": tmpl("zulip_topic.html", check=check),
"content": tmpl("zulip_content.html", check=check),
}
return self.post(url, data=data, auth=auth)
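# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the contract a new
# integration has to satisfy. notify() returns None on success or an error
# string, and is_noop() lets the transport skip events it is not configured
# for. Treating self.channel.value as the destination URL is an assumption
# made for the example.
# ---------------------------------------------------------------------------
class ExampleWebhook(HttpTransport):
    def is_noop(self, check):
        # this toy integration only reports "down" events
        return check.status != "down"

    def notify(self, check):
        payload = {
            "name": check.name_then_code(),
            "status": check.status,
            "source": settings.SITE_NAME,
        }
        return self.post(self.channel.value, json=payload)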
| 30.816807
| 88
| 0.588405
| 2,124
| 18,336
| 4.984463
| 0.169962
| 0.046755
| 0.025786
| 0.035704
| 0.439974
| 0.396524
| 0.358553
| 0.313403
| 0.304902
| 0.262397
| 0
| 0.005151
| 0.290685
| 18,336
| 594
| 89
| 30.868687
| 0.808857
| 0.062118
| 0
| 0.360382
| 0
| 0.004773
| 0.150471
| 0.025566
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107399
| false
| 0.00716
| 0.031026
| 0.016706
| 0.353222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8629c195c4f2a076441e398a8eff9a8680863488
| 9,419
|
py
|
Python
|
graviti/portex/builder.py
|
Graviti-AI/graviti-python-sdk
|
d2faf86b4718416503b965f6057b31015417446f
|
[
"MIT"
] | 12
|
2022-01-26T06:51:02.000Z
|
2022-03-22T21:28:35.000Z
|
graviti/portex/builder.py
|
Graviti-AI/graviti-python-sdk
|
d2faf86b4718416503b965f6057b31015417446f
|
[
"MIT"
] | 51
|
2022-02-22T07:19:34.000Z
|
2022-03-31T11:39:51.000Z
|
graviti/portex/builder.py
|
Graviti-AI/graviti-python-sdk
|
d2faf86b4718416503b965f6057b31015417446f
|
[
"MIT"
] | 5
|
2022-01-26T06:51:49.000Z
|
2022-03-08T03:41:11.000Z
|
#!/usr/bin/env python3
#
# Copyright 2022 Graviti. Licensed under MIT License.
#
"""Portex type builder related classes."""
from hashlib import md5
from pathlib import Path
from shutil import rmtree
from subprocess import PIPE, CalledProcessError, run
from tempfile import gettempdir
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar
import yaml
import graviti.portex.ptype as PTYPE
from graviti.exception import GitCommandError, GitNotFoundError
from graviti.portex.base import PortexRecordBase
from graviti.portex.external import PortexExternalType
from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory
from graviti.portex.package import ExternalPackage, Imports, packages
from graviti.portex.param import Param, Params
from graviti.portex.register import ExternalContainerRegister
if TYPE_CHECKING:
from subprocess import CompletedProcess
from graviti.portex.base import PortexType
EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER
_I = TypeVar("_I", bound="BuilderImports")
class PackageRepo:
"""The local git repo of the external Portex package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
"""
_env: Dict[str, Any] = {}
def __init__(self, url: str, revision: str) -> None:
tempdir = Path(gettempdir()) / "portex"
tempdir.mkdir(exist_ok=True)
md5_instance = md5()
md5_instance.update(url.encode("utf-8"))
md5_instance.update(revision.encode("utf-8"))
self._path = tempdir / md5_instance.hexdigest()
self._url = url
self._revision = revision
try:
self._prepare_repo()
except FileNotFoundError:
raise GitNotFoundError() from None
def _prepare_repo(self) -> None:
if not self._path.exists():
self._clone_repo()
elif not self._check_repo_integrity():
rmtree(self._path)
self._clone_repo()
def _run(self, args: List[str]) -> "CompletedProcess[bytes]":
return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True)
def _init_repo(self) -> None:
self._run(["git", "init"])
self._run(["git", "remote", "add", "origin", self._url])
def _shallow_fetch(self) -> None:
self._run(["git", "fetch", "origin", self._revision, "--depth=1"])
self._run(["git", "checkout", "FETCH_HEAD"])
def _deep_fetch(self) -> None:
try:
self._run(["git", "fetch", "origin"])
except CalledProcessError as error:
raise GitCommandError(
"'git fetch' failed, most likely due to the repo url is invalid.",
error,
) from None
try:
self._run(["git", "checkout", self._revision])
except CalledProcessError as error:
raise GitCommandError(
"'git checkout' failed, most likely due to the repo revision is invalid.",
error,
) from None
def _check_repo_integrity(self) -> bool:
try:
result = self._run(["git", "status", "--porcelain"])
except CalledProcessError:
# The git command failed means the git repo has been cleaned or broken
return False
return not bool(result.stdout)
def _clone_repo(self) -> None:
print(f"Cloning repo '{self._url}@{self._revision}'")
path = self._path
path.mkdir()
try:
self._init_repo()
try:
self._shallow_fetch()
except CalledProcessError:
self._deep_fetch()
except (CalledProcessError, GitCommandError, FileNotFoundError):
rmtree(path)
raise
print(f"Cloned to '{path}'")
def get_root(self) -> Path:
"""Get the root directory path of the package repo.
Returns:
The root directory path of the package repo.
Raises:
TypeError: when the "ROOT.yaml" not found or more than one "ROOT.yaml" found.
"""
roots = list(self._path.glob("**/ROOT.yaml"))
if len(roots) == 0:
raise TypeError("No 'ROOT.yaml' file found")
if len(roots) >= 2:
raise TypeError("More than one 'ROOT.yaml' file found")
return roots[0].parent
class PackageBuilder:
"""The builder of the external Portex package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
"""
def __init__(self, url: str, revision: str) -> None:
self.package = ExternalPackage(url, revision)
self._builders = self._create_type_builders()
def __getitem__(self, key: str) -> Type["PortexExternalType"]:
try:
return self.package[key]
except KeyError:
return self._builders.__getitem__(key).build()
def _create_type_builders(self) -> Dict[str, "TypeBuilder"]:
repo = PackageRepo(self.package.url, self.package.revision)
root = repo.get_root()
builders = {}
for yaml_file in root.glob("**/*.yaml"):
if yaml_file.name == "ROOT.yaml":
continue
parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem)
name = ".".join(parts)
builders[name] = TypeBuilder(name, yaml_file, self)
return builders
def build(self) -> ExternalPackage:
"""Build the Portex external package.
Returns:
The builded Portex external package.
"""
for builder in self._builders.values():
if builder.is_building:
continue
builder.build()
return self.package
class TypeBuilder:
"""The builder of the external Portex template type.
Arguments:
name: The name of the Portex template type.
path: The source file path of the Portex template type.
package: The package the Portex template type belongs to.
"""
def __init__(self, name: str, path: Path, builder: PackageBuilder) -> None:
self._name = name
self._path = path
self._builder = builder
self.is_building = False
def build(self) -> Type["PortexExternalType"]:
"""Build the Portex external type.
Returns:
The builded Portex external type.
Raises:
TypeError: Raise when circular reference detected.
"""
if self.is_building:
raise TypeError("Circular reference")
self.is_building = True
with self._path.open() as fp:
content = yaml.load(fp, yaml.Loader)
params_pyobj = content.get("parameters", [])
decl = content["declaration"]
imports = BuilderImports.from_pyobj(content.get("imports", []), self._builder)
factory = TypeFactory(decl, imports)
keys = factory.keys
params = Params.from_pyobj(params_pyobj)
for key, value in params.items():
value.ptype = keys.get(key, PTYPE.Any)
params.add(Param("nullable", False, ptype=PTYPE.Boolean))
class_attrs: Dict[str, Any] = {
"params": params,
"factory": factory,
"package": self._builder.package,
}
if issubclass(factory.class_, PortexRecordBase):
bases: Tuple[Type["PortexType"], ...] = (PortexRecordBase, PortexExternalType)
class_attrs["_fields_factory"] = ConnectedFieldsFactory(
decl, factory.class_, imports, factory.transform_kwargs
)
else:
bases = (PortexExternalType,)
type_ = type(self._name, bases, class_attrs)
self._builder.package[self._name] = type_
return type_
class BuilderImports(Imports):
"""The imports of the Portex template type.
Arguments:
package: The package the portex belongs to.
"""
_builder: PackageBuilder
def __getitem__(self, key: str) -> Type["PortexType"]:
try:
return super().__getitem__(key)
except KeyError:
return self._builder.__getitem__(key)
@classmethod
def from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ
cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder
) -> _I:
"""Create :class:`Imports` instance from python list.
Arguments:
content: A python list representing imported types.
builder: The package builder.
Returns:
A :class:`Imports` instance created from the input python list.
"""
imports = super().from_pyobj(content)
imports._builder = builder # pylint: disable=protected-access
return imports
def build_package(url: str, revision: str) -> ExternalPackage:
"""Build an external package.
Arguments:
url: The git repo url of the external package.
revision: The git repo revision (tag/commit) of the external package.
Returns:
The :class:`ExternalPackage` instance.
"""
builder = PackageBuilder(url, revision)
package = builder.build()
packages.externals[url, revision] = package
return package
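# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): building an
# external package from a git repo and looking up one of its template types.
# The repo url, revision and type name below are placeholders, not real values.
# ---------------------------------------------------------------------------
def _example_build_and_lookup():
    package = build_package("https://example.com/portex-package.git", "v1.0.0")
    # Types are keyed by the dotted path of their yaml file relative to ROOT.yaml.
    return package["some_subdir.some_type"]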
| 29.996815
| 93
| 0.62013
| 1,046
| 9,419
| 5.438815
| 0.218929
| 0.012304
| 0.020566
| 0.021093
| 0.212164
| 0.13113
| 0.115838
| 0.08701
| 0.063104
| 0.063104
| 0
| 0.002503
| 0.278904
| 9,419
| 313
| 94
| 30.092652
| 0.8351
| 0.196199
| 0
| 0.151163
| 0
| 0
| 0.084356
| 0.007305
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104651
| false
| 0
| 0.145349
| 0.005814
| 0.360465
| 0.011628
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862a6e4ef7c112a1f58f960d0cfe8a4298a64c51
| 3,184
|
py
|
Python
|
dffml/operation/mapping.py
|
SGeetansh/dffml
|
04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b
|
[
"MIT"
] | 171
|
2019-03-08T19:02:06.000Z
|
2022-03-29T16:17:23.000Z
|
dffml/operation/mapping.py
|
NikhilBartwal/dffml
|
16180144f388924d9e5840c4aa80d08970af5e60
|
[
"MIT"
] | 1,158
|
2019-03-08T19:07:50.000Z
|
2022-03-25T08:28:27.000Z
|
dffml/operation/mapping.py
|
NikhilBartwal/dffml
|
16180144f388924d9e5840c4aa80d08970af5e60
|
[
"MIT"
] | 183
|
2019-03-10T02:40:56.000Z
|
2022-03-27T18:51:26.000Z
|
from typing import Dict, List, Any
from ..df.types import Definition
from ..df.base import op
from ..util.data import traverse_get
MAPPING = Definition(name="mapping", primitive="map")
MAPPING_TRAVERSE = Definition(name="mapping_traverse", primitive="List[str]")
MAPPING_KEY = Definition(name="key", primitive="str")
MAPPING_VALUE = Definition(name="value", primitive="generic")
@op(
name="dffml.mapping.extract",
inputs={"mapping": MAPPING, "traverse": MAPPING_TRAVERSE},
outputs={"value": MAPPING_VALUE},
)
def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):
"""
Extracts value from a given mapping.
Parameters
----------
mapping : dict
The mapping to extract the value from.
traverse : list[str]
A list of keys to traverse through the mapping dictionary and extract the values.
Returns
-------
dict
A dictionary containing the extracted value.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle)
>>>
>>> dataflow.seed.append(
... Input(
... value=[mapping_extract_value.op.outputs["value"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value={"key1": {"key2": 42}},
... definition=mapping_extract_value.op.inputs["mapping"],
... ),
... Input(
... value=["key1", "key2"],
... definition=mapping_extract_value.op.inputs["traverse"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'value': 42}
"""
return {"value": traverse_get(mapping, *traverse)}
@op(
name="dffml.mapping.create",
inputs={"key": MAPPING_KEY, "value": MAPPING_VALUE},
outputs={"mapping": MAPPING},
)
def create_mapping(key: str, value: Any):
"""
Creates a mapping of a given key and value.
Parameters
----------
key : str
The key for the mapping.
value : Any
The value for the mapping.
Returns
-------
dict
A dictionary containing the mapping created.
Examples
--------
>>> import asyncio
>>> from dffml import *
>>>
>>> dataflow = DataFlow.auto(create_mapping, GetSingle)
>>> dataflow.seed.append(
... Input(
... value=[create_mapping.op.outputs["mapping"].name],
... definition=GetSingle.op.inputs["spec"],
... )
... )
>>> inputs = [
... Input(
... value="key1", definition=create_mapping.op.inputs["key"],
... ),
... Input(
... value=42, definition=create_mapping.op.inputs["value"],
... ),
... ]
>>>
>>> async def main():
... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):
... print(result)
>>>
>>> asyncio.run(main())
{'mapping': {'key1': 42}}
"""
return {"mapping": {key: value}}
| 26.533333
| 89
| 0.557161
| 324
| 3,184
| 5.398148
| 0.20679
| 0.048027
| 0.054317
| 0.036021
| 0.391081
| 0.355632
| 0.230989
| 0.230989
| 0.230989
| 0.166953
| 0
| 0.006009
| 0.268216
| 3,184
| 119
| 90
| 26.756303
| 0.744635
| 0.618719
| 0
| 0.090909
| 0
| 0
| 0.161143
| 0.024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862ab8872e3c569f3400e44a0e697886a1c4335b
| 13,859
|
py
|
Python
|
anchore_engine/services/policy_engine/__init__.py
|
Vijay-P/anchore-engine
|
660a0bf10c56d16f894919209c51ec7a12081e9b
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/__init__.py
|
Vijay-P/anchore-engine
|
660a0bf10c56d16f894919209c51ec7a12081e9b
|
[
"Apache-2.0"
] | null | null | null |
anchore_engine/services/policy_engine/__init__.py
|
Vijay-P/anchore-engine
|
660a0bf10c56d16f894919209c51ec7a12081e9b
|
[
"Apache-2.0"
] | null | null | null |
import time
import sys
import pkg_resources
import os
import retrying
from sqlalchemy.exc import IntegrityError
# anchore modules
import anchore_engine.clients.services.common
import anchore_engine.subsys.servicestatus
import anchore_engine.subsys.metrics
from anchore_engine.subsys import logger
from anchore_engine.configuration import localconfig
from anchore_engine.clients.services import simplequeue, internal_client_for
from anchore_engine.clients.services.simplequeue import SimpleQueueClient
from anchore_engine.service import ApiService, LifeCycleStages
from anchore_engine.services.policy_engine.engine.feeds.feeds import (
VulnerabilityFeed,
NvdV2Feed,
PackagesFeed,
VulnDBFeed,
GithubFeed,
feed_registry,
NvdFeed,
)
# from anchore_engine.subsys.logger import enable_bootstrap_logging
# enable_bootstrap_logging()
from anchore_engine.utils import timer
feed_sync_queuename = "feed_sync_tasks"
system_user_auth = None
feed_sync_msg = {"task_type": "feed_sync", "enabled": True}
# These are user-configurable but mostly for debugging and testing purposes
try:
FEED_SYNC_RETRIES = int(os.getenv("ANCHORE_FEED_SYNC_CHECK_RETRIES", 5))
except ValueError:
logger.exception(
"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5"
)
FEED_SYNC_RETRIES = 5
try:
FEED_SYNC_RETRY_BACKOFF = int(
os.getenv("ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", 5)
)
except ValueError:
logger.exception(
"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5"
)
FEED_SYNC_RETRY_BACKOFF = 5
try:
feed_config_check_retries = int(os.getenv("FEED_CLIENT_CHECK_RETRIES", 3))
except ValueError:
logger.exception(
"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3"
)
feed_config_check_retries = 3
try:
feed_config_check_backoff = int(os.getenv("FEED_CLIENT_CHECK_BACKOFF", 5))
except ValueError:
logger.exception(
"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5"
)
feed_config_check_backoff = 5
# service funcs (must be here)
def _check_feed_client_credentials():
from anchore_engine.services.policy_engine.engine.feeds.client import get_client
sleep_time = feed_config_check_backoff
last_ex = None
for i in range(feed_config_check_retries):
if i > 0:
logger.info(
"Waiting for {} seconds to try feeds client config check again".format(
sleep_time
)
)
time.sleep(sleep_time)
sleep_time += feed_config_check_backoff
try:
logger.info(
"Checking feeds client credentials. Attempt {} of {}".format(
i + 1, feed_config_check_retries
)
)
client = get_client()
client = None
logger.info("Feeds client credentials ok")
return True
except Exception as e:
logger.warn(
"Could not verify feeds endpoint and/or config. Got exception: {}".format(
e
)
)
last_ex = e
else:
if last_ex:
raise last_ex
else:
raise Exception(
"Exceeded retries for feeds client config check. Failing check"
)
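# Worked example of the retry loop above (illustration only): with
# feed_config_check_retries = 3 and feed_config_check_backoff = 5, attempt 1 runs
# immediately, attempt 2 after a 5 second sleep and attempt 3 after a further 10
# second sleep, because sleep_time grows by the backoff after each wait.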
def _system_creds():
global system_user_auth
if not system_user_auth:
config = localconfig.get_config()
system_user_auth = config["system_user_auth"]
return system_user_auth
def process_preflight():
"""
Execute the preflight functions, aborting service startup if any of them raises an uncaught exception or returns False.
:return:
"""
preflight_check_functions = [init_db_content, init_feed_registry]
for fn in preflight_check_functions:
try:
fn()
except Exception as e:
logger.exception(
"Preflight checks failed with error: {}. Aborting service startup".format(
e
)
)
sys.exit(1)
def _init_distro_mappings():
from anchore_engine.db import session_scope, DistroMapping
initial_mappings = [
DistroMapping(from_distro="alpine", to_distro="alpine", flavor="ALPINE"),
DistroMapping(from_distro="busybox", to_distro="busybox", flavor="BUSYB"),
DistroMapping(from_distro="centos", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="debian", to_distro="debian", flavor="DEB"),
DistroMapping(from_distro="fedora", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="ol", to_distro="ol", flavor="RHEL"),
DistroMapping(from_distro="rhel", to_distro="rhel", flavor="RHEL"),
DistroMapping(from_distro="ubuntu", to_distro="ubuntu", flavor="DEB"),
DistroMapping(from_distro="amzn", to_distro="amzn", flavor="RHEL"),
DistroMapping(from_distro="redhat", to_distro="rhel", flavor="RHEL"),
]
# set up any data necessary at system init
try:
logger.info(
"Checking policy engine db initialization. Checking initial set of distro mappings"
)
with session_scope() as dbsession:
distro_mappings = dbsession.query(DistroMapping).all()
for i in initial_mappings:
if not [x for x in distro_mappings if x.from_distro == i.from_distro]:
logger.info("Adding missing mapping: {}".format(i))
dbsession.add(i)
logger.info("Distro mapping initialization complete")
except Exception as err:
if isinstance(err, IntegrityError):
logger.warn("another process has already initialized, continuing")
else:
raise Exception(
"unable to initialize default distro mappings - exception: " + str(err)
)
return True
def init_db_content():
"""
Initialize the policy engine db with any data necessary at startup.
:return:
"""
return _init_distro_mappings()
def init_feed_registry():
# Register feeds; each tuple is (feed class, bool indicating whether it is a distro vulnerability feed)
for cls_tuple in [
(NvdV2Feed, False),
(VulnDBFeed, False),
(VulnerabilityFeed, True),
(PackagesFeed, False),
(GithubFeed, False),
(NvdFeed, False),
]:
logger.info("Registering feed handler {}".format(cls_tuple[0].__feed_name__))
feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1])
def do_feed_sync(msg):
if "FeedsUpdateTask" not in locals():
from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask
if "get_selected_feeds_to_sync" not in locals():
from anchore_engine.services.policy_engine.engine.feeds.sync import (
get_selected_feeds_to_sync,
)
handler_success = False
timer = time.time()
logger.info("FIRING: feed syncer")
try:
feeds = get_selected_feeds_to_sync(localconfig.get_config())
logger.info("Syncing configured feeds: {}".format(feeds))
result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get("data"))
if result is not None:
handler_success = True
else:
logger.warn("Feed sync task marked as disabled, so skipping")
except ValueError as e:
logger.warn("Received msg of wrong type")
except Exception as err:
logger.warn("failure in feed sync handler - exception: " + str(err))
if handler_success:
anchore_engine.subsys.metrics.summary_observe(
"anchore_monitor_runtime_seconds",
time.time() - timer,
function="do_feed_sync",
status="success",
)
else:
anchore_engine.subsys.metrics.summary_observe(
"anchore_monitor_runtime_seconds",
time.time() - timer,
function="do_feed_sync",
status="fail",
)
def handle_feed_sync(*args, **kwargs):
"""
Initiates a feed sync in the system in response to a message from the queue
:param args:
:param kwargs:
:return:
"""
system_user = _system_creds()
logger.info("init args: {}".format(kwargs))
cycle_time = kwargs["mythread"]["cycle_timer"]
while True:
config = localconfig.get_config()
feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True)
if feed_sync_enabled:
logger.info("Feed sync task executor activated")
try:
run_feed_sync(system_user)
except Exception as e:
logger.error("Caught escaped error in feed sync handler: {}".format(e))
finally:
logger.info("Feed sync task executor complete")
else:
logger.info("sync_enabled is set to false in config - skipping feed sync")
time.sleep(cycle_time)
return True
@retrying.retry(
stop_max_attempt_number=FEED_SYNC_RETRIES,
wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,
wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000,
)
def run_feed_sync(system_user):
all_ready = anchore_engine.clients.services.common.check_services_ready(
["simplequeue"]
)
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
raise Exception("Simplequeue service not yet ready")
else:
try:
# This has its own retry on the queue fetch, so wrap with catch block to ensure we don't double-retry on task exec
simplequeue.run_target_with_queue_ttl(
None,
queue=feed_sync_queuename,
target=do_feed_sync,
max_wait_seconds=30,
visibility_timeout=180,
retries=FEED_SYNC_RETRIES,
backoff_time=FEED_SYNC_RETRY_BACKOFF,
)
except Exception as err:
logger.warn("failed to process task this cycle: " + str(err))
def handle_feed_sync_trigger(*args, **kwargs):
"""
Checks to see if there is a task for a feed sync in the queue and if not, adds one.
Interval for firing this should be longer than the expected feed sync duration.
:param args:
:param kwargs:
:return:
"""
system_user = _system_creds()
logger.info("init args: {}".format(kwargs))
cycle_time = kwargs["mythread"]["cycle_timer"]
while True:
config = localconfig.get_config()
feed_sync_enabled = config.get("feeds", {}).get("sync_enabled", True)
if feed_sync_enabled:
logger.info("Feed Sync task creator activated")
try:
push_sync_task(system_user)
logger.info("Feed Sync Trigger done, waiting for next cycle.")
except Exception as e:
logger.error(
"Error caught in feed sync trigger handler after all retries. Will wait for next cycle"
)
finally:
logger.info("Feed Sync task creator complete")
else:
logger.info(
"sync_enabled is set to false in config - skipping feed sync trigger"
)
time.sleep(cycle_time)
return True
@retrying.retry(
stop_max_attempt_number=FEED_SYNC_RETRIES,
wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,
wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000,
)
def push_sync_task(system_user):
all_ready = anchore_engine.clients.services.common.check_services_ready(
["simplequeue"]
)
if not all_ready:
logger.info("simplequeue service not yet ready, will retry")
raise Exception("Simplequeue service not yet ready")
else:
# q_client = SimpleQueueClient(user=system_user[0], password=system_user[1])
q_client = internal_client_for(SimpleQueueClient, userId=None)
if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg):
try:
q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg)
except:
logger.error("Could not enqueue message for a feed sync")
raise
class PolicyEngineService(ApiService):
__service_name__ = "policy_engine"
__spec_dir__ = pkg_resources.resource_filename(__name__, "swagger")
__monitors__ = {
"service_heartbeat": {
"handler": anchore_engine.subsys.servicestatus.handle_service_heartbeat,
"taskType": "handle_service_heartbeat",
"args": [__service_name__],
"cycle_timer": 60,
"min_cycle_timer": 60,
"max_cycle_timer": 60,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
"feed_sync_checker": {
"handler": handle_feed_sync_trigger,
"taskType": "handle_feed_sync_trigger",
"args": [],
"cycle_timer": 600,
"min_cycle_timer": 300,
"max_cycle_timer": 100000,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
"feed_sync": {
"handler": handle_feed_sync,
"taskType": "handle_feed_sync",
"args": [],
"cycle_timer": 3600,
"min_cycle_timer": 1800,
"max_cycle_timer": 100000,
"last_queued": 0,
"last_return": False,
"initialized": False,
},
}
__lifecycle_handlers__ = {
LifeCycleStages.pre_register: [
(process_preflight, None),
]
}
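# Reading the monitor table above: the service heartbeat runs on a 60 second cycle
# timer, the feed sync trigger on a 600 second cycle (min 300, max 100000), and the
# feed sync handler on a 3600 second cycle (min 1800, max 100000).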
| 32.91924
| 126
| 0.637925
| 1,610
| 13,859
| 5.235404
| 0.2
| 0.055048
| 0.024202
| 0.016609
| 0.409539
| 0.34844
| 0.307984
| 0.300629
| 0.248784
| 0.233242
| 0
| 0.007776
| 0.27621
| 13,859
| 420
| 127
| 32.997619
| 0.832519
| 0.076412
| 0
| 0.323077
| 0
| 0
| 0.218511
| 0.029618
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033846
| false
| 0
| 0.061538
| 0
| 0.129231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862c0ef5874a647cec05d7913d882ea14b577a42
| 1,767
|
py
|
Python
|
juriscraper/oral_args/united_states/federal_appellate/scotus.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 228
|
2015-01-23T04:41:39.000Z
|
2022-03-30T09:52:20.000Z
|
juriscraper/oral_args/united_states/federal_appellate/scotus.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 331
|
2015-01-05T18:53:40.000Z
|
2022-03-29T23:43:30.000Z
|
juriscraper/oral_args/united_states/federal_appellate/scotus.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 84
|
2015-01-03T01:19:21.000Z
|
2022-03-01T08:09:32.000Z
|
"""Scraper for Supreme Court of U.S.
CourtID: scotus
Court Short Name: scotus
History:
- 2014-07-20 - Created by Andrei Chelaru, reviewed by MLR
- 2017-10-09 - Updated by MLR.
"""
from datetime import datetime
from juriscraper.OralArgumentSite import OralArgumentSite
class Site(OralArgumentSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = (
"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx"
)
self.back_scrape_iterable = list(range(2010, 2015))
def _get_download_urls(self):
path = "id('list')//tr//a/text()"
return list(map(self._return_download_url, self.html.xpath(path)))
@staticmethod
def _return_download_url(d):
file_type = "mp3" # or 'wma' is also available for any case.
download_url = "http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}".format(
type=file_type, docket_number=d
)
return download_url
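# Example of the URL this produces (the docket number is an arbitrary illustration):
#   _return_download_url("14-556")
#   -> "http://www.supremecourt.gov/media/audio/mp3files/14-556.mp3"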
def _get_case_names(self):
path = "id('list')//tr/td/span/text()"
return [s.lstrip(". ") for s in self.html.xpath(path)]
def _get_case_dates(self):
path = "id('list')//tr/td[2]//text()"
return [
datetime.strptime(s, "%m/%d/%y").date()
for s in self.html.xpath(path)
if not "Date" in s
]
def _get_docket_numbers(self):
path = "id('list')//tr//a/text()"
return list(self.html.xpath(path))
def _download_backwards(self, year):
self.url = (
"http://www.supremecourt.gov/oral_arguments/argument_audio/%s"
% year
)
self.html = self._download()
| 31
| 107
| 0.611771
| 231
| 1,767
| 4.480519
| 0.437229
| 0.038647
| 0.038647
| 0.054106
| 0.291787
| 0.245411
| 0.210628
| 0.166184
| 0.166184
| 0.10628
| 0
| 0.019564
| 0.247878
| 1,767
| 56
| 108
| 31.553571
| 0.759217
| 0.122241
| 0
| 0.102564
| 0
| 0
| 0.205959
| 0.068005
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179487
| false
| 0
| 0.051282
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862c27d164efa5a02f7a2714b410e87587a9e318
| 26,357
|
py
|
Python
|
code/main.py
|
pengzhansun/CF-CAR
|
2e497a4da0bcc80bb327ee041f1aa0107f53bc3f
|
[
"MIT"
] | 8
|
2022-03-19T06:53:43.000Z
|
2022-03-30T06:37:48.000Z
|
code/main.py
|
pengzhansun/CF-CAR
|
2e497a4da0bcc80bb327ee041f1aa0107f53bc3f
|
[
"MIT"
] | 1
|
2022-03-22T12:03:23.000Z
|
2022-03-23T02:40:52.000Z
|
code/main.py
|
pengzhansun/CF-CAR
|
2e497a4da0bcc80bb327ee041f1aa0107f53bc3f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import argparse
import os
import shutil
import time
import numpy as np
import random
from collections import OrderedDict
import torch
import torch.backends.cudnn as cudnn
from callbacks import AverageMeter
from data_utils.causal_data_loader_frames import VideoFolder
from utils import save_results
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Counterfactual CAR')
# Path, dataset and log related arguments
parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/',
help='path to the folder with frames')
parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json',
help='path to the json file with train video meta data')
parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json',
help='path to the json file with validation video meta data')
parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json',
help='path to the json file with ground truth labels')
parser.add_argument('--dataset', default='smth_smth',
help='which dataset to train')
parser.add_argument('--logname', default='my_method',
help='name of the experiment for checkpoints and logs')
parser.add_argument('--print_freq', '-p', default=20, type=int,
metavar='N', help='print frequency (default: 20)')
parser.add_argument('--ckpt', default='./ckpt',
help='folder to output checkpoints')
parser.add_argument('--resume_vision', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_coord', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# model, image&feature dim and training related arguments
parser.add_argument('--model_vision', default='rgb_roi')
parser.add_argument('--model_coord', default='interaction')
parser.add_argument('--model_fusion', default='concat_fusion')
parser.add_argument('--fusion_function', default='fused_sum', type=str,
help='function for fusing activations from each branch')
parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for image-based features')
parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N',
help='intermediate feature dimension for coord-based features')
parser.add_argument('--size', default=224, type=int, metavar='N',
help='primary image input size')
parser.add_argument('--num_boxes', default=4, type=int,
help='num of boxes for each image')
parser.add_argument('--num_frames', default=16, type=int,
help='num of frames for the model')
parser.add_argument('--num_classes', default=174, type=int,
help='num of class in the model')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start_epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size', '-b', default=16, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs="+",
metavar='LRSteps', help='epochs to decay learning rate by 10')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--clip_gradient', '-cg', default=5, type=float,
metavar='W', help='gradient norm clipping (default: 5)')
parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides')
# train mode, hardware setting and others related arguments
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set')
parser.add_argument('--parallel', default=True, type=bool,
help='whether or not train with multi GPUs')
parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu you want to use')
best_loss = 1000000
def main():
global args, best_loss
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
print(args)
# create vision model
if args.model_vision == 'global_i3d':
from model.model_lib import VideoGlobalModel as RGBModel
print('global_i3d loaded!!')
elif args.model_vision == 'rgb_roi':
from model.model_lib import BboxVisualModel as RGBModel
print('rgb_roi loaded!!')
else:
print("no such a vision model!")
# create coord model
if args.model_coord == 'interaction':
from model.model_lib import BboxInteractionLatentModel as BboxModel
print('interaction loaded!!')
else:
print("no such a coordinate model!")
# create fusion model
if args.model_fusion == 'concat_fusion':
from model.model_lib import ConcatFusionModel as FusionModel
print('concat_fusion loaded!!')
else:
print('no such a fusion model!')
# load model branch
vision_model = RGBModel(args)
coord_model = BboxModel(args)
fusion_model = FusionModel(args)
# create the fusion function for the activation of three branches
if args.fusion_function == 'fused_sum':
from fusion_function import logsigsum as fusion_func
print('fused_sum loaded!!')
elif args.fusion_function == 'naive_sum':
from fusion_function import naivesum as fusion_func
print('naive_sum loaded!!')
else:
print('no such a fusion function!')
fusion_function = fusion_func()
if args.parallel:
vision_model = torch.nn.DataParallel(vision_model).cuda()
coord_model = torch.nn.DataParallel(coord_model).cuda()
fusion_model = torch.nn.DataParallel(fusion_model).cuda()
else:
vision_model = vision_model.cuda()
coord_model = coord_model.cuda()
fusion_model = fusion_model.cuda()
# optionally resume vision model from a checkpoint
if args.resume_vision:
assert os.path.isfile(args.resume_vision), "No checkpoint found at '{}'".format(args.resume_vision)
print("=> loading checkpoint '{}'".format(args.resume_vision))
checkpoint = torch.load(args.resume_vision)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
vision_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_vision, checkpoint['epoch']))
# optionally resume coord model from a checkpoint
if args.resume_coord:
assert os.path.isfile(args.resume_coord), "No checkpoint found at '{}'".format(args.resume_coord)
print("=> loading checkpoint '{}'".format(args.resume_coord))
checkpoint = torch.load(args.resume_coord)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
coord_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_coord, checkpoint['epoch']))
if args.resume_fusion:
assert os.path.isfile(args.resume_fusion), "No checkpoint found at '{}'".format(args.resume_fusion)
print("=> loading checkpoint '{}'".format(args.resume_fusion))
checkpoint = torch.load(args.resume_fusion)
if args.start_epoch is None:
args.start_epoch = checkpoint['epoch']
best_loss = checkpoint['best_loss']
fusion_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume_fusion, checkpoint['epoch']))
if args.start_epoch is None:
args.start_epoch = 0
cudnn.benchmark = True
# create training and validation dataset
dataset_train = VideoFolder(root=args.root_frames,
num_boxes=args.num_boxes,
file_input=args.json_data_train,
file_labels=args.json_file_labels,
frames_duration=args.num_frames,
args=args,
is_val=False,
if_augment=True,
)
dataset_val = VideoFolder(root=args.root_frames,
num_boxes=args.num_boxes,
file_input=args.json_data_val,
file_labels=args.json_file_labels,
frames_duration=args.num_frames,
args=args,
is_val=True,
if_augment=True,
)
# create training and validation loader
train_loader = torch.utils.data.DataLoader(
dataset_train,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, drop_last=True,
pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
dataset_val, drop_last=True,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=False
)
model_list = [vision_model, coord_model, fusion_model]
optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()),
momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)
optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion]
criterion = torch.nn.CrossEntropyLoss()
search_list = np.linspace(0.0, 1.0, 11)
# factual inference (vanilla test stage)
if args.evaluate:
validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict)
return
# Counterfactual inference by trying a list of hyperparameter
if args.cf_inference_group:
cf_inference_group(val_loader, model_list, fusion_function, search_list,
class_to_idx=dataset_val.classes_dict)
return
print('training begin...')
for epoch in tqdm(range(args.start_epoch, args.epochs)):
adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision')
adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord')
adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion')
# train for one epoch
train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion)
if (epoch+1) >= 30 and (epoch + 1) % args.search_stride == 0:
loss = validate(val_loader, model_list, fusion_function, criterion,
epoch=epoch, class_to_idx=dataset_val.classes_dict)
else:
loss = 100
# remember best loss and save checkpoint
is_best = loss < best_loss
best_loss = min(loss, best_loss)
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': vision_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname)))
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': coord_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname)))
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': fusion_model.state_dict(),
'best_loss': best_loss,
},
is_best,
os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname)))
def train(train_loader, model_list, fusion_function,
optimizer_list, epoch, criterion):
global args
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
# load three model branches
[vision_model, coord_model, fusion_model] = model_list
# load the three branch optimizers (a fourth, for the uniform assumption, is not used here)
[optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list
# switch to train mode
vision_model.train()
coord_model.train()
fusion_model.train()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader):
data_time.update(time.time() - end)
# obtain the activation and vision features from vision branch
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)
output_vision = output_vision.view((-1, len(train_loader.dataset.classes)))
# obtain the activation and coordinate features from coordinate branch
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)
output_coord = output_coord.view((-1, len(train_loader.dataset.classes)))
# detach the features so fusion-branch gradients do not flow back into the vision and coord branches
feature_vision_detached = feature_vision.detach()
feature_coord_detached = feature_coord.detach()
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())
output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes)))
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# loss_fusion is computed on the fusion-branch output; loss_factual on the fused output from fusion_function
loss_vision = criterion(output_vision, video_label.long().cuda())
loss_coord = criterion(output_coord, video_label.long().cuda())
loss_fusion = criterion(output_fusion, video_label.long().cuda())
loss_factual = criterion(output_factual, video_label.long().cuda())
# Measure the accuracy of the sum of three branch activation results
acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5))
# record the accuracy and loss
losses.update(loss_factual.item(), global_img_tensors.size(0))
acc_top1.update(acc1.item(), global_img_tensors.size(0))
acc_top5.update(acc5.item(), global_img_tensors.size(0))
# refresh the optimizer
optimizer_vision.zero_grad()
optimizer_coord.zero_grad()
optimizer_fusion.zero_grad()
loss = loss_vision + loss_coord + loss_factual
loss.backward()
if args.clip_gradient is not None:
torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient)
# update the parameter
optimizer_vision.step()
optimizer_coord.step()
optimizer_fusion.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t'
'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses,
acc_top1=acc_top1, acc_top5=acc_top5))
def validate(val_loader, model_list, fusion_function, criterion,
epoch=None, class_to_idx=None):
batch_time = AverageMeter()
losses = AverageMeter()
acc_top1 = AverageMeter()
acc_top5 = AverageMeter()
logits_matrix = []
targets_list = []
# unpack three models
[vision_model, coord_model, fusion_model] = model_list
# switch to evaluate mode
vision_model.eval()
coord_model.eval()
fusion_model.eval()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):
# compute output
with torch.no_grad():
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)
output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))
# detach the features to keep the branch outputs independent
feature_vision_detached = feature_vision.detach()
feature_coord_detached = feature_coord.detach()
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())
output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))
# fuse three outputs
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# note: in validation, loss_fusion is computed on the fused output (output_factual) from fusion_function
loss_vision = criterion(output_vision, video_label.long().cuda())
loss_coord = criterion(output_coord, video_label.long().cuda())
loss_fusion = criterion(output_factual, video_label.long().cuda())
# use the fused output for the accuracy statistics and the vision-branch loss as the tracked loss
output = output_factual
loss = loss_vision
acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5))
if args.evaluate:
logits_matrix.append(output.cpu().data.numpy())
targets_list.append(video_label.cpu().numpy())
# measure accuracy and record loss
losses.update(loss.item(), global_img_tensors.size(0))
acc_top1.update(acc1.item(), global_img_tensors.size(0))
acc_top5.update(acc5.item(), global_img_tensors.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i + 1 == len(val_loader):
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\t'
'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\t'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
acc_top1=acc_top1, acc_top5=acc_top5,
))
if args.evaluate:
logits_matrix = np.concatenate(logits_matrix)
targets_list = np.concatenate(targets_list)
save_results(logits_matrix, targets_list, class_to_idx, args)
return losses.avg
def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None):
batch_time = AverageMeter()
search_length = len(search_list)
search_dict = {}
for i in range(search_length):
search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()
search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()
[vision_model, coord_model, fusion_model] = model_list
# switch to evaluate mode
vision_model.eval()
coord_model.eval()
fusion_model.eval()
end = time.time()
for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):
# compute output
with torch.no_grad():
# factual inference
output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(),
video_label)
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))
output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(),
video_label)
output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))
# obtain the activation of fusion branch
output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda())
output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))
# fuse three outputs
output_factual = fusion_function(output_vision, output_coord, output_fusion)
# counterfactual inference
output_vision_subtrahend = output_vision
output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0))
for j in range(search_length):
weight = search_list[j]
output_debiased = output_factual - output_counterfactual * weight
acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5))
search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0))
search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i + 1 == len(val_loader):
print('Cf-Inference: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\t'
'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\t'
'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\t'
'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\t'
'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format(
i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'],
acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'],
acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0']))
for k in range(search_length):
print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg,
search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg)
return
def save_checkpoint(state, is_best, filename):
torch.save(state, filename + '_latest.pth.tar')
if is_best:
shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar')
def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None):
"""Sets the learning rate to the initial LR decayed by 10"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
if branch_name == 'vision':
for param_group in optimizer.param_groups:
param_group['lr'] = lr * 0.8
elif branch_name == 'coord':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
elif branch_name == 'fusion':
for param_group in optimizer.param_groups:
param_group['lr'] = lr
else:
for param_group in optimizer.param_groups:
param_group['lr'] = lr
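# Worked example (illustration only): with lr_steps = [24, 35, 45] and epoch = 36,
# two milestones have been passed, so decay = 0.1 ** 2 = 0.01; the vision branch
# then runs at args.lr * 0.01 * 0.8 while the coord and fusion branches run at
# args.lr * 0.01.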
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
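# Tiny worked example of accuracy() (illustration only):
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]]); target = torch.tensor([1, 1])
#   accuracy(output, target, topk=(1,)) -> [tensor([50.])]   # one of two top-1 hits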
if __name__ == '__main__':
main()
| 45.679376
| 133
| 0.624009
| 3,226
| 26,357
| 4.857099
| 0.124923
| 0.020103
| 0.037973
| 0.007658
| 0.580382
| 0.514391
| 0.472908
| 0.423001
| 0.402706
| 0.360521
| 0
| 0.015175
| 0.262435
| 26,357
| 576
| 134
| 45.758681
| 0.790844
| 0.072163
| 0
| 0.307512
| 0
| 0
| 0.156571
| 0.024136
| 0
| 0
| 0
| 0
| 0.007042
| 1
| 0.016432
| false
| 0
| 0.044601
| 0
| 0.07277
| 0.06338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862cf0dcbc5e00c994d2c00c5e16de0409816e8b
| 1,004
|
py
|
Python
|
Betsy/Betsy/modules/get_illumina_control.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 9
|
2017-01-13T02:38:41.000Z
|
2021-04-08T00:44:39.000Z
|
Betsy/Betsy/modules/get_illumina_control.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | null | null | null |
Betsy/Betsy/modules/get_illumina_control.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 4
|
2017-01-05T16:25:25.000Z
|
2019-12-12T20:07:38.000Z
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
import os
import shutil
from genomicode import filelib
in_data = antecedents
result_files = os.listdir(in_data.identifier)
for result_file in result_files:
if '-controls' in result_file:
goal_file = os.path.join(in_data.identifier, result_file)
shutil.copyfile(goal_file, outfile)
assert filelib.exists_nz(outfile), (
'the output file %s for illu_control fails' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'control_illumina_' + original_file + '.gct'
return filename
| 29.529412
| 76
| 0.645418
| 111
| 1,004
| 5.54955
| 0.495496
| 0.029221
| 0.051948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.286853
| 1,004
| 33
| 77
| 30.424242
| 0.860335
| 0
| 0
| 0
| 0
| 0
| 0.070929
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.125
| false
| 0
| 0.208333
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
862fb7f41889fb9ebdc1d283480d889b7dbfd294
| 3,144
|
py
|
Python
|
py/WatchDialog.py
|
mathematicalmichael/SpringNodes
|
3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab
|
[
"MIT"
] | 51
|
2015-09-25T09:30:57.000Z
|
2022-01-19T14:16:44.000Z
|
py/WatchDialog.py
|
sabeelcoder/SpringNodes
|
e21a24965474d54369e74d23c06f8c42a7b926b5
|
[
"MIT"
] | 66
|
2015-09-30T02:43:32.000Z
|
2022-03-31T02:26:52.000Z
|
py/WatchDialog.py
|
sabeelcoder/SpringNodes
|
e21a24965474d54369e74d23c06f8c42a7b926b5
|
[
"MIT"
] | 48
|
2015-11-19T01:34:47.000Z
|
2022-02-25T17:26:48.000Z
|
# Copyright(c) 2017, Dimitar Venkov
# @5devene, dimitar.ven@gmail.com
# www.badmonkeys.net
import clr
clr.AddReference('System.Windows.Forms')
clr.AddReference('System.Drawing')
from System.Drawing import Point, Color, Font
from System.Windows.Forms import *
from cStringIO import StringIO
str_file = StringIO()
size1 = [30, 23] #height, width
def tolist(obj1):
if hasattr(obj1,"__iter__"): return obj1
else: return [obj1]
def write_str(str1, GCL, str_file=str_file, size1=size1):
ln1 = len(str1)
if ln1 > size1[1]:
size1[1] = ln1
str_file.write("%s%s\n" % ("".join(GCL), str1) )
def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1):
if GCL is None:
GCL = []
GCint += 1
GCL.append(None)
for i, x in enumerate(l1):
GCL[GCint] = "[%i] " % i if writeInd else " "
if hasattr(x, "Id"): #is element
write_str("%s %i" % (x.ToString(), x.Id), GCL)
elif hasattr(x, "__iter__"):
if not x:
write_str("Empty List", GCL)
else:
list2str(x, writeInd, GCL, GCint, size1)
elif x is None:
write_str("null", GCL)
else:
write_str(x.ToString(), GCL)
size1[0] += 19
GCL.pop(GCint)
GCint -= 1
class WatchBox(Form):
def __init__(self, t1):
self.Text = "SpringNodes: Expandable Watch Window"
self.BackColor = Color.FromArgb(40,40,40)
self.ControlBox = False
self.TopMost = True
self.FormBorderStyle = FormBorderStyle.Sizable
self.StartPosition = FormStartPosition.CenterScreen
self.Resize += self.resize1
self.text1 = None
self.button1 = Button()
self.button1.Text = 'Close'
self.button1.Font = Font("Calibri", 10)
self.button1.AutoSize = True
self.button1.Width = 200
self.button1.ForeColor = Color.FromArgb(234,234,234)
self.button1.Click += self.save
self.Controls.Add(self.button1)
self.box1 = RichTextBox()
self.box1.Multiline = True
self.box1.Location = Point(5, 5)
self.box1.Font = Font("Calibri", 12)
self.box1.BackColor = Color.FromArgb(53,53,53)
self.box1.ForeColor = Color.FromArgb(234,234,234)
self.box1.DetectUrls = True
self.box1.Text = t1
self.Controls.Add(self.box1)
def adjust_controls(self, height1, width1):
if height1 > 800:
height1 = 800
self.box1.ScrollBars = RichTextBoxScrollBars.Vertical
if width1 < 23 : width1 = 23
if width1 > 88: width1 = 88
self.Width = 10 + (width1 + 2) * 9 #character width seems to vary between PCs
self.Height = height1 + 90
self.box1.Width = self.Width - 17
self.box1.Height = self.Height - 80
self.button1.Location = Point(self.Width/2 - 103, self.Height - 70)
def resize1(self, sender, event):
if self.Width < 210: self.Width = 230
if self.Height < 120: self.Height = 120
self.box1.Width = self.Width - 17
self.box1.Height = self.Height - 80
self.button1.Location = Point(self.Width/2 - 103, self.Height - 70)
def save(self, sender, event):
self.text1 = self.box1.Text
self.Close()
l1 = [] if IN[0] is None else tolist(IN[0])
list2str(l1, IN[1])
str_content = str_file.getvalue()
str_file.close()
width1 = 100
form = WatchBox(str_content)
form.adjust_controls(*size1)
Application.Run(form)
OUT = form.text1
Application.Exit()
form.Dispose()
| 27.578947
| 79
| 0.688613
| 469
| 3,144
| 4.558635
| 0.326226
| 0.056127
| 0.019645
| 0.023386
| 0.128157
| 0.128157
| 0.128157
| 0.095416
| 0.095416
| 0.095416
| 0
| 0.070417
| 0.168893
| 3,144
| 114
| 80
| 27.578947
| 0.747799
| 0.047074
| 0
| 0.084211
| 0
| 0
| 0.048829
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073684
| false
| 0
| 0.042105
| 0
| 0.126316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86311bc6fef14e7f3a84f443854c9a8a4139ce52
| 2,508
|
py
|
Python
|
pyscf/nao/m_comp_coulomb_pack.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 2
|
2019-05-28T05:25:56.000Z
|
2019-11-09T02:16:43.000Z
|
pyscf/nao/m_comp_coulomb_pack.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 2
|
2019-09-16T17:58:31.000Z
|
2019-09-22T17:26:01.000Z
|
pyscf/nao/m_comp_coulomb_pack.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 1
|
2019-11-09T02:13:16.000Z
|
2019-11-09T02:13:16.000Z
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from pyscf.nao.m_coulomb_am import coulomb_am
import numpy as np
try:
import numba as nb
from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril
use_numba = True
except:
use_numba = False
#
#
#
def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs):
"""
Computes the matrix elements given by funct, for instance the Coulomb interaction.
Args:
sv : system variables; must provide arrays of coordinates, species, etc.
ao_log : description of functions (either orbitals or product basis functions)
Returns:
matrix elements for the whole system in packed form (lower triangular part)
"""
from pyscf.nao.m_ao_matelem import ao_matelem_c
from pyscf.nao.m_pack2den import ij2pack_l
aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp)
me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log)
atom2s = np.zeros((sv.natm+1), dtype=np.int64)
for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]
norbs = atom2s[-1]
res = np.zeros(norbs*(norbs+1)//2, dtype=dtype)
for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
#print("atom1 = {0}, rv1 = {1}".format(atom1, rv1))
for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):
if atom2>atom1: continue # skip
oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs)
if use_numba:
fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs)
else:
for i1 in range(s1,f1):
for i2 in range(s2, min(i1+1, f2)):
res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2]
#print("number call = ", count)
#print("sum kernel: {0:.6f}".format(np.sum(abs(res))))
#np.savetxt("kernel_pyscf.txt", res)
#import sys
#sys.exit()
return res, norbs
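# Sketch of the packed layout assumed above (the exact index convention lives in
# ij2pack_l, so treat this as an assumption): only pairs with i1 >= i2 are stored,
# which is why res holds norbs*(norbs+1)//2 entries and the atom2 > atom1 case can
# be skipped, the missing block being recovered by symmetry.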
| 38
| 92
| 0.702153
| 407
| 2,508
| 4.223587
| 0.454545
| 0.020361
| 0.027923
| 0.03025
| 0.075625
| 0.075625
| 0.055846
| 0.055846
| 0.055846
| 0.055846
| 0
| 0.042969
| 0.183413
| 2,508
| 65
| 93
| 38.584615
| 0.796387
| 0.443381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.241379
| 0
| 0.310345
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
863143ad0e8c0560ad9359d49f02a31a8146a084
| 2,338
|
py
|
Python
|
nova/tests/unit/test_service_auth.py
|
panguan737/nova
|
0d177185a439baa228b42c948cab4e934d6ac7b8
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/test_service_auth.py
|
panguan737/nova
|
0d177185a439baa228b42c948cab4e934d6ac7b8
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/unit/test_service_auth.py
|
panguan737/nova
|
0d177185a439baa228b42c948cab4e934d6ac7b8
|
[
"Apache-2.0"
] | 1
|
2020-11-02T10:17:13.000Z
|
2020-11-02T10:17:13.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
import mock
import nova.conf
from nova import context
from nova import service_auth
from nova import test
CONF = nova.conf.CONF
class ServiceAuthTestCase(test.NoDBTestCase):
def setUp(self):
super(ServiceAuthTestCase, self).setUp()
self.ctx = context.RequestContext('fake', 'fake')
self.addCleanup(service_auth.reset_globals)
@mock.patch.object(ks_loading, 'load_auth_from_conf_options')
def test_get_auth_plugin_no_wraps(self, mock_load):
context = mock.MagicMock()
context.get_auth_plugin.return_value = "fake"
result = service_auth.get_auth_plugin(context)
self.assertEqual("fake", result)
mock_load.assert_not_called()
@mock.patch.object(ks_loading, 'load_auth_from_conf_options')
def test_get_auth_plugin_wraps(self, mock_load):
self.flags(send_service_user_token=True, group='service_user')
result = service_auth.get_auth_plugin(self.ctx)
self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper)
@mock.patch.object(ks_loading, 'load_auth_from_conf_options',
return_value=None)
def test_get_auth_plugin_wraps_bad_config(self, mock_load):
"""Tests the case that send_service_user_token is True but there
is some misconfiguration with the [service_user] section which makes
KSA return None for the service user auth.
"""
self.flags(send_service_user_token=True, group='service_user')
result = service_auth.get_auth_plugin(self.ctx)
self.assertEqual(1, mock_load.call_count)
self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)
| 37.709677
| 79
| 0.732678
| 317
| 2,338
| 5.182965
| 0.394322
| 0.029823
| 0.055386
| 0.031041
| 0.256239
| 0.256239
| 0.21972
| 0.21972
| 0.21972
| 0.21972
| 0
| 0.003706
| 0.192044
| 2,338
| 61
| 80
| 38.327869
| 0.866067
| 0.309666
| 0
| 0.1875
| 0
| 0
| 0.076923
| 0.051494
| 0
| 0
| 0
| 0
| 0.15625
| 1
| 0.125
| false
| 0
| 0.21875
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86319d7588f06ccfd5e5e22eadc702136a0fe831
| 552
|
py
|
Python
|
classification/model/build_gen.py
|
LittleWat/MCD_DA
|
37cb1bc38c203702e22c7c0c37e284d0294714fb
|
[
"MIT"
] | 464
|
2018-04-04T22:38:44.000Z
|
2022-03-12T15:46:49.000Z
|
classification/model/build_gen.py
|
seqam-lab/MCD_DA
|
af10217c5c5451dcd8bc3e975a7d067c285cc029
|
[
"MIT"
] | 28
|
2018-05-05T20:01:31.000Z
|
2022-01-16T05:07:56.000Z
|
classification/model/build_gen.py
|
seqam-lab/MCD_DA
|
af10217c5c5451dcd8bc3e975a7d067c285cc029
|
[
"MIT"
] | 147
|
2018-04-10T08:44:10.000Z
|
2021-12-28T02:14:38.000Z
|
import svhn2mnist
import usps
import syn2gtrsb
import syndig2svhn
def Generator(source, target, pixelda=False):
if source == 'usps' or target == 'usps':
return usps.Feature()
elif source == 'svhn':
return svhn2mnist.Feature()
elif source == 'synth':
return syn2gtrsb.Feature()
def Classifier(source, target):
if source == 'usps' or target == 'usps':
return usps.Predictor()
if source == 'svhn':
return svhn2mnist.Predictor()
if source == 'synth':
return syn2gtrsb.Predictor()
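# Minimal usage sketch (not part of the original module), using dataset names the
# branches above dispatch on:
#   G = Generator('svhn', 'mnist')    # -> svhn2mnist.Feature()
#   C = Classifier('svhn', 'mnist')   # -> svhn2mnist.Predictor()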
| 24
| 45
| 0.63587
| 60
| 552
| 5.85
| 0.333333
| 0.091168
| 0.068376
| 0.079772
| 0.193732
| 0.193732
| 0.193732
| 0.193732
| 0
| 0
| 0
| 0.016787
| 0.244565
| 552
| 22
| 46
| 25.090909
| 0.82494
| 0
| 0
| 0.111111
| 0
| 0
| 0.061706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
86329776e65dca78e6c2604731e8b04b13e73992
| 1,318
|
py
|
Python
|
deep_table/nn/models/loss/info_nce_loss.py
|
pfnet-research/deep-table
|
a19c0c3048484017d5f24806604c3b3470bcf550
|
[
"MIT"
] | 48
|
2021-09-30T08:14:26.000Z
|
2022-03-02T12:20:08.000Z
|
deep_table/nn/models/loss/info_nce_loss.py
|
pfnet-research/deep-table
|
a19c0c3048484017d5f24806604c3b3470bcf550
|
[
"MIT"
] | 1
|
2021-11-08T11:41:49.000Z
|
2021-11-08T11:41:49.000Z
|
deep_table/nn/models/loss/info_nce_loss.py
|
pfnet-research/deep-table
|
a19c0c3048484017d5f24806604c3b3470bcf550
|
[
"MIT"
] | 2
|
2021-12-31T03:43:48.000Z
|
2022-03-11T09:04:21.000Z
|
import torch
from torch import Tensor
from torch.nn.modules.loss import _Loss
class InfoNCELoss(_Loss):
"""Info NCE Loss. A type of contrastive loss function used for self-supervised learning.
References:
A. Oord, Y. Li, and O. Vinyals,
"Representation Learning with Contrastive Predictive Coding,"
ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2>
"""
def __init__(self, reduction: str = "sum") -> None:
"""
Args:
reduction (str)
"""
super().__init__(reduction=reduction)
self.reduction = reduction
def forward(self, z_origin: Tensor, z_noisy: Tensor, t: float = 0.7) -> Tensor:
sim = cos_sim_matrix(z_origin, z_noisy)
exp_sim = torch.exp(sim / t)
loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1))
if self.reduction == "sum":
loss = loss.sum()
elif self.reduction == "mean":
loss = loss.mean()
return loss
def cos_sim_matrix(a: Tensor, b: Tensor, eps: float = 1e-8) -> Tensor:
a_n, b_n = a.norm(dim=1), b.norm(dim=1)
a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps)
b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps)
sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1))
return sim_matrix
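# A minimal, self-contained usage sketch (not part of the original module); the
# batch size and embedding dimension are arbitrary illustration values.
if __name__ == "__main__":
    torch.manual_seed(0)
    z_a = torch.randn(8, 16)                  # original-view embeddings
    z_b = z_a + 0.1 * torch.randn(8, 16)      # augmented/noisy-view embeddings
    loss_fn = InfoNCELoss(reduction="mean")
    print(loss_fn(z_a, z_b))                  # scalar InfoNCE loss for the batch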
| 32.95
| 92
| 0.615326
| 189
| 1,318
| 4.126984
| 0.428571
| 0.066667
| 0.030769
| 0.035897
| 0.04359
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034518
| 0.252656
| 1,318
| 39
| 93
| 33.794872
| 0.75736
| 0.226859
| 0
| 0
| 0
| 0
| 0.010406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8633d44756a388da352b3bc3dd3c8cfc0eeaabfe
| 19,830
|
py
|
Python
|
patroni/config.py
|
korkin25/patroni
|
333d41d9f039b5a799940c8a6fbc75dcbe0e9a31
|
[
"MIT"
] | null | null | null |
patroni/config.py
|
korkin25/patroni
|
333d41d9f039b5a799940c8a6fbc75dcbe0e9a31
|
[
"MIT"
] | null | null | null |
patroni/config.py
|
korkin25/patroni
|
333d41d9f039b5a799940c8a6fbc75dcbe0e9a31
|
[
"MIT"
] | null | null | null |
import json
import logging
import os
import shutil
import tempfile
import yaml
from collections import defaultdict
from copy import deepcopy
from patroni import PATRONI_ENV_PREFIX
from patroni.exceptions import ConfigParseError
from patroni.dcs import ClusterConfig
from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler
from patroni.utils import deep_compare, parse_bool, parse_int, patch_config
logger = logging.getLogger(__name__)
_AUTH_ALLOWED_PARAMETERS = (
'username',
'password',
'sslmode',
'sslcert',
'sslkey',
'sslpassword',
'sslrootcert',
'sslcrl',
'sslcrldir',
'gssencmode',
'channel_binding'
)
def default_validator(conf):
if not conf:
return "Config is empty."
class Config(object):
"""
This class is responsible for:
1) Building and giving access to `effective_configuration` from:
* `Config.__DEFAULT_CONFIG` -- some sane default values
* `dynamic_configuration` -- configuration stored in DCS
* `local_configuration` -- configuration from `config.yml` or environment
2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file
located in local_configuration['postgresql']['data_dir'] directory.
This is necessary to be able to restore `dynamic_configuration`
if DCS was accidentally wiped
3) Loading of configuration file in the old format and converting it into new format
4) Mimicking some of the `dict` interfaces to make it possible
to work with it as with the old `config` object.
"""
PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION'
__CACHE_FILENAME = 'patroni.dynamic.json'
__DEFAULT_CONFIG = {
'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10,
'maximum_lag_on_failover': 1048576,
'maximum_lag_on_syncnode': -1,
'check_timeline': False,
'master_start_timeout': 300,
'master_stop_timeout': 0,
'synchronous_mode': False,
'synchronous_mode_strict': False,
'synchronous_node_count': 1,
'standby_cluster': {
'create_replica_methods': '',
'host': '',
'port': '',
'primary_slot_name': '',
'restore_command': '',
'archive_cleanup_command': '',
'recovery_min_apply_delay': ''
},
'postgresql': {
'bin_dir': '',
'use_slots': True,
'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items()
if p not in ('wal_keep_segments', 'wal_keep_size')})
},
'watchdog': {
'mode': 'automatic',
}
}
def __init__(self, configfile, validator=default_validator):
self._modify_index = -1
self._dynamic_configuration = {}
self.__environment_configuration = self._build_environment_configuration()
# Patroni reads the configuration from the command-line argument if it exists, otherwise from the environment
self._config_file = configfile and os.path.exists(configfile) and configfile
if self._config_file:
self._local_configuration = self._load_config_file()
else:
config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None)
self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration
if validator:
error = validator(self._local_configuration)
if error:
raise ConfigParseError(error)
self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration)
self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', "")
self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME)
self._load_cache()
self._cache_needs_saving = False
@property
def config_file(self):
return self._config_file
@property
def dynamic_configuration(self):
return deepcopy(self._dynamic_configuration)
def check_mode(self, mode):
return bool(parse_bool(self._dynamic_configuration.get(mode)))
def _load_config_path(self, path):
"""
If path is a file, loads the yml file pointed to by path.
If path is a directory, loads all yml files in that directory in alphabetical order
"""
if os.path.isfile(path):
files = [path]
elif os.path.isdir(path):
files = [os.path.join(path, f) for f in sorted(os.listdir(path))
if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))]
else:
logger.error('config path %s is neither directory nor file', path)
raise ConfigParseError('invalid config path')
overall_config = {}
for fname in files:
with open(fname) as f:
config = yaml.safe_load(f)
patch_config(overall_config, config)
return overall_config
def _load_config_file(self):
"""Loads config.yaml from filesystem and applies some values which were set via ENV"""
config = self._load_config_path(self._config_file)
patch_config(config, self.__environment_configuration)
return config
def _load_cache(self):
if os.path.isfile(self._cache_file):
try:
with open(self._cache_file) as f:
self.set_dynamic_configuration(json.load(f))
except Exception:
logger.exception('Exception when loading file: %s', self._cache_file)
def save_cache(self):
if self._cache_needs_saving:
tmpfile = fd = None
try:
(fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir)
with os.fdopen(fd, 'w') as f:
fd = None
json.dump(self.dynamic_configuration, f)
tmpfile = shutil.move(tmpfile, self._cache_file)
self._cache_needs_saving = False
except Exception:
logger.exception('Exception when saving file: %s', self._cache_file)
if fd:
try:
os.close(fd)
except Exception:
logger.error('Can not close temporary file %s', tmpfile)
if tmpfile and os.path.exists(tmpfile):
try:
os.remove(tmpfile)
except Exception:
logger.error('Can not remove temporary file %s', tmpfile)
# configuration could be either ClusterConfig or dict
def set_dynamic_configuration(self, configuration):
if isinstance(configuration, ClusterConfig):
if self._modify_index == configuration.modify_index:
return False # If the index didn't change, there is nothing to do
self._modify_index = configuration.modify_index
configuration = configuration.data
if not deep_compare(self._dynamic_configuration, configuration):
try:
self.__effective_configuration = self._build_effective_configuration(configuration,
self._local_configuration)
self._dynamic_configuration = configuration
self._cache_needs_saving = True
return True
except Exception:
logger.exception('Exception when setting dynamic_configuration')
def reload_local_configuration(self):
if self.config_file:
try:
configuration = self._load_config_file()
if not deep_compare(self._local_configuration, configuration):
new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration)
self._local_configuration = configuration
self.__effective_configuration = new_configuration
return True
else:
logger.info('No local configuration items changed.')
except Exception:
logger.exception('Exception when reloading local configuration from %s', self.config_file)
@staticmethod
def _process_postgresql_parameters(parameters, is_local=False):
return {name: value for name, value in (parameters or {}).items()
if name not in ConfigHandler.CMDLINE_OPTIONS or
not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)}
def _safe_copy_dynamic_configuration(self, dynamic_configuration):
config = deepcopy(self.__DEFAULT_CONFIG)
for name, value in dynamic_configuration.items():
if name == 'postgresql':
for name, value in (value or {}).items():
if name == 'parameters':
config['postgresql'][name].update(self._process_postgresql_parameters(value))
elif name not in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'):
config['postgresql'][name] = deepcopy(value)
elif name == 'standby_cluster':
for name, value in (value or {}).items():
if name in self.__DEFAULT_CONFIG['standby_cluster']:
config['standby_cluster'][name] = deepcopy(value)
elif name in config: # only variables present in __DEFAULT_CONFIG allowed to be overridden from DCS
if name in ('synchronous_mode', 'synchronous_mode_strict'):
config[name] = value
else:
config[name] = int(value)
return config
@staticmethod
def _build_environment_configuration():
ret = defaultdict(dict)
def _popenv(name):
return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None)
for param in ('name', 'namespace', 'scope'):
value = _popenv(param)
if value:
ret[param] = value
def _fix_log_env(name, oldname):
value = _popenv(oldname)
name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper()
if value and name not in os.environ:
os.environ[name] = value
for name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')):
_fix_log_env(name, oldname)
def _set_section_values(section, params):
for param in params:
value = _popenv(section + '_' + param)
if value:
ret[section][param] = value
_set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password',
'cafile', 'ciphers', 'verify_client', 'http_extra_headers',
'https_extra_headers', 'allowlist', 'allowlist_include_members'])
_set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password'])
_set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir'])
_set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size',
'dir', 'file_size', 'file_num', 'loggers'])
_set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr'])
for first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')):
value = ret.get(first, {}).pop(second, None)
if value:
value = parse_bool(value)
if value is not None:
ret[first][second] = value
for second in ('max_queue_size', 'file_size', 'file_num'):
value = ret.get('log', {}).pop(second, None)
if value:
value = parse_int(value)
if value is not None:
ret['log'][second] = value
def _parse_list(value):
if not (value.strip().startswith('-') or '[' in value):
value = '[{0}]'.format(value)
try:
return yaml.safe_load(value)
except Exception:
logger.exception('Exception when parsing list %s', value)
return None
for first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')):
value = ret.get(first, {}).pop(second, None)
if value:
value = _parse_list(value)
if value:
ret[first][second] = value
def _parse_dict(value):
if not value.strip().startswith('{'):
value = '{{{0}}}'.format(value)
try:
return yaml.safe_load(value)
except Exception:
logger.exception('Exception when parsing dict %s', value)
return None
for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))):
for second in params:
value = ret.get(first, {}).pop(second, None)
if value:
value = _parse_dict(value)
if value:
ret[first][second] = value
def _get_auth(name, params=None):
ret = {}
for param in params or _AUTH_ALLOWED_PARAMETERS[:2]:
value = _popenv(name + '_' + param)
if value:
ret[param] = value
return ret
restapi_auth = _get_auth('restapi')
if restapi_auth:
ret['restapi']['authentication'] = restapi_auth
authentication = {}
for user_type in ('replication', 'superuser', 'rewind'):
entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS)
if entry:
authentication[user_type] = entry
if authentication:
ret['postgresql']['authentication'] = authentication
for param in list(os.environ.keys()):
if param.startswith(PATRONI_ENV_PREFIX):
# PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..)
name, suffix = (param[8:].split('_', 1) + [''])[:2]
if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY',
'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY',
'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT',
'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS',
'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name:
value = os.environ.pop(param)
if suffix == 'PORT':
value = value and parse_int(value)
elif suffix in ('HOSTS', 'PORTS', 'CHECKS'):
value = value and _parse_list(value)
elif suffix in ('LABELS', 'SET_ACLS'):
value = _parse_dict(value)
elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'):
value = parse_bool(value)
if value:
ret[name.lower()][suffix.lower()] = value
for dcs in ('etcd', 'etcd3'):
if dcs in ret:
ret[dcs].update(_get_auth(dcs))
users = {}
for param in list(os.environ.keys()):
if param.startswith(PATRONI_ENV_PREFIX):
name, suffix = (param[8:].rsplit('_', 1) + [''])[:2]
# PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...>
# CREATE USER "<username>" WITH <OPTIONS> PASSWORD '<password>'
if name and suffix == 'PASSWORD':
password = os.environ.pop(param)
if password:
users[name] = {'password': password}
options = os.environ.pop(param[:-9] + '_OPTIONS', None)
options = options and _parse_list(options)
if options:
users[name]['options'] = options
if users:
ret['bootstrap']['users'] = users
return ret
def _build_effective_configuration(self, dynamic_configuration, local_configuration):
config = self._safe_copy_dynamic_configuration(dynamic_configuration)
for name, value in local_configuration.items():
if name == 'postgresql':
for name, value in (value or {}).items():
if name == 'parameters':
config['postgresql'][name].update(self._process_postgresql_parameters(value, True))
elif name != 'use_slots': # replication slots must be enabled/disabled globally
config['postgresql'][name] = deepcopy(value)
elif name not in config or name in ['watchdog']:
config[name] = deepcopy(value) if value else {}
# restapi server expects to get restapi.auth = 'username:password'
if 'restapi' in config and 'authentication' in config['restapi']:
config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication'])
# special treatment for old config
# 'exhibitor' inside 'zookeeper':
if 'zookeeper' in config and 'exhibitor' in config['zookeeper']:
config['exhibitor'] = config['zookeeper'].pop('exhibitor')
config.pop('zookeeper')
pg_config = config['postgresql']
# no 'authentication' in 'postgresql', but 'replication' and 'superuser'
if 'authentication' not in pg_config:
pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config
pg_config['authentication'] = {u: pg_config[u] for u in ('replication', 'superuser') if u in pg_config}
# no 'superuser' in 'postgresql'.'authentication'
if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config:
pg_config['authentication']['superuser'] = pg_config['pg_rewind']
# handle setting additional connection parameters that may be available
# in the configuration file, such as SSL connection parameters
for name, value in pg_config['authentication'].items():
pg_config['authentication'][name] = {n: v for n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS}
# no 'name' in config
if 'name' not in config and 'name' in pg_config:
config['name'] = pg_config['name']
updated_fields = (
'name',
'scope',
'retry_timeout',
'synchronous_mode',
'synchronous_mode_strict',
'synchronous_node_count',
'maximum_lag_on_syncnode'
)
pg_config.update({p: config[p] for p in updated_fields if p in config})
return config
def get(self, key, default=None):
return self.__effective_configuration.get(key, default)
def __contains__(self, key):
return key in self.__effective_configuration
def __getitem__(self, key):
return self.__effective_configuration[key]
def copy(self):
return deepcopy(self.__effective_configuration)
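A minimal usage sketch, not part of patroni itself: it assumes a non-empty Patroni YAML file exists at the illustrative path postgres0.yml and only uses the accessors defined above.
cfg = Config('postgres0.yml')
print(cfg.get('scope'))                     # dict-style access to the effective configuration
print('postgresql' in cfg)                  # __contains__ support
print(cfg.check_mode('synchronous_mode'))   # boolean flags parsed from the dynamic configuration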
| 43.486842
| 120
| 0.580736
| 2,083
| 19,830
| 5.294287
| 0.18723
| 0.036271
| 0.019587
| 0.008886
| 0.222434
| 0.175553
| 0.106456
| 0.08161
| 0.06819
| 0.065288
| 0
| 0.002871
| 0.315028
| 19,830
| 455
| 121
| 43.582418
| 0.809026
| 0.098084
| 0
| 0.206704
| 0
| 0
| 0.154664
| 0.020536
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069832
| false
| 0.039106
| 0.036313
| 0.02514
| 0.181564
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
863423445c595d9f921067c5163063a99cb0a68c | 12,040 | py | Python
| src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 361a30e0c72a15a21f88433b8d5fc49331f36728 | ["ZPL-2.1"] | 3 | 2015-11-24T16:26:02.000Z | 2019-04-09T07:37:12.000Z
| src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 361a30e0c72a15a21f88433b8d5fc49331f36728 | ["ZPL-2.1"] | 86 | 2015-09-10T16:25:08.000Z | 2022-03-17T07:16:30.000Z
| src/Products/CMFCore/tests/test_DirectoryView.py | fdiary/Products.CMFCore | 361a30e0c72a15a21f88433b8d5fc49331f36728 | ["ZPL-2.1"] | 16 | 2015-08-21T21:35:35.000Z | 2021-08-04T18:20:55.000Z |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit tests for DirectoryView module.
"""
import sys
import unittest
import warnings
from os import mkdir
from os import remove
from os.path import join
from tempfile import mktemp
from App.config import getConfiguration
from . import _globals
from .base.dummy import DummyFolder
from .base.testcase import FSDVTest
from .base.testcase import WritableFSDVTest
class DirectoryViewPathTests(unittest.TestCase):
"""
These test that, no matter what is stored in their dirpath,
FSDV's will do their best to find an appropriate skin
and only do nothing in the case where an appropriate skin
can't be found.
"""
def setUp(self):
from Products.CMFCore.DirectoryView import addDirectoryViews
from Products.CMFCore.DirectoryView import registerDirectory
registerDirectory('fake_skins', _globals)
self.ob = DummyFolder()
addDirectoryViews(self.ob, 'fake_skins', _globals)
def test__generateKey(self):
from Products.CMFCore.DirectoryView import _generateKey
key = _generateKey('Products.CMFCore', 'tests')
self.assertEqual(key.split(':')[0], 'Products.CMFCore')
subkey = _generateKey('Products.CMFCore', 'tests\foo')
self.assertTrue(subkey.startswith(key))
def test__findProductForPath(self):
from Products.CMFCore.DirectoryView import _findProductForPath
cmfpath = sys.modules['Products.CMFCore'].__path__[0]
self.assertEqual(_findProductForPath(cmfpath),
('Products.CMFCore', ''))
cmfpath = join(cmfpath, 'tests')
self.assertEqual(_findProductForPath(cmfpath),
('Products.CMFCore', 'tests'))
def test_getDirectoryInfo(self):
skin = self.ob.fake_skin
skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin')
self.assertTrue(hasattr(self.ob.fake_skin, 'test1'),
self.ob.fake_skin.getDirPath())
# Test we do nothing if given a really wacky path
def test_UnhandleableExpandPath(self):
file = mktemp()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.ob.fake_skin.manage_properties(file)
self.assertEqual(self.ob.fake_skin.objectIds(), [])
# Check that a warning was raised.
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, UserWarning))
text = ('DirectoryView fake_skin refers to a non-existing path %r'
% file)
self.assertTrue(text in str(w[-1].message))
# this test tests that registerDirectory creates keys in the right format.
def test_registerDirectoryKeys(self):
from Products.CMFCore.DirectoryView import _dirreg
dirs = _dirreg._directories
self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs,
dirs.keys())
self.assertEqual(self.ob.fake_skin.getDirPath(),
'Products.CMFCore.tests:fake_skins/fake_skin')
class DirectoryViewTests(FSDVTest):
def setUp(self):
FSDVTest.setUp(self)
self._registerDirectory(self)
def test_addDirectoryViews(self):
# Test addDirectoryViews
# also test registration of directory views doesn't barf
pass
def test_DirectoryViewExists(self):
# Check DirectoryView added by addDirectoryViews
# appears as a DirectoryViewSurrogate due
# to Acquisition hackery.
from Products.CMFCore.DirectoryView import DirectoryViewSurrogate
self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate))
def test_DirectoryViewMethod(self):
# Check if DirectoryView method works
self.assertEqual(self.ob.fake_skin.test1(), 'test1')
def test_properties(self):
# Make sure the directory view is reading properties
self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope')
def test_ignored(self):
# Test that "artifact" files and dirs are ignored
for name in '#test1', 'CVS', '.test1', 'test1~':
self.assertTrue(name not in self.ob.fake_skin.objectIds(),
'%s not ignored' % name)
def test_surrogate_writethrough(self):
# CMF Collector 316: It is possible to cause ZODB writes because
# setting attributes on the non-persistent surrogate writes them
# into the persistent DirectoryView as well. This is bad in situations
# where you only want to store markers and remove them before the
# transaction has ended - they never got removed because there was
# no equivalent __delattr__ on the surrogate that would clean up
# the persistent DirectoryView as well.
fs = self.ob.fake_skin
test_foo = 'My Foovalue'
fs.foo = test_foo
self.assertEqual(fs.foo, test_foo)
self.assertEqual(fs.__dict__['_real'].foo, test_foo)
del fs.foo
self.assertRaises(AttributeError, getattr, fs, 'foo')
self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo')
class DirectoryViewIgnoreTests(FSDVTest):
def setUp(self):
FSDVTest.setUp(self)
self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py')
self._registerDirectory(self, ignore=self.manual_ign)
def test_ignored(self):
# Test that "artifact" files and dirs are ignored,
# even when a custom ignore list is used; and that the
# custom ignore list is also honored
auto_ign = ('#test1', '.test1', 'test1~')
must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',)
visible = self.ob.fake_skin.objectIds()
for name in must_ignore:
self.assertFalse(name in visible)
class DirectoryViewFolderTests(FSDVTest):
def setUp(self):
FSDVTest.setUp(self)
self._registerDirectory(self)
def tearDown(self):
from Products.CMFCore import DirectoryView
# This is nasty, but there is no way to unregister anything
# right now...
metatype_registry = DirectoryView._dirreg._meta_types
if 'FOLDER' in metatype_registry:
del metatype_registry['FOLDER']
FSDVTest.tearDown(self)
def test_DirectoryViewMetadata(self):
# Test to determine if metadata shows up correctly on a
# FSDV that has a corresponding .metadata file
testfolder = self.ob.fake_skin.test_directory
self.assertEqual(testfolder.title, 'test_directory Title')
def test_DirectoryViewMetadataOnPropertyManager(self):
# Test to determine if metadata shows up correctly on a
# FSDV that has a corresponding .metadata file
testfolder = self.ob.fake_skin.test_directory
self.assertEqual(testfolder.getProperty('title'),
'test_directory Title')
def test_DirectoryViewFolderDefault(self):
# Test that a folder inside the fake skin really is of type
# DirectoryViewSurrogate
from Products.CMFCore.DirectoryView import DirectoryViewSurrogate
testfolder = self.ob.fake_skin.test_directory
self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate))
def test_DirectoryViewFolderCustom(self):
# Now we register a different class under the fake meta_type
# "FOLDER" and test again...
from Products.CMFCore.DirectoryView import DirectoryView
from Products.CMFCore.DirectoryView import registerMetaType
class DummyDirectoryViewSurrogate:
pass
class DummyDirectoryView(DirectoryView):
def __of__(self, parent):
return DummyDirectoryViewSurrogate()
registerMetaType('FOLDER', DummyDirectoryView)
# In order to regenerate the FSDV data we need to remove and
# register again, that way the newly registered meta_type is used
self.ob._delObject('fake_skin')
self._registerDirectory(self)
testfolder = self.ob.fake_skin.test_directory
self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate))
class DebugModeTests(WritableFSDVTest):
def setUp(self):
from Products.CMFCore.DirectoryView import _dirreg
WritableFSDVTest.setUp(self)
self.saved_cfg_debug_mode = getConfiguration().debug_mode
getConfiguration().debug_mode = True
# initialise skins
self._registerDirectory(self)
# add a method to the fake skin folder
self._writeFile('test2.py', "return 'test2'")
# edit the test1 method
self._writeFile('test1.py', "return 'new test1'")
# add a new folder
mkdir(join(self.skin_path_name, 'test3'))
info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath)
info.reload()
self.use_dir_mtime = info.use_dir_mtime
def tearDown(self):
getConfiguration().debug_mode = self.saved_cfg_debug_mode
WritableFSDVTest.tearDown(self)
def test_AddNewMethod(self):
# See if a method added to the skin folder can be found
self.assertEqual(self.ob.fake_skin.test2(), 'test2')
def test_EditMethod(self):
# See if an edited method exhibits its new behaviour
self.assertEqual(self.ob.fake_skin.test1(), 'new test1')
def test_DeleteMethod(self):
# Make sure a deleted method goes away
remove(join(self.skin_path_name, 'test2.py'))
self.assertFalse(hasattr(self.ob.fake_skin, 'test2'))
def test_DeleteAddEditMethod(self):
# Check that if we delete a method, then add it back,
# then edit it, the DirectoryView notices.
# This exercises yet another Win32 mtime weirdity.
remove(join(self.skin_path_name, 'test2.py'))
self.assertFalse(hasattr(self.ob.fake_skin, 'test2'))
# add method back to the fake skin folder
self._writeFile('test2.py', "return 'test2.2'",
self.use_dir_mtime)
# check
self.assertEqual(self.ob.fake_skin.test2(), 'test2.2')
# edit method
self._writeFile('test2.py', "return 'test2.3'",
self.use_dir_mtime)
# check
self.assertEqual(self.ob.fake_skin.test2(), 'test2.3')
def test_NewFolder(self):
# See if a new folder shows up
self.assertFalse(hasattr(self.ob.fake_skin, 'test3'))
def test_DeleteFolder(self):
# Make sure a deleted folder goes away
self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory'))
# It has a file, which we need to delete first.
self.assertTrue(hasattr(self.ob.fake_skin.test_directory,
'README.txt'))
self._deleteFile(join('test_directory', 'README.txt'),
self.use_dir_mtime)
self._deleteDirectory('test_directory', self.use_dir_mtime)
self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory'))
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DirectoryViewPathTests))
suite.addTest(unittest.makeSuite(DirectoryViewTests))
suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests))
suite.addTest(unittest.makeSuite(DirectoryViewFolderTests))
suite.addTest(unittest.makeSuite(DebugModeTests))
return suite
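A small sketch, not in the original file: the suite can be run with the standard unittest runner, assuming the Products.CMFCore test dependencies are importable.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(test_suite())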
| 38.222222
| 79
| 0.66603
| 1,385
| 12,040
| 5.662816
| 0.256318
| 0.035701
| 0.035701
| 0.048196
| 0.359301
| 0.294148
| 0.231417
| 0.168303
| 0.145353
| 0.133112
| 0
| 0.00566
| 0.23696
| 12,040
| 314
| 80
| 38.343949
| 0.848046
| 0.233555
| 0
| 0.19774
| 0
| 0
| 0.087325
| 0.016708
| 0
| 0
| 0
| 0
| 0.19209
| 1
| 0.175141
| false
| 0.011299
| 0.129944
| 0.00565
| 0.355932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
8634627ed49b276d745b67db33bc1b7a02ae6c52 | 4,247 | py | Python
| pycycle/elements/flight_conditions.py | eshendricks/pyCycle | 2b7f9c2a60c6d93d5e561c71b27e75566b3baef0 | ["Apache-2.0"] | null | null | null
| pycycle/elements/flight_conditions.py | eshendricks/pyCycle | 2b7f9c2a60c6d93d5e561c71b27e75566b3baef0 | ["Apache-2.0"] | null | null | null
| pycycle/elements/flight_conditions.py | eshendricks/pyCycle | 2b7f9c2a60c6d93d5e561c71b27e75566b3baef0 | ["Apache-2.0"] | null | null | null |
import openmdao.api as om
from pycycle.thermo.cea import species_data
from pycycle.constants import AIR_ELEMENTS
from pycycle.elements.ambient import Ambient
from pycycle.elements.flow_start import FlowStart
class FlightConditions(om.Group):
"""Determines total and static flow properties given an altitude and Mach number using the input atmosphere model"""
def initialize(self):
self.options.declare('thermo_method', default='CEA', values=('CEA',),
desc='Method for computing thermodynamic properties')
self.options.declare('thermo_data', default=species_data.janaf,
desc='thermodynamic data set', recordable=False)
self.options.declare('elements', default=AIR_ELEMENTS,
desc='set of elements present in the flow')
self.options.declare('use_WAR', default=False, values=[True, False],
desc='If True, includes WAR calculation')
def setup(self):
thermo_method = self.options['thermo_method']
thermo_data = self.options['thermo_data']
elements = self.options['elements']
use_WAR = self.options['use_WAR']
self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs
conv = self.add_subsystem('conv', om.Group(), promotes=['*'])
if use_WAR == True:
proms = ['Fl_O:*', 'MN', 'W', 'WAR']
else:
proms = ['Fl_O:*', 'MN', 'W']
conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method,
thermo_data=thermo_data,
elements=elements,
use_WAR=use_WAR),
promotes=proms)
balance = conv.add_subsystem('balance', om.BalanceComp())
balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR')
balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total pressure', eq_units='psi')
# sub.set_order(['fs','balance'])
newton = conv.nonlinear_solver = om.NewtonSolver()
newton.options['atol'] = 1e-10
newton.options['rtol'] = 1e-10
newton.options['maxiter'] = 10
newton.options['iprint'] = -1
newton.options['solve_subsystems'] = True
newton.options['reraise_child_analysiserror'] = False
newton.linesearch = om.BoundsEnforceLS()
newton.linesearch.options['bound_enforcement'] = 'scalar'
newton.linesearch.options['iprint'] = -1
# newton.linesearch.options['solve_subsystems'] = True
conv.linear_solver = om.DirectSolver(assemble_jac=True)
self.connect('ambient.Ps', 'balance.rhs:Pt')
self.connect('ambient.Ts', 'balance.rhs:Tt')
self.connect('balance.Pt', 'fs.P')
self.connect('balance.Tt', 'fs.T')
self.connect('Fl_O:stat:P', 'balance.lhs:Pt')
self.connect('Fl_O:stat:T', 'balance.lhs:Tt')
# self.set_order(['ambient', 'subgroup'])
if __name__ == "__main__":
p1 = om.Problem()
p1.model = om.Group()
des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp())
des_vars.add_output('W', 0.0, units='lbm/s')
des_vars.add_output('alt', 1., units='ft')
des_vars.add_output('MN', 0.5)
des_vars.add_output('dTs', 0.0, units='degR')
fc = p1.model.add_subsystem("fc", FlightConditions())
p1.model.connect('des_vars.W', 'fc.W')
p1.model.connect('des_vars.alt', 'fc.alt')
p1.model.connect('des_vars.MN', 'fc.MN')
p1.model.connect('des_vars.dTs', 'fc.dTs')
p1.setup()
# p1.root.list_connections()
p1['des_vars.alt'] = 17868.79060515557
p1['des_vars.MN'] = 2.101070288213628
p1['des_vars.dTs'] = 0.0
p1['des_vars.W'] = 1.0
p1.run_model()
print('Ts_atm: ', p1['fc.ambient.Ts'])
print('Ts_set: ', p1['fc.Fl_O:stat:T'])
print('Ps_atm: ', p1['fc.ambient.Ps'])
print('Ps_set: ', p1['fc.Fl_O:stat:P'])
print('rhos_atm: ', p1['fc.ambient.rhos']*32.175)
print('rhos_set: ', p1['fc.Fl_O:stat:rho'])
print('W', p1['fc.Fl_O:stat:W'])
print('Pt: ', p1['fc.Fl_O:tot:P'])
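An optional sketch, not in the original file: the problem built in the __main__ block above can be reused to sweep Mach number; it assumes that block has already executed so p1 is set up, and the Mach values are illustrative.
for mn in (0.5, 1.2, 2.0):                   # illustrative Mach numbers
    p1['des_vars.MN'] = mn
    p1.run_model()
    print(mn, p1['fc.Fl_O:tot:P'][0])        # total pressure at each flight condition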
| 38.261261
| 120
| 0.600895
| 548
| 4,247
| 4.50365
| 0.286496
| 0.039708
| 0.017018
| 0.014182
| 0.079011
| 0.017018
| 0
| 0
| 0
| 0
| 0
| 0.029039
| 0.237815
| 4,247
| 110
| 121
| 38.609091
| 0.733395
| 0.063574
| 0
| 0
| 0
| 0
| 0.215024
| 0.006806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.064103
| 0
| 0.102564
| 0.128205
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
8634b2f385acdad2561bde76c51b0f6fb67361d8 | 2,806 | py | Python
| samples/modules/tensorflow/magic_wand/train/data_split_person.py | lviala-zaack/zephyr | bf3c6e7ba415dd85f1b68eb69ea2779b234c686f | ["Apache-2.0"] | 6,224 | 2016-06-24T20:04:19.000Z | 2022-03-31T20:33:45.000Z
| samples/modules/tensorflow/magic_wand/train/data_split_person.py | Conexiotechnologies/zephyr | fde24ac1f25d09eb9722ce4edc6e2d3f844b5bce | ["Apache-2.0"] | 32,027 | 2017-03-24T00:02:32.000Z | 2022-03-31T23:45:53.000Z
| samples/modules/tensorflow/magic_wand/train/data_split_person.py | Conexiotechnologies/zephyr | fde24ac1f25d09eb9722ce4edc6e2d3f844b5bce | ["Apache-2.0"] | 4,374 | 2016-08-11T07:28:47.000Z | 2022-03-31T14:44:59.000Z |
# Lint as: python3
# coding=utf-8
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Split data into train, validation and test dataset according to person.
That is, use some people's data as train, some other people's data as
validation, and the remaining people's data as test. These data would be saved
separately under "/person_split".
It will generate new files with the following structure:
├──person_split
│ ├── test
│ ├── train
│ └──valid
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from data_split import read_data
from data_split import write_data
def person_split(whole_data, train_names, valid_names, test_names):
"""Split data by person."""
random.seed(30)
random.shuffle(whole_data)
train_data = []
valid_data = []
test_data = []
for idx, data in enumerate(whole_data): # pylint: disable=unused-variable
if data["name"] in train_names:
train_data.append(data)
elif data["name"] in valid_names:
valid_data.append(data)
elif data["name"] in test_names:
test_data.append(data)
print("train_length:" + str(len(train_data)))
print("valid_length:" + str(len(valid_data)))
print("test_length:" + str(len(test_data)))
return train_data, valid_data, test_data
if __name__ == "__main__":
data = read_data("./data/complete_data")
train_names = [
"hyw", "shiyun", "tangsy", "dengyl", "jiangyh", "xunkai", "negative3",
"negative4", "negative5", "negative6"
]
valid_names = ["lsj", "pengxl", "negative2", "negative7"]
test_names = ["liucx", "zhangxy", "negative1", "negative8"]
train_data, valid_data, test_data = person_split(data, train_names,
valid_names, test_names)
if not os.path.exists("./person_split"):
os.makedirs("./person_split")
write_data(train_data, "./person_split/train")
write_data(valid_data, "./person_split/valid")
write_data(test_data, "./person_split/test")
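An illustrative sketch, not part of the original file: person_split only relies on each record carrying a "name" key, so it can be exercised with toy records like these (the "gesture" field is a hypothetical payload).
toy_data = [{"name": "hyw", "gesture": "wing"},
            {"name": "lsj", "gesture": "ring"},
            {"name": "liucx", "gesture": "slope"}]
train, valid, test = person_split(toy_data, ["hyw"], ["lsj"], ["liucx"])
assert len(train) == len(valid) == len(test) == 1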
| 36.921053
| 125
| 0.653956
| 371
| 2,806
| 4.781671
| 0.423181
| 0.055806
| 0.029312
| 0.03044
| 0.131905
| 0.11274
| 0.068771
| 0
| 0
| 0
| 0
| 0.009066
| 0.213828
| 2,806
| 75
| 126
| 37.413333
| 0.788305
| 0.395937
| 0
| 0
| 0
| 0
| 0.175165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.179487
| 0
| 0.230769
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
86350332b9c46bb259c547e1b3c963ac7c8f647c | 10,632 | py | Python
| tests/k8s_handler.py | josebalius/go-spacemesh | 7ad61dcbe30f361b348e93c97eb3871ab79f1848 | ["MIT"] | 586 | 2017-12-27T10:29:30.000Z | 2022-03-21T00:25:54.000Z
| tests/k8s_handler.py | josebalius/go-spacemesh | 7ad61dcbe30f361b348e93c97eb3871ab79f1848 | ["MIT"] | 2,542 | 2017-12-27T11:23:12.000Z | 2022-03-31T18:40:52.000Z
| tests/k8s_handler.py | josebalius/go-spacemesh | 7ad61dcbe30f361b348e93c97eb3871ab79f1848 | ["MIT"] | 162 | 2017-12-27T13:37:12.000Z | 2022-03-25T09:15:13.000Z |
from datetime import datetime
from kubernetes import client
from kubernetes.client.rest import ApiException
import os
import time
import yaml
from tests import config as conf
import tests.utils as ut
def remove_clusterrole_binding(shipper_name, crb_name):
# remove clusterrolebind
k8s_client = client.RbacAuthorizationV1Api()
try:
k8s_client.delete_cluster_role_binding(crb_name)
print(f"\nsuccessfully deleted: {crb_name}")
except Exception as e:
print(f"\n{shipper_name} cluster role binding deletion has failed, please manually delete {crb_name}:")
print(f"kubectl delete clusterrolebinding {crb_name}")
def filebeat_teardown(namespace):
# remove clusterrolebind
# TODO: find a solution for sharing the name both here and in the kube object
crb_name = f"filebeat-cluster-role-binding-{namespace}"
remove_clusterrole_binding("filebeat", crb_name)
def fluent_bit_teardown(namespace):
# remove clusterrolebind
# TODO: find a solution for sharing the name both here and in the kube object
crb_name = f"fluent-bit-clusterrole-binding-{namespace}"
remove_clusterrole_binding("fluent-bit", crb_name)
def add_elastic_cluster(namespace):
print("\nDeploying ElasticSearch\n")
add_deployment_dir(namespace, conf.ELASTIC_CONF_DIR)
def add_filebeat_cluster(namespace):
print("\nDeploying FileBeat\n")
add_deployment_dir(namespace, conf.FILEBEAT_CONF_DIR)
def add_fluent_bit_cluster(namespace):
print("\nDeploying Fluent-bit\n")
add_deployment_dir(namespace, conf.FLUENT_BIT_CONF_DIR)
def add_kibana_cluster(namespace):
print("\nDeploying Kibana\n")
add_deployment_dir(namespace, conf.KIBANA_CONF_DIR)
def add_logstash_cluster(namespace):
print("\nDeploying LogStash\n")
add_deployment_dir(namespace, conf.LOGSTASH_CONF_DIR)
def add_deployment_dir(namespace, dir_path, delete=False):
with open(os.path.join(dir_path, 'dep_order.txt')) as f:
dep_order = f.readline()
dep_lst = [x.strip() for x in dep_order.split(',')]
print(dep_lst)
phrases_to_replace = ["(?<!_)NAMESPACE", "REP_ES_USER", "REP_ES_PASS"]
values_for_replacement = [namespace, conf.ES_USER_LOCAL, conf.ES_PASS_LOCAL]
for filename in dep_lst:
# replace all phrases with the actual values if exists
modified_file_path, is_change = ut.duplicate_file_and_replace_phrases(
dir_path, filename, f"{namespace}_{filename}", phrases_to_replace, values_for_replacement
)
print(f"applying file: {filename}")
with open(modified_file_path) as f:
dep = yaml.safe_load(f)
if modified_file_path != os.path.join(dir_path, filename) and is_change:
# remove modified file
ut.delete_file(modified_file_path)
name = dep["metadata"]["name"]
if dep['kind'] == 'StatefulSet':
k8s_client = client.AppsV1Api()
if not delete:
k8s_client.create_namespaced_stateful_set(body=dep, namespace=namespace)
else:
k8s_client.delete_namespaced_stateful_set(name=name, namespace=namespace)
elif dep['kind'] == 'DaemonSet':
k8s_client = client.AppsV1Api()
k8s_client.create_namespaced_daemon_set(body=dep, namespace=namespace)
elif dep['kind'] == 'Deployment':
k8s_client = client.AppsV1Api()
k8s_client.create_namespaced_deployment(body=dep, namespace=namespace)
elif dep['kind'] == 'Service':
try:
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_service(body=dep, namespace=namespace)
except ApiException as e:
if e.status == 409:
print(f"Service exists: {dep['metadata']['name']}")
continue
raise e
elif dep['kind'] == 'PodDisruptionBudget':
k8s_client = client.PolicyV1beta1Api()
k8s_client.create_namespaced_pod_disruption_budget(body=dep, namespace=namespace)
elif dep["kind"] == 'Role':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.create_namespaced_role(body=dep, namespace=namespace)
elif dep["kind"] == 'ClusterRole':
try:
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.create_cluster_role(body=dep)
except ApiException as e:
if e.status == 409:
print(f"cluster role already exists")
continue
raise e
elif dep["kind"] == 'RoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
dep["subjects"][0]["namespace"] = namespace
k8s_client.create_namespaced_role_binding(body=dep, namespace=namespace)
elif dep["kind"] == 'ClusterRoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
try:
k8s_client.create_cluster_role_binding(body=dep)
except ApiException as e:
if e.status == 409:
print(f"cluster role binding already exists")
continue
raise e
elif dep["kind"] == 'ConfigMap':
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_config_map(body=dep, namespace=namespace)
elif dep["kind"] == 'ServiceAccount':
k8s_client = client.CoreV1Api()
k8s_client.create_namespaced_service_account(body=dep, namespace=namespace)
print("\nDone\n")
def remove_deployment_dir(namespace, dir_path):
with open(os.path.join(dir_path, 'dep_order.txt')) as f:
dep_order = f.readline()
dep_lst = [x.strip() for x in dep_order.split(',')]
print(dep_lst)
for filename in dep_lst:
print(f"deleting {filename}")
with open(os.path.join(dir_path, filename)) as f:
dep = yaml.safe_load(f)
name = dep["metadata"]["name"]
if dep['kind'] == 'StatefulSet':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_stateful_set(name=name, namespace=namespace)
elif dep['kind'] == 'DaemonSet':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_daemon_set(name=name, namespace=namespace)
elif dep['kind'] == 'Deployment':
k8s_client = client.AppsV1Api()
k8s_client.delete_namespaced_deployment(name=name, namespace=namespace)
elif dep['kind'] == 'Service':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_service(name=name, namespace=namespace, grace_period_seconds=0)
delete_func = k8s_client.delete_namespaced_service
list_func = k8s_client.list_namespaced_service
wait_for_namespaced_deletion(name, namespace, delete_func, list_func)
elif dep['kind'] == 'PodDisruptionBudget':
k8s_client = client.PolicyV1beta1Api()
k8s_client.delete_namespaced_pod_disruption_budget(name=name, namespace=namespace)
elif dep["kind"] == 'Role':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_namespaced_role(name=name, namespace=namespace)
elif dep["kind"] == 'RoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_namespaced_role_binding(name=name, namespace=namespace)
elif dep["kind"] == 'ClusterRoleBinding':
k8s_client = client.RbacAuthorizationV1Api()
k8s_client.delete_cluster_role_binding(name=name)
elif dep["kind"] == 'ConfigMap':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_config_map(name=name, namespace=namespace)
elif dep["kind"] == 'ServiceAccount':
k8s_client = client.CoreV1Api()
k8s_client.delete_namespaced_service_account(name=name, namespace=namespace)
print("\nDone\n")
def wait_for_namespaced_deletion(name, namespace, deletion_func, list_func, timeout=15):
deleted = False
orig_timeout = timeout
while not deleted:
# find by name and delete requested item
for item in list_func(namespace).items:
if item.metadata.name == name:
if timeout < 0:
raise TimeoutError(f"{orig_timeout} was not enough for deleting item:\n{item}\n")
deletion_func(name=name, namespace=namespace)
print(f"service {name} was not deleted, retrying")
time.sleep(1)
timeout -= 1
# validate item was deleted
for item in list_func(namespace).items:
deleted = True
if item.metadata.name == name:
deleted = False
return deleted
def wait_for_daemonset_to_be_ready(name, namespace, timeout=None):
wait_for_to_be_ready("daemonset", name, namespace, timeout=timeout)
def resolve_read_status_func(obj_name):
if obj_name == "daemonset":
return client.AppsV1Api().read_namespaced_daemon_set_status
else:
raise ValueError(f"resolve_read_status_func: {obj_name} is not a valid value")
def wait_for_to_be_ready(obj_name, name, namespace, timeout=None):
start = datetime.now()
while True:
read_func = resolve_read_status_func(obj_name)
resp = read_func(name=name, namespace=namespace)
total_sleep_time = (datetime.now()-start).total_seconds()
number_ready = resp.status.number_ready
updated_number_scheduled = resp.status.updated_number_scheduled
if number_ready and updated_number_scheduled and number_ready == updated_number_scheduled:
print("Total time waiting for {3} {0} [size: {1}]: {2} sec".format(name, number_ready, total_sleep_time,
obj_name))
break
print("{0}/{1} pods ready {2} sec ".format(number_ready, updated_number_scheduled, total_sleep_time), end="\r")
time.sleep(1)
if timeout and total_sleep_time > timeout:
raise Exception(f"Timeout waiting for {obj_name} to be ready")
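A usage sketch, not part of the original file: it assumes a reachable cluster with the kubernetes client configuration already loaded and the conf.*_CONF_DIR directories present on disk; the namespace and daemonset name below are illustrative.
namespace = "logging-test"                   # illustrative namespace
add_elastic_cluster(namespace)
add_fluent_bit_cluster(namespace)
add_kibana_cluster(namespace)
wait_for_daemonset_to_be_ready("fluent-bit", namespace, timeout=120)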
| 44.3
| 133
| 0.630267
| 1,207
| 10,632
| 5.299917
| 0.158244
| 0.066125
| 0.051587
| 0.054713
| 0.586525
| 0.499297
| 0.42473
| 0.35501
| 0.31718
| 0.279037
| 0
| 0.012487
| 0.2769
| 10,632
| 239
| 134
| 44.485356
| 0.819589
| 0.033766
| 0
| 0.443299
| 0
| 0
| 0.127266
| 0.015104
| 0
| 0
| 0
| 0.004184
| 0
| 1
| 0.072165
| false
| 0.010309
| 0.041237
| 0
| 0.123711
| 0.103093
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
863721243454a95cc68c80d7a3e2d4352bbe5a24 | 2,718 | py | Python
| natlas-agent/config.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | ["Apache-2.0"] | null | null | null
| natlas-agent/config.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | ["Apache-2.0"] | null | null | null
| natlas-agent/config.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | ["Apache-2.0"] | null | null | null |
import os
from dotenv import load_dotenv
class Config:
# Current Version
NATLAS_VERSION = "0.6.10"
BASEDIR = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(BASEDIR, '.env'))
def get_int(self, varname):
tmp = os.environ.get(varname)
if tmp:
return int(tmp)
return None
def get_bool(self, varname):
tmp = os.environ.get(varname)
if tmp and tmp.upper() == "TRUE":
return True
if tmp and tmp.upper() == "FALSE":
return False
return None
def __init__(self):
# url of server to get/submit work from/to
self.server = os.environ.get('NATLAS_SERVER_ADDRESS') or 'http://127.0.0.1:5000'
# ignore warnings about SSL connections
# you shouldn't ignore ssl warnings, but I'll give you the option
# Instead, you should put the trusted CA certificate bundle on the agent and use the REQUESTS_CA_BUNDLE env variable
self.ignore_ssl_warn = self.get_bool('NATLAS_IGNORE_SSL_WARN') or False
# maximum number of threads to utilize
self.max_threads = self.get_int('NATLAS_MAX_THREADS') or 3
# Are we allowed to scan local addresses?
# By default, agents protect themselves from scanning their local network
self.scan_local = self.get_bool('NATLAS_SCAN_LOCAL') or False
# default time to wait for the server to respond
self.request_timeout = self.get_int('NATLAS_REQUEST_TIMEOUT') or 15 # seconds
# Maximum value for exponential backoff of requests, 5 minutes default
self.backoff_max = self.get_int('NATLAS_BACKOFF_MAX') or 300 # seconds
# Base value to begin the exponential backoff
self.backoff_base = self.get_int('NATLAS_BACKOFF_BASE') or 1 # seconds
# Maximum number of times to retry submitting data before giving up
# This is useful if a thread is submitting data that the server doesn't understand for some reason
self.max_retries = self.get_int('NATLAS_MAX_RETRIES') or 10
# Identification string that identifies the agent that performed any given scan
# Used for database lookup and stored in scan output
self.agent_id = os.environ.get("NATLAS_AGENT_ID") or None
# Authentication token that agents can use to talk to the server API
# Only needed if the server is configured to require agent authentication
self.auth_token = os.environ.get("NATLAS_AGENT_TOKEN") or None
# Optionally save files that failed to upload
self.save_fails = self.get_bool("NATLAS_SAVE_FAILS") or False
# Allow version overrides for local development
# Necessary to test versioned host data templates before release
self.version_override = os.environ.get("NATLAS_VERSION_OVERRIDE") or None
self.sentry_dsn = os.environ.get("SENTRY_DSN") or None
if self.version_override:
self.NATLAS_VERSION = self.version_override
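A short usage sketch, not part of the original file: settings come from the process environment (optionally loaded via .env), so overrides can be set before instantiating Config; the value below is illustrative.
os.environ['NATLAS_MAX_THREADS'] = '8'       # illustrative override
cfg = Config()
print(cfg.server, cfg.max_threads, cfg.request_timeout)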
| 36.24
| 118
| 0.756439
| 430
| 2,718
| 4.630233
| 0.395349
| 0.028127
| 0.04219
| 0.040181
| 0.117027
| 0.038172
| 0.038172
| 0.038172
| 0.038172
| 0
| 0
| 0.010601
| 0.167035
| 2,718
| 74
| 119
| 36.72973
| 0.868816
| 0.437822
| 0
| 0.117647
| 0
| 0
| 0.18484
| 0.058511
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.058824
| 0
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
86375b708be1e1e74cc333322674e530709cceeb | 4,663 | py | Python
| rdr2019/mcmc_lc_jla_fit.py | rubind/host_unity | a1908d80a8b6354e4516cccbf2b1a214cbc7daa9 | ["MIT"] | null | null | null
| rdr2019/mcmc_lc_jla_fit.py | rubind/host_unity | a1908d80a8b6354e4516cccbf2b1a214cbc7daa9 | ["MIT"] | 3 | 2021-03-16T17:19:23.000Z | 2021-03-24T17:05:05.000Z
| rdr2019/mcmc_lc_jla_fit.py | rubind/host_unity | a1908d80a8b6354e4516cccbf2b1a214cbc7daa9 | ["MIT"] | null | null | null |
import os
import sys
import click
import pickle
import sncosmo
import numpy as np
from astropy.table import Table
DATA_PATH = '/home/samdixon/jla_light_curves/'
def modify_error(lc, error_floor=0.):
"""Add an error floor of `error_floor` times the maximum flux of the band
to each observation
"""
data = sncosmo.photdata.photometric_data(lc).normalized(zp=25., zpsys='ab')
new_lc = {'time': data.time,
'band': data.band,
'flux': data.flux,
'fluxerr': data.fluxerr,
'zp': data.zp,
'zpsys': data.zpsys}
for band in set(data.band):
band_cut = data.band==band
max_flux_in_band = np.max(data.flux[band_cut])
new_lc['fluxerr'][band_cut] = np.sqrt((error_floor*max_flux_in_band)**2+data.fluxerr[band_cut]**2)
new_lc = Table(new_lc, meta=lc.meta)
return new_lc
def fit_lc_and_save(lc, model_name, save_dir, no_mc):
name = lc.meta['SN']
model = sncosmo.Model(source=model_name,
effects=[sncosmo.CCM89Dust()],
effect_names=['mw'],
effect_frames=['obs'])
if type(name) is float:
name = int(name)
z = lc.meta['Z_HELIO']
mwebv = lc.meta['MWEBV']
bounds = {}
try:
t0 = float(lc.meta['DayMax'].split()[0])
bounds['t0'] = (t0-5, t0+5)
except KeyError:
try:
t0 = np.mean(lc['Date'])
bounds['t0'] = (min(lc['Date'])-20, max(lc['Date']))
except KeyError:
t0 = np.mean(lc['time'])
bounds['t0'] = (min(lc['time'])-20, max(lc['time']))
bounds['z'] = ((1-1e-4)*z, (1+1e-4)*z)
for param_name in model.source.param_names[1:]:
bounds[param_name] = (-50, 50)
modelcov = model_name=='salt2'
model.set(z=z, t0=t0, mwebv=mwebv)
phase_range = (-15, 45) if model_name=='salt2' else (-10, 40)
wave_range = (3000, 7000) if model_name=='salt2' else None
save_path = os.path.join(save_dir, '{}.pkl'.format(name))
try:
minuit_result, minuit_fit_model = sncosmo.fit_lc(lc, model, model.param_names[:-2], bounds=bounds,
phase_range=phase_range, wave_range=wave_range,
warn=False, modelcov=modelcov)
if not no_mc:
emcee_result, emcee_fit_model = sncosmo.mcmc_lc(sncosmo.select_data(lc, minuit_result['data_mask']),
minuit_fit_model,
model.param_names[:-2],
guess_t0=False,
bounds=bounds,
warn=False,
nwalkers=40,
modelcov=modelcov)
pickle.dump(emcee_result, open(save_path, 'wb'))
else:
pickle.dump(minuit_result, open(save_path, 'wb'))
except:
print('Fit to {} failed'.format(name))
sys.stdout.flush()
def main():
model_name, start, finish, err_floor, no_mc = sys.argv[1:]
start = int(start)
finish = int(finish)
err_floor = float(err_floor)
no_mc = bool(int(no_mc))
if no_mc:
save_dir = '/home/samdixon/host_unity/fitting/results_mw_reddening/jla_{}_{:02d}'.format(model_name, int(err_floor*100))
else:
save_dir = '/home/samdixon/host_unity/fitting/results_mw_reddening_mcmc/jla_{}_{:02d}'.format(model_name, int(err_floor*100))
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
lcs = []
for f in os.listdir(DATA_PATH)[int(start):int(finish)]:
if f[:2] == 'lc':
lc = sncosmo.read_lc(os.path.join(DATA_PATH, f), format='salt2', expand_bands=True, read_covmat=True)
lc = modify_error(lc, err_floor)
name = lc.meta['SN']
if type(name) is float:
name = int(name)
load_path = os.path.join(save_dir, '{}.pkl'.format(name))
try:
pickle.load(open(load_path, 'rb'))
print('{}: loaded'.format(name))
sys.stdout.flush()
except IOError:
print('Fitting {}'.format(name))
sys.stdout.flush()
fit_lc_and_save(lc, model_name, save_dir, no_mc)
else:
continue
if __name__=='__main__':
main()
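An invocation sketch, not part of the original file: main() reads five positional arguments (model name, start index, finish index, error floor, no-MCMC flag); the values below are illustrative, and the data/save paths inside the script are hard-coded for the original author's machine.
# Command-line form:  python mcmc_lc_jla_fit.py salt2 0 50 0.02 1
sys.argv[1:] = ['salt2', '0', '50', '0.02', '1']
main()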
| 39.516949
| 133
| 0.51855
| 575
| 4,663
| 3.998261
| 0.276522
| 0.035233
| 0.018269
| 0.024793
| 0.251849
| 0.162679
| 0.162679
| 0.162679
| 0.138321
| 0.107873
| 0
| 0.023739
| 0.34956
| 4,663
| 117
| 134
| 39.854701
| 0.734257
| 0.019301
| 0
| 0.174757
| 0
| 0
| 0.078763
| 0.037955
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029126
| false
| 0
| 0.067961
| 0
| 0.106796
| 0.029126
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
863791c55712e28d3fe1488aacf0c833eaf8ff5c | 11,011 | py | Python
| openmdao/core/tests/test_system.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | ["Apache-2.0"] | null | null | null
| openmdao/core/tests/test_system.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | ["Apache-2.0"] | null | null | null
| openmdao/core/tests/test_system.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | ["Apache-2.0"] | 1 | 2018-07-27T06:39:15.000Z | 2018-07-27T06:39:15.000Z |
""" Unit tests for the system interface."""
import unittest
from six import assertRaisesRegex
from six.moves import cStringIO
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ExecComp
from openmdao.test_suite.components.options_feature_vector import VectorDoublingComp
from openmdao.utils.assert_utils import assert_rel_error, assert_warning
class TestSystem(unittest.TestCase):
def test_vector_context_managers(self):
g1 = Group()
g1.add_subsystem('Indep', IndepVarComp('a', 5.0), promotes=['a'])
g2 = g1.add_subsystem('G2', Group(), promotes=['*'])
g2.add_subsystem('C1', ExecComp('b=2*a'), promotes=['a', 'b'])
model = Group()
model.add_subsystem('G1', g1, promotes=['b'])
model.add_subsystem('Sink', ExecComp('c=2*b'), promotes=['b'])
p = Problem(model=model)
p.set_solver_print(level=0)
# Test pre-setup errors
with self.assertRaises(Exception) as cm:
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(str(cm.exception),
"Group: Cannot get vectors because setup has not yet been called.")
with self.assertRaises(Exception) as cm:
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('vec')
self.assertEqual(str(cm.exception),
"Group: Cannot get vectors because setup has not yet been called.")
p.setup()
p.run_model()
# Test inputs with original values
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(inputs['G1.G2.C1.a'], 5.)
inputs, outputs, residuals = g1.get_nonlinear_vectors()
self.assertEqual(inputs['G2.C1.a'], 5.)
# Test inputs after setting a new value
inputs, outputs, residuals = g2.get_nonlinear_vectors()
inputs['C1.a'] = -1.
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(inputs['G1.G2.C1.a'], -1.)
inputs, outputs, residuals = g1.get_nonlinear_vectors()
self.assertEqual(inputs['G2.C1.a'], -1.)
# Test outputs with original values
inputs, outputs, residuals = model.get_nonlinear_vectors()
self.assertEqual(outputs['G1.G2.C1.b'], 10.)
inputs, outputs, residuals = g2.get_nonlinear_vectors()
# Test outputs after setting a new value
inputs, outputs, residuals = model.get_nonlinear_vectors()
outputs['G1.G2.C1.b'] = 123.
self.assertEqual(outputs['G1.G2.C1.b'], 123.)
inputs, outputs, residuals = g2.get_nonlinear_vectors()
outputs['C1.b'] = 789.
self.assertEqual(outputs['C1.b'], 789.)
# Test residuals
inputs, outputs, residuals = model.get_nonlinear_vectors()
residuals['G1.G2.C1.b'] = 99.0
self.assertEqual(residuals['G1.G2.C1.b'], 99.0)
# Test linear
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('linear')
d_outputs['G1.G2.C1.b'] = 10.
self.assertEqual(d_outputs['G1.G2.C1.b'], 10.)
# Test linear with invalid vec_name
with self.assertRaises(Exception) as cm:
d_inputs, d_outputs, d_residuals = model.get_linear_vectors('bad_name')
self.assertEqual(str(cm.exception),
"Group (<model>): There is no linear vector named %s" % 'bad_name')
def test_set_checks_shape(self):
indep = IndepVarComp()
indep.add_output('a')
indep.add_output('x', shape=(5, 1))
g1 = Group()
g1.add_subsystem('Indep', indep, promotes=['a', 'x'])
g2 = g1.add_subsystem('G2', Group(), promotes=['*'])
g2.add_subsystem('C1', ExecComp('b=2*a'), promotes=['a', 'b'])
g2.add_subsystem('C2', ExecComp('y=2*x',
x=np.zeros((5, 1)),
y=np.zeros((5, 1))),
promotes=['x', 'y'])
model = Group()
model.add_subsystem('G1', g1, promotes=['b', 'y'])
model.add_subsystem('Sink', ExecComp(('c=2*b', 'z=2*y'),
y=np.zeros((5, 1)),
z=np.zeros((5, 1))),
promotes=['b', 'y'])
p = Problem(model=model)
p.setup()
p.set_solver_print(level=0)
p.run_model()
msg = "Incompatible shape for '.*': Expected (.*) but got (.*)"
num_val = -10
arr_val = -10*np.ones((5, 1))
bad_val = -10*np.ones((10))
inputs, outputs, residuals = g2.get_nonlinear_vectors()
#
# set input
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
inputs['C1.a'] = arr_val
# assign scalar to array
inputs['C2.x'] = num_val
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign array to array
inputs['C2.x'] = arr_val
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
inputs['C2.x'] = bad_val
# assign list to array
inputs['C2.x'] = arr_val.tolist()
assert_rel_error(self, inputs['C2.x'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
inputs['C2.x'] = bad_val.tolist()
#
# set output
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
outputs['C1.b'] = arr_val
# assign scalar to array
outputs['C2.y'] = num_val
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign array to array
outputs['C2.y'] = arr_val
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
outputs['C2.y'] = bad_val
# assign list to array
outputs['C2.y'] = arr_val.tolist()
assert_rel_error(self, outputs['C2.y'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
outputs['C2.y'] = bad_val.tolist()
#
# set residual
#
# assign array to scalar
with assertRaisesRegex(self, ValueError, msg):
residuals['C1.b'] = arr_val
# assign scalar to array
residuals['C2.y'] = num_val
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign array to array
residuals['C2.y'] = arr_val
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign bad array shape to array
with assertRaisesRegex(self, ValueError, msg):
residuals['C2.y'] = bad_val
# assign list to array
residuals['C2.y'] = arr_val.tolist()
assert_rel_error(self, residuals['C2.y'], arr_val, 1e-10)
# assign bad list shape to array
with assertRaisesRegex(self, ValueError, msg):
residuals['C2.y'] = bad_val.tolist()
def test_deprecated_solver_names(self):
class DummySolver():
pass
model = Group()
# check nl_solver setter & getter
msg = "The 'nl_solver' attribute provides backwards compatibility " \
"with OpenMDAO 1.x ; use 'nonlinear_solver' instead."
with assert_warning(DeprecationWarning, msg):
model.nl_solver = DummySolver()
with assert_warning(DeprecationWarning, msg):
solver = model.nl_solver
self.assertTrue(isinstance(solver, DummySolver))
# check ln_solver setter & getter
msg = "The 'ln_solver' attribute provides backwards compatibility " \
"with OpenMDAO 1.x ; use 'linear_solver' instead."
with assert_warning(DeprecationWarning, msg):
model.ln_solver = DummySolver()
with assert_warning(DeprecationWarning, msg):
solver = model.ln_solver
self.assertTrue(isinstance(solver, DummySolver))
def test_deprecated_metadata(self):
prob = Problem()
prob.model.add_subsystem('inputs', IndepVarComp('x', shape=3))
prob.model.add_subsystem('double', VectorDoublingComp())
msg = "The 'metadata' attribute provides backwards compatibility " \
"with earlier version of OpenMDAO; use 'options' instead."
with assert_warning(DeprecationWarning, msg):
prob.model.double.metadata['size'] = 3
prob.model.connect('inputs.x', 'double.x')
prob.setup()
prob['inputs.x'] = [1., 2., 3.]
prob.run_model()
assert_rel_error(self, prob['double.y'], [2., 4., 6.])
def test_list_inputs_output_with_includes_excludes(self):
from openmdao.test_suite.scripts.circuit_analysis import Resistor, Diode, Node, Circuit
p = Problem()
model = p.model
model.add_subsystem('ground', IndepVarComp('V', 0., units='V'))
model.add_subsystem('source', IndepVarComp('I', 0.1, units='A'))
model.add_subsystem('circuit', Circuit())
model.connect('source.I', 'circuit.I_in')
model.connect('ground.V', 'circuit.Vg')
p.setup()
p.run_model()
# Inputs with no includes or excludes
inputs = model.list_inputs(out_stream=None)
self.assertEqual( len(inputs), 11)
# Inputs with includes
inputs = model.list_inputs(includes=['*V_out*'], out_stream=None)
self.assertEqual( len(inputs), 3)
# Inputs with includes matching a promoted name
inputs = model.list_inputs(includes=['*Vg*'], out_stream=None)
self.assertEqual( len(inputs), 2)
# Inputs with excludes
inputs = model.list_inputs(excludes=['*V_out*'], out_stream=None)
self.assertEqual( len(inputs), 8)
# Inputs with excludes matching a promoted name
inputs = model.list_inputs(excludes=['*Vg*'], out_stream=None)
self.assertEqual( len(inputs), 9)
# Inputs with includes and excludes
inputs = model.list_inputs(includes=['*V_out*'], excludes=['*Vg*'], out_stream=None)
self.assertEqual( len(inputs), 1)
# Outputs with no includes or excludes. Explicit only
outputs = model.list_outputs(implicit=False, out_stream=None)
self.assertEqual( len(outputs), 5)
# Outputs with includes. Explicit only
outputs = model.list_outputs(includes=['*I'], implicit=False, out_stream=None)
self.assertEqual( len(outputs), 4)
# Outputs with excludes. Explicit only
outputs = model.list_outputs(excludes=['circuit*'], implicit=False, out_stream=None)
self.assertEqual( len(outputs), 2)
if __name__ == "__main__":
unittest.main()
| 35.066879
| 95
| 0.596767
| 1,344
| 11,011
| 4.747024
| 0.144345
| 0.049373
| 0.041379
| 0.028213
| 0.715047
| 0.659561
| 0.59373
| 0.51442
| 0.420219
| 0.296708
| 0
| 0.024589
| 0.276088
| 11,011
| 313
| 96
| 35.178914
| 0.775812
| 0.104259
| 0
| 0.351648
| 0
| 0
| 0.107736
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.027473
| false
| 0.005495
| 0.043956
| 0
| 0.082418
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8638749e9332abd43829f80692ff4532468c5620
| 1,244
|
py
|
Python
|
code/src/db/create_db.py
|
fabiangunzinger/sample_project
|
a5c87d0c3ff2f6ed39f3e3a18557c0ab439f6b42
|
[
"MIT"
] | null | null | null |
code/src/db/create_db.py
|
fabiangunzinger/sample_project
|
a5c87d0c3ff2f6ed39f3e3a18557c0ab439f6b42
|
[
"MIT"
] | null | null | null |
code/src/db/create_db.py
|
fabiangunzinger/sample_project
|
a5c87d0c3ff2f6ed39f3e3a18557c0ab439f6b42
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import sqlite3
import sys
import pandas as pd
from src import config
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('sample')
parser.add_argument('replace')
return parser.parse_args(argv)
def db_tables(connection):
"""List tables in database."""
res = pd.read_sql("select name from sqlite_master", connection)
return res.name.values
def create_database(sample):
"""Create database with tables for targets, outcomes, and predictions."""
db_name = f'{sample}.db'
db_path = os.path.join(config.DATADIR, db_name)
conn = sqlite3.connect(db_path)
usr_name = f'users_{sample}.csv'
usr_path = os.path.join(config.DATADIR, usr_name)
users = pd.read_csv(usr_path)
db_tbls = db_tables(conn)
for tbl in ['decisions', 'outcomes', 'predictions']:
if tbl not in db_tbls:
users.to_sql(tbl, conn, index=False)
conn.execute(f"create index idx_{tbl}_user_id on {tbl}(user_id)")
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = parse_args(argv)
create_database(args.sample)
if __name__ == '__main__':
sys.exit(main())
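# --- Illustrative usage (not part of the original script) --------------------
# A minimal sketch, assuming config.DATADIR already contains a users_<sample>.csv
# file; the sample name '777', the 'replace' placeholder value and the import
# path are hypothetical:
#
#     python create_db.py 777 no
#
# or, in-process:
#
#     from src.db.create_db import create_database
#     create_database('777')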
| 25.387755
| 77
| 0.673633
| 180
| 1,244
| 4.455556
| 0.422222
| 0.033666
| 0.032419
| 0.034913
| 0.067332
| 0.067332
| 0
| 0
| 0
| 0
| 0
| 0.005025
| 0.200161
| 1,244
| 48
| 78
| 25.916667
| 0.801005
| 0.109325
| 0
| 0
| 0
| 0
| 0.142336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
863b8cb7916efe3b96226a5e47386ab45aa3a5f0
| 9,162
|
py
|
Python
|
examples/solar/p25_nonsparse_cmmgp.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
examples/solar/p25_nonsparse_cmmgp.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
examples/solar/p25_nonsparse_cmmgp.py
|
axdahl/SC-MMGP
|
c6cd9d9de66bb7074925a4b6485f10a74bdd9f68
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Script to execute example covarying MMGP regression forecasting model
with full Krhh.
Inputs: Data training and test sets (dictionary pickle)
Data for example:
- normalised solar data for 25 sites for 15 minute forecast
- N_train = 4200, N_test = 2276, P = 25, D = 51
- Xtr[:, :50] 2 recent lagged observations for each site in order
- Xtr[:, 50] time index
- link inputs is a 25x2 array (link inputs repeated for every group)
with normalised lat,long for each site in order
Model Options:
- Sparse or full x-function covariance prior Krhh (set bool SPARSE_PRIOR)
- Diagonal or Kronecker-structured variational posterior covariance Sr (set bool DIAG_POST)
- Sparse or full posterior covariance (when Kronecker posterior; set bool SPARSE_POST)
Current Settings (sparse covarying mmgp model with sparse Kronecker posterior):
DIAG_POST = False
SPARSE_PRIOR = False # set True for equivalent sparse scmmgp model
SPARSE_POST = True
Note on specifying group structure for F:
Grouping occurs via block_struct, a nested list of grouping order
Where functions [i] are independent i.e. in own block, set link_kernel[i] = link_inputs[i] = 1.0
See model class preamble and example below for further details.
"""
import os
import numpy as np
import pickle
import pandas as pd
import traceback
import time
import sklearn.cluster
import csv
import sys
import mmgp
from mmgp import likelihoods
from mmgp import kernels
import tensorflow as tf
from mmgp import datasets
from mmgp import losses
from mmgp import util
dpath = '/experiments/datasets/'
dfile = 'p25_inputsdict.pickle'
dlinkfile = 'p25_linkinputsarray.pickle'
outdir = '/experiments/results/p25_nonsparse_cmmgp/'
try:
os.makedirs(outdir)
except FileExistsError:
pass
def get_inputs():
"""
inputsdict contains {'Yte': Yte, 'Ytr': Ytr, 'Xtr': Xtr, 'Xte': Xte} where values are np.arrays
np.arrays are truncated so that they split evenly into batches of size = batchsize
returns inputsdict, Xtr_link (ndarray, shape = [P, D_link_features])
"""
with open(os.path.join(dpath, dfile), 'rb') as f:
d_all = pickle.load(f)
with open(os.path.join(dpath, dlinkfile), 'rb') as f:
d_link = pickle.load(f)
return d_all, d_link
def init_z(train_inputs, num_inducing):
# Initialize inducing points using clustering.
mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing)
cluster_indices = mini_batch.fit_predict(train_inputs)
inducing_locations = mini_batch.cluster_centers_
return inducing_locations
FLAGS = util.util.get_flags()
BATCH_SIZE = FLAGS.batch_size
LEARNING_RATE = FLAGS.learning_rate
DISPLAY_STEP = FLAGS.display_step
EPOCHS = FLAGS.n_epochs
NUM_SAMPLES = FLAGS.mc_train
PRED_SAMPLES = FLAGS.mc_test
NUM_INDUCING = FLAGS.n_inducing
NUM_COMPONENTS = FLAGS.num_components
IS_ARD = FLAGS.is_ard
TOL = FLAGS.opt_tol
VAR_STEPS = FLAGS.var_steps
DIAG_POST = False
SPARSE_PRIOR = False
SPARSE_POST = True # option for non-diag post
MAXTIME = 1200
print("settings done")
# define GPRN P and Q
output_dim = 25 #P
node_dim = 25 #Q
lag_dim = 2
save_nlpds = False # If True saves samples of nlpds for n,p,s
# extract dataset
d, d_link = get_inputs()
Ytr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte']
data = datasets.DataSet(Xtr.astype(np.float32), Ytr.astype(np.float32), shuffle=False)
test = datasets.DataSet(Xte.astype(np.float32), Yte.astype(np.float32), shuffle=False)
print("dataset created")
# model config block rows (where P=Q): block all w.1, w.2 etc, leave f independent
# order of block_struct is rows, node functions
# lists required: block_struct, link_inputs, kern_link, kern
#block_struct nested list of grouping order
weight_struct = [[] for _ in range(output_dim)]
for i in range(output_dim):
row = list(range(i, i+output_dim*(node_dim-1)+1, output_dim))
row_0 = row.pop(i) # bring diag to pivot position
weight_struct[i] = [row_0] + row
nodes = [[x] for x in list(range(output_dim * node_dim, output_dim * node_dim + output_dim))]
block_struct = weight_struct + nodes
# create link inputs (link inputs used repeatedly but can have link input per group)
# permute to bring diagonal to first position
link_inputs = [[] for _ in range(output_dim)]
for i in range(output_dim):
idx = list(range(d_link.shape[0]))
link_inputs[i] = d_link[[idx.pop(i)] + idx, :]
link_inputs = link_inputs + [1.0 for i in range(output_dim)] # for full W row blocks, independent nodes
# create 'between' kernel list
klink_rows = [kernels.CompositeKernel('mul',[kernels.RadialBasis(2, std_dev=2.0, lengthscale=1.0, white=0.01, input_scaling = IS_ARD),
kernels.CompactSlice(2, active_dims=[0,1], lengthscale = 2.0, input_scaling = IS_ARD)] )
for i in range(output_dim) ]
klink_f = [1.0 for i in range(node_dim)]
kernlink = klink_rows + klink_f
# create 'within' kernel
# kern
lag_active_dims_s = [ [] for _ in range(output_dim)]
for i in range(output_dim):
lag_active_dims_s[i] = list(range(lag_dim*i, lag_dim*(i+1)))
k_rows = [kernels.CompositeKernel('mul',[kernels.RadialBasisSlice(lag_dim, active_dims=lag_active_dims_s[i],
std_dev = 1.0, white = 0.01, input_scaling = IS_ARD),
kernels.PeriodicSliceFixed(1, active_dims=[Xtr.shape[1]-1],
lengthscale=0.5, std_dev=1.0, period = 144) ])
for i in range(output_dim)]
k_f = [kernels.RadialBasisSlice(lag_dim, active_dims=lag_active_dims_s[i], std_dev = 1.0, white = 0.01, input_scaling = IS_ARD)
for i in range(output_dim)]
kern = k_rows + k_f
print('len link_inputs ',len(link_inputs))
print('len kernlink ',len(kernlink))
print('len kern ', len(kern))
print('no. groups = ', len(block_struct), 'no. latent functions =', len([i for b in block_struct for i in b]))
print('number latent functions', node_dim*(output_dim+1))
likelihood = likelihoods.CovaryingRegressionNetwork(output_dim, node_dim, std_dev = 0.2) # p, q, lik_noise
print("likelihood and kernels set")
Z = init_z(data.X, NUM_INDUCING)
print('inducing points set')
m = mmgp.ExplicitSCMMGP(output_dim, likelihood, kern, kernlink, block_struct, Z, link_inputs,
num_components=NUM_COMPONENTS, diag_post=DIAG_POST, sparse_prior=SPARSE_PRIOR,
sparse_post=SPARSE_POST, num_samples=NUM_SAMPLES, predict_samples=PRED_SAMPLES)
print("model set")
# initialise losses and logging
error_rate = losses.RootMeanSqError(data.Dout)
os.chdir(outdir)
with open("log_results.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'fit_runtime', 'nelbo', error_rate.get_name(),'generalised_nlpd'])
with open("log_params.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'raw_kernel_params', 'raw_kernlink_params', 'raw_likelihood_params', 'raw_weights'])
with open("log_comp_time.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'batch_time', 'nelbo_time', 'pred_time', 'gen_nlpd_time', error_rate.get_name()+'_time'])
# optimise
o = tf.train.AdamOptimizer(LEARNING_RATE, beta1=0.9,beta2=0.99)
print("start time = ", time.strftime('%X %x %Z'))
m.fit(data, o, var_steps = VAR_STEPS, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP,
test = test, loss = error_rate, tolerance = TOL, max_time=MAXTIME )
print("optimisation complete")
# export final predicted values and loss metrics
ypred = m.predict(test.X, batch_size = BATCH_SIZE) #same batchsize used for convenience
np.savetxt("predictions.csv", np.concatenate(ypred, axis=1), delimiter=",")
if save_nlpds:
nlpd_samples, nlpd_meanvar = m.nlpd_samples(test.X, test.Y, batch_size = BATCH_SIZE)
try:
np.savetxt("nlpd_meanvar.csv", nlpd_meanvar, delimiter=",") # N x 2P as for predictions
except Exception:
print('nlpd_meanvar export fail')
try:
np.savetxt("nlpd_samples.csv", nlpd_samples, delimiter=",") # NP x S (NxS concat for P tasks)
except Exception:
print('nlpd_samples export fail')
print("Final " + error_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred[0]))
print("Final " + "generalised_nlpd" + "=" + "%.4f" % m.nlpd_general(test.X, test.Y, batch_size = BATCH_SIZE))
error_rate_end = [losses.MeanAbsError(data.Dout)] # any extra accuracy measures at end of routine
print("Final ", [e.get_name() for e in error_rate_end])
print([e.eval(test.Y, ypred[0]) for e in error_rate_end])
predvar = [np.mean(np.mean(ypred[1]))]
print("Final predvar ", predvar)
with open("final_losses.csv", 'w', newline='') as f:
csv.writer(f).writerows([[e.get_name() for e in error_rate_end] + ['pred_var'],
[e.eval(test.Y, ypred[0]) for e in error_rate_end] + predvar])
print("finish time = " + time.strftime('%X %x %Z'))
| 39.662338
| 135
| 0.685986
| 1,363
| 9,162
| 4.434336
| 0.264857
| 0.028293
| 0.02548
| 0.026473
| 0.196889
| 0.157512
| 0.118134
| 0.118134
| 0.104897
| 0.096294
| 0
| 0.015385
| 0.198319
| 9,162
| 230
| 136
| 39.834783
| 0.807488
| 0.258677
| 0
| 0.07971
| 0
| 0
| 0.117774
| 0.020141
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014493
| false
| 0.007246
| 0.115942
| 0
| 0.144928
| 0.144928
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
863bc130193c72b67b65cca8c77628d45f1d2148
| 11,072
|
py
|
Python
|
cruiser-lib/test/positioning/test_position_hl_commander.py
|
cfreebuf/kubeedge-examples
|
9b2ab402c33546215a0a9e02e92f5b0aa88bcff9
|
[
"Apache-2.0"
] | null | null | null |
cruiser-lib/test/positioning/test_position_hl_commander.py
|
cfreebuf/kubeedge-examples
|
9b2ab402c33546215a0a9e02e92f5b0aa88bcff9
|
[
"Apache-2.0"
] | null | null | null |
cruiser-lib/test/positioning/test_position_hl_commander.py
|
cfreebuf/kubeedge-examples
|
9b2ab402c33546215a0a9e02e92f5b0aa88bcff9
|
[
"Apache-2.0"
] | 1
|
2019-12-02T01:00:18.000Z
|
2019-12-02T01:00:18.000Z
|
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2018 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import math
import sys
import unittest
from cflib.crazyflie import Crazyflie
from cflib.crazyflie import HighLevelCommander
from cflib.crazyflie import Param
from cflib.positioning.position_hl_commander import PositionHlCommander
if sys.version_info < (3, 3):
from mock import MagicMock, patch, call
else:
from unittest.mock import MagicMock, patch, call
@patch('time.sleep')
class TestPositionHlCommander(unittest.TestCase):
def setUp(self):
self.commander_mock = MagicMock(spec=HighLevelCommander)
self.param_mock = MagicMock(spec=Param)
self.cf_mock = MagicMock(spec=Crazyflie)
self.cf_mock.high_level_commander = self.commander_mock
self.cf_mock.param = self.param_mock
self.cf_mock.is_connected.return_value = True
self.sut = PositionHlCommander(self.cf_mock)
def test_that_the_estimator_is_reset_on_take_off(
self, sleep_mock):
# Fixture
sut = PositionHlCommander(self.cf_mock, 1.0, 2.0, 3.0)
# Test
sut.take_off()
# Assert
self.param_mock.set_value.assert_has_calls([
call('kalman.initialX', '{:.2f}'.format(1.0)),
call('kalman.initialY', '{:.2f}'.format(2.0)),
call('kalman.initialZ', '{:.2f}'.format(3.0)),
call('kalman.resetEstimation', '1'),
call('kalman.resetEstimation', '0')
])
def test_that_the_hi_level_commander_is_activated_on_take_off(
self, sleep_mock):
# Fixture
# Test
self.sut.take_off()
# Assert
self.param_mock.set_value.assert_has_calls([
call('commander.enHighLevel', '1')
])
def test_that_controller_is_selected_on_take_off(
self, sleep_mock):
# Fixture
self.sut.set_controller(PositionHlCommander.CONTROLLER_MELLINGER)
# Test
self.sut.take_off()
# Assert
self.param_mock.set_value.assert_has_calls([
call('stabilizer.controller', '2')
])
def test_that_take_off_raises_exception_if_not_connected(
self, sleep_mock):
# Fixture
self.cf_mock.is_connected.return_value = False
# Test
# Assert
with self.assertRaises(Exception):
self.sut.take_off()
def test_that_take_off_raises_exception_when_already_flying(
self, sleep_mock):
# Fixture
self.sut.take_off()
# Test
# Assert
with self.assertRaises(Exception):
self.sut.take_off()
def test_that_it_goes_up_on_take_off(
self, sleep_mock):
# Fixture
# Test
self.sut.take_off(height=0.4, velocity=0.6)
# Assert
duration = 0.4 / 0.6
self.commander_mock.takeoff.assert_called_with(0.4, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_up_to_default_height(
self, sleep_mock):
# Fixture
sut = PositionHlCommander(self.cf_mock, default_height=0.4)
# Test
sut.take_off(velocity=0.6)
# Assert
duration = 0.4 / 0.6
self.commander_mock.takeoff.assert_called_with(0.4, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_down_on_landing(
self, sleep_mock):
# Fixture
self.sut.take_off(height=0.4)
# Test
self.sut.land(velocity=0.6)
# Assert
duration = 0.4 / 0.6
self.commander_mock.land.assert_called_with(0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_takes_off_and_lands_as_context_manager(
self, sleep_mock):
# Fixture
# Test
with self.sut:
pass
# Assert
duration1 = 0.5 / 0.5
duration2 = 0.5 / 0.5
self.commander_mock.takeoff.assert_called_with(0.5, duration1)
self.commander_mock.land.assert_called_with(0.0, duration2)
sleep_mock.assert_called_with(duration1)
sleep_mock.assert_called_with(duration2)
def test_that_it_returns_current_position(
self, sleep_mock):
# Fixture
self.sut.take_off(height=0.4, velocity=0.6)
# Test
actual = self.sut.get_position()
# Assert
self.assertEqual(actual, (0.0, 0.0, 0.4))
def test_that_it_goes_to_position(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.go_to(1.0, 2.0, 3.0, 4.0)
# Assert
distance = self._distance(inital_pos, (1.0, 2.0, 3.0))
duration = distance / 4.0
self.commander_mock.go_to.assert_called_with(
1.0, 2.0, 3.0, 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_moves_distance(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.move_distance(1.0, 2.0, 3.0, 4.0)
# Assert
distance = self._distance((0.0, 0.0, 0.0), (1.0, 2.0, 3.0))
duration = distance / 4.0
final_pos = (
inital_pos[0] + 1.0,
inital_pos[1] + 2.0,
inital_pos[2] + 3.0)
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_forward(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.forward(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0] + 1.0,
inital_pos[1],
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_back(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.back(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0] - 1.0,
inital_pos[1],
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_left(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.left(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1] + 1.0,
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_right(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.right(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1] - 1,
inital_pos[2])
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_up(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.up(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1],
inital_pos[2] + 1)
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0, duration)
sleep_mock.assert_called_with(duration)
def test_that_it_goes_down(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
# Test
self.sut.down(1.0, 2.0)
# Assert
duration = 1.0 / 2.0
final_pos = (
inital_pos[0],
inital_pos[1],
inital_pos[2] - 1)
self.commander_mock.go_to.assert_called_with(
final_pos[0], final_pos[1], final_pos[2], 0, duration)
sleep_mock.assert_called_with(duration)
def test_that_default_velocity_is_used(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
self.sut.set_default_velocity(7)
# Test
self.sut.go_to(1.0, 2.0, 3.0)
# Assert
distance = self._distance(inital_pos, (1.0, 2.0, 3.0))
duration = distance / 7.0
self.commander_mock.go_to.assert_called_with(
1.0, 2.0, 3.0, 0.0, duration)
sleep_mock.assert_called_with(duration)
def test_that_default_height_is_used(
self, sleep_mock):
# Fixture
self.sut.take_off()
inital_pos = self.sut.get_position()
self.sut.set_default_velocity(7.0)
self.sut.set_default_height(5.0)
# Test
self.sut.go_to(1.0, 2.0)
# Assert
distance = self._distance(inital_pos, (1.0, 2.0, 5.0))
duration = distance / 7.0
self.commander_mock.go_to.assert_called_with(
1.0, 2.0, 5.0, 0.0, duration)
sleep_mock.assert_called_with(duration)
######################################################################
def _distance(self, p1, p2):
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
dz = p1[2] - p2[2]
return math.sqrt(dx * dx + dy * dy + dz * dz)
if __name__ == '__main__':
unittest.main()
| 29.525333
| 74
| 0.587157
| 1,470
| 11,072
| 4.105442
| 0.137415
| 0.053355
| 0.079536
| 0.015907
| 0.693621
| 0.654515
| 0.641425
| 0.61541
| 0.602983
| 0.570174
| 0
| 0.041184
| 0.29823
| 11,072
| 374
| 75
| 29.604278
| 0.735521
| 0.125181
| 0
| 0.563063
| 0
| 0
| 0.01794
| 0.009022
| 0
| 0
| 0
| 0
| 0.162162
| 1
| 0.099099
| false
| 0.004505
| 0.040541
| 0
| 0.148649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
863c0ed7e6b8dca169d56f2c58a602b033d4bb29
| 6,232
|
py
|
Python
|
onmt/keyphrase/pke/unsupervised/graph_based/expandrank.py
|
NaomiatLibrary/OpenNMT-kpg-release
|
1da3468d7dad22529a77f3526abf9b373bd3dc4c
|
[
"MIT"
] | 152
|
2019-10-07T03:15:53.000Z
|
2022-03-24T16:26:26.000Z
|
onmt/keyphrase/pke/unsupervised/graph_based/expandrank.py
|
NaomiatLibrary/OpenNMT-kpg-release
|
1da3468d7dad22529a77f3526abf9b373bd3dc4c
|
[
"MIT"
] | 46
|
2019-11-04T09:51:51.000Z
|
2022-03-06T18:40:13.000Z
|
onmt/keyphrase/pke/unsupervised/graph_based/expandrank.py
|
NaomiatLibrary/OpenNMT-kpg-release
|
1da3468d7dad22529a77f3526abf9b373bd3dc4c
|
[
"MIT"
] | 28
|
2019-11-04T02:02:23.000Z
|
2021-12-29T06:10:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 10-02-2018
"""ExpandRank keyphrase extraction model.
Graph-based ranking approach to keyphrase extraction described in:
* Xiaojun Wan and Jianguo Xiao.
Single Document Keyphrase Extraction Using Neighborhood Knowledge.
*In proceedings of AAAI*, pages 855-860, 2008.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from onmt.keyphrase.pke.unsupervised import SingleRank
from onmt.keyphrase.pke.base import LoadFile
import networkx as nx
import logging
class ExpandRank(SingleRank):
"""ExpandRank keyphrase extraction model.
Parameterized example::
import pke
import string
from nltk.corpus import stopwords
# 1. create an ExpandRank extractor.
extractor = pke.unsupervised.ExpandRank()
# 2. load the content of the document.
extractor.load_document(input='path/to/input.xml')
# 3. select the longest sequences of nouns and adjectives that do
# not contain punctuation marks or stopwords as candidates.
pos = {'NOUN', 'PROPN', 'ADJ'}
stoplist = list(string.punctuation)
stoplist += ['-lrb-', '-rrb-', '-lcb-', '-rcb-', '-lsb-', '-rsb-']
stoplist += stopwords.words('english')
extractor.candidate_selection(pos=pos, stoplist=stoplist)
# 4. weight the candidates using the sum of their word's scores that are
# computed using random walk. In the graph, nodes are words (nouns
# and adjectives only) that are connected if they occur in a window
# of 10 words. A set of extra documents should be provided to expand
# the graph.
expanded_documents = [('path/to/input1.xml', similarity1),
('path/to/input2.xml', similarity2)]
extractor.candidate_weighting(window=10,
pos=pos,
expanded_documents=expanded_documents,
format='corenlp')
# 5. get the 10-highest scored candidates as keyphrases
keyphrases = extractor.get_n_best(n=10)
"""
def __init__(self):
""" Redefining initializer for ExpandRank. """
super(ExpandRank, self).__init__()
def expand_word_graph(self,
input_file,
similarity,
window=10,
pos=None):
"""Expands the word graph using the given document.
Args:
input_file (str): path to the input file.
similarity (float): similarity for weighting edges.
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
"""
# define default pos tags set
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
# initialize document loader
doc = LoadFile()
# read document
doc.load_document(input=input_file,
language=self.language,
normalization=self.normalization)
# flatten document and initialize nodes
sequence = []
for sentence in doc.sentences:
for j, node in enumerate(sentence.stems):
if node not in self.graph and sentence.pos[j] in pos:
self.graph.add_node(node)
sequence.append((node, sentence.pos[j]))
# loop through sequence to build the edges in the graph
for j, node_1 in enumerate(sequence):
for k in range(j + 1, min(j + window, len(sequence))):
node_2 = sequence[k]
if node_1[1] in pos and node_2[1] in pos \
and node_1[0] != node_2[0]:
if not self.graph.has_edge(node_1[0], node_2[0]):
self.graph.add_edge(node_1[0], node_2[0], weight=0)
self.graph[node_1[0]][node_2[0]]['weight'] += similarity
def candidate_weighting(self,
window=10,
pos=None,
expanded_documents=None,
normalized=False):
"""Candidate ranking using random walk.
Args:
window (int): the window within the sentence for connecting two
words in the graph, defaults to 10.
pos (set): the set of valid pos for words to be considered as nodes
in the graph, defaults to ('NOUN', 'PROPN', 'ADJ').
expanded_documents (list): the set of documents to expand the graph,
should be a list of tuples (input_path, similarity). Defaults to
empty list, i.e. no expansion.
normalized (False): normalize keyphrase score by their length,
defaults to False.
"""
# define default pos tags set
if pos is None:
pos = {'NOUN', 'PROPN', 'ADJ'}
if expanded_documents is None:
expanded_documents = []
logging.warning('No neighbor documents provided for ExpandRank.')
# build the word graph
self.build_word_graph(window=window, pos=pos)
# expand the word graph
for input_file, similarity in expanded_documents:
self.expand_word_graph(input_file=input_file,
similarity=similarity,
window=window,
pos=pos)
# compute the word scores using random walk
w = nx.pagerank_scipy(self.graph, alpha=0.85, weight='weight')
# loop through the candidates
for k in self.candidates.keys():
tokens = self.candidates[k].lexical_form
self.weights[k] = sum([w[t] for t in tokens])
if normalized:
self.weights[k] /= len(tokens)
| 37.542169
| 80
| 0.574294
| 723
| 6,232
| 4.850622
| 0.316736
| 0.018249
| 0.017109
| 0.02053
| 0.145423
| 0.13915
| 0.135729
| 0.11976
| 0.11976
| 0.11976
| 0
| 0.017313
| 0.341945
| 6,232
| 165
| 81
| 37.769697
| 0.837844
| 0.503209
| 0
| 0.103448
| 0
| 0
| 0.029475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.137931
| 0
| 0.206897
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
863daa6816d11bde4f87896d1cc47d06ece1f0db
| 5,066
|
py
|
Python
|
dash/long_callback/managers/celery_manager.py
|
nickmelnikov82/dash
|
e774908da770bee83f3213e0307c27ed8a40500e
|
[
"MIT"
] | 17,143
|
2015-07-14T17:19:05.000Z
|
2022-03-31T10:03:39.000Z
|
dash/long_callback/managers/celery_manager.py
|
nickmelnikov82/dash
|
e774908da770bee83f3213e0307c27ed8a40500e
|
[
"MIT"
] | 1,630
|
2015-11-17T22:15:41.000Z
|
2022-03-31T09:15:07.000Z
|
dash/long_callback/managers/celery_manager.py
|
nickmelnikov82/dash
|
e774908da770bee83f3213e0307c27ed8a40500e
|
[
"MIT"
] | 1,970
|
2015-07-12T07:05:14.000Z
|
2022-03-30T19:58:09.000Z
|
import json
import inspect
import hashlib
from _plotly_utils.utils import PlotlyJSONEncoder
from dash.long_callback.managers import BaseLongCallbackManager
class CeleryLongCallbackManager(BaseLongCallbackManager):
def __init__(self, celery_app, cache_by=None, expire=None):
"""
Long callback manager that runs callback logic on a celery task queue,
and stores results using a celery result backend.
:param celery_app:
A celery.Celery application instance that must be configured with a
result backend. See the celery documentation for information on
configuration options.
:param cache_by:
A list of zero-argument functions. When provided, caching is enabled and
the return values of these functions are combined with the callback
function's input arguments and source code to generate cache keys.
:param expire:
If provided, a cache entry will be removed when it has not been accessed
for ``expire`` seconds. If not provided, the lifetime of cache entries
is determined by the default behavior of the celery result backend.
"""
try:
import celery # pylint: disable=import-outside-toplevel,import-error
from celery.backends.base import ( # pylint: disable=import-outside-toplevel,import-error
DisabledBackend,
)
except ImportError as missing_imports:
raise ImportError(
"""\
CeleryLongCallbackManager requires extra dependencies which can be installed doing
$ pip install "dash[celery]"\n"""
) from missing_imports
if not isinstance(celery_app, celery.Celery):
raise ValueError("First argument must be a celery.Celery object")
if isinstance(celery_app.backend, DisabledBackend):
raise ValueError("Celery instance must be configured with a result backend")
super().__init__(cache_by)
self.handle = celery_app
self.expire = expire
def terminate_job(self, job):
if job is None:
return
self.handle.control.terminate(job)
def terminate_unhealthy_job(self, job):
task = self.get_task(job)
if task and task.status in ("FAILURE", "REVOKED"):
return self.terminate_job(job)
return False
def job_running(self, job):
future = self.get_task(job)
return future and future.status in (
"PENDING",
"RECEIVED",
"STARTED",
"RETRY",
"PROGRESS",
)
def make_job_fn(self, fn, progress, args_deps):
return _make_job_fn(fn, self.handle, progress, args_deps)
def get_task(self, job):
if job:
return self.handle.AsyncResult(job)
return None
def clear_cache_entry(self, key):
self.handle.backend.delete(key)
def call_job_fn(self, key, job_fn, args):
task = job_fn.delay(key, self._make_progress_key(key), args)
return task.task_id
def get_progress(self, key):
progress_key = self._make_progress_key(key)
progress_data = self.handle.backend.get(progress_key)
if progress_data:
return json.loads(progress_data)
return None
def result_ready(self, key):
return self.handle.backend.get(key) is not None
def get_result(self, key, job):
# Get result value
result = self.handle.backend.get(key)
if result is None:
return None
result = json.loads(result)
# Clear result if not caching
if self.cache_by is None:
self.clear_cache_entry(key)
else:
if self.expire:
# Set/update expiration time
self.handle.backend.expire(key, self.expire)
self.clear_cache_entry(self._make_progress_key(key))
self.terminate_job(job)
return result
def _make_job_fn(fn, celery_app, progress, args_deps):
cache = celery_app.backend
# Hash function source and module to create a unique (but stable) celery task name
fn_source = inspect.getsource(fn)
fn_str = fn_source
fn_hash = hashlib.sha1(fn_str.encode("utf-8")).hexdigest()
@celery_app.task(name=f"long_callback_{fn_hash}")
def job_fn(result_key, progress_key, user_callback_args, fn=fn):
def _set_progress(progress_value):
cache.set(progress_key, json.dumps(progress_value, cls=PlotlyJSONEncoder))
maybe_progress = [_set_progress] if progress else []
if isinstance(args_deps, dict):
user_callback_output = fn(*maybe_progress, **user_callback_args)
elif isinstance(args_deps, (list, tuple)):
user_callback_output = fn(*maybe_progress, *user_callback_args)
else:
user_callback_output = fn(*maybe_progress, user_callback_args)
cache.set(result_key, json.dumps(user_callback_output, cls=PlotlyJSONEncoder))
return job_fn
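# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of constructing the manager, assuming a Redis-backed Celery
# app; the broker/backend URLs are placeholders and the Dash wiring is shown
# only in outline:
#
#     import celery
#     from dash.long_callback import CeleryLongCallbackManager
#
#     celery_app = celery.Celery(
#         __name__,
#         broker="redis://localhost:6379/0",
#         backend="redis://localhost:6379/1",
#     )
#     manager = CeleryLongCallbackManager(celery_app, expire=60)
#     # app = dash.Dash(__name__, long_callback_manager=manager)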
| 34.937931
| 102
| 0.651402
| 632
| 5,066
| 5.039557
| 0.287975
| 0.028257
| 0.026688
| 0.017896
| 0.148509
| 0.11146
| 0.095761
| 0.046154
| 0.046154
| 0
| 0
| 0.000543
| 0.273194
| 5,066
| 144
| 103
| 35.180556
| 0.864476
| 0.206277
| 0
| 0.055556
| 0
| 0
| 0.047315
| 0.006114
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155556
| false
| 0
| 0.111111
| 0.022222
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
863dcb2e53c2cd1e93015fc8efa9a5e953801c7f
| 2,027
|
py
|
Python
|
libraries/botframework-connector/botframework/connector/token_api/_token_api_client.py
|
victor-kironde/botbuilder-python
|
e893d9b036d7cf33cf9c9afd1405450c354cdbcd
|
[
"MIT"
] | 10
|
2019-05-11T18:07:14.000Z
|
2021-08-20T03:02:47.000Z
|
libraries/botframework-connector/botframework/connector/token_api/_token_api_client.py
|
Fortune-Adekogbe/botbuilder-python
|
4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4
|
[
"MIT"
] | 13
|
2020-09-05T11:06:05.000Z
|
2020-10-29T05:01:19.000Z
|
botframework_connector-4.11.0/botframework/connector/token_api/_token_api_client.py
|
opsdroid/wheels-for-teams-connector
|
c283bb5ab95a4c3d4023767d53d2686aa531f372
|
[
"Apache-2.0"
] | 18
|
2019-08-19T12:11:00.000Z
|
2021-10-12T09:36:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import TokenApiClientConfiguration
from .operations import BotSignInOperations
from .operations import UserTokenOperations
from . import models
class TokenApiClient(SDKClient):
"""TokenApiClient
:ivar config: Configuration for client.
:vartype config: TokenApiClientConfiguration
:ivar bot_sign_in: BotSignIn operations
:vartype bot_sign_in: botframework.tokenapi.operations.BotSignInOperations
:ivar user_token: UserToken operations
:vartype user_token: botframework.tokenapi.operations.UserTokenOperations
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(self, credentials, base_url=None):
self.config = TokenApiClientConfiguration(credentials, base_url)
super(TokenApiClient, self).__init__(self.config.credentials, self.config)
client_models = {
k: v for k, v in models.__dict__.items() if isinstance(v, type)
}
self.api_version = "token"
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.bot_sign_in = BotSignInOperations(
self._client, self.config, self._serialize, self._deserialize
)
self.user_token = UserTokenOperations(
self._client, self.config, self._serialize, self._deserialize
)
| 36.196429
| 82
| 0.683769
| 205
| 2,027
| 6.57561
| 0.443902
| 0.037092
| 0.02003
| 0.029674
| 0.071217
| 0.071217
| 0.071217
| 0.071217
| 0
| 0
| 0
| 0.000601
| 0.178589
| 2,027
| 55
| 83
| 36.854545
| 0.809009
| 0.459793
| 0
| 0.090909
| 0
| 0
| 0.004831
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.272727
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|