hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fad85e15e07512f12e7b222a127aaf3fb9ecb328 | 670 | py | Python | occuspytial/tests/test_chain.py | zoj613/OccuSpytial | e599d515b5377cb8f7a16af828f39b115a04bc66 | [
"BSD-3-Clause"
] | 5 | 2018-11-27T13:41:51.000Z | 2021-08-28T23:50:13.000Z | occuspytial/tests/test_chain.py | zoj613/OccuSpytial | e599d515b5377cb8f7a16af828f39b115a04bc66 | [
"BSD-3-Clause"
] | 4 | 2020-08-20T16:58:11.000Z | 2021-03-22T20:27:55.000Z | occuspytial/tests/test_chain.py | zoj613/OccuSpytial | e599d515b5377cb8f7a16af828f39b115a04bc66 | [
"BSD-3-Clause"
] | 1 | 2021-02-01T16:17:46.000Z | 2021-02-01T16:17:46.000Z | import numpy as np
import pytest
from occuspytial.chain import Chain
def test_chain():
    """Exercise the basic Chain API: capacity, append, expand, indexing, repr."""
    # 'p1' occupies 2 columns and 'p2' 1, so the combined sample array
    # should be 3 columns wide.
    params = {'p1': 2, 'p2': 1}
    # second argument is the initial capacity — presumably the number of
    # samples the chain can hold before expand() is required (confirmed by
    # the 'Chain is full' assertion below).
    c = Chain(params, 1)
    # test the number of columns of the full array of parameters
    assert c.full.shape[1] == 3
    assert len(c) == 0
    c.append({'p1': [1, 2], 'p2': 3})
    assert len(c) == 1
    # appending beyond capacity must fail loudly
    with pytest.raises(ValueError, match='Chain is full'):
        c.append({'p1': [1, 2], 'p2': 3})
    # expand(n) grows capacity so further appends succeed
    c.expand(1)
    c.append({'p1': [1, 2], 'p2': 3})
    assert len(c) == 2
    # indexing by parameter name returns all stored samples for it
    assert np.all(c['p1'] == [[1, 2], [1, 2]])
    # unknown parameter names raise KeyError
    with pytest.raises(KeyError):
        c['p3']
    assert repr(c) == "Chain(params: ('p1', 'p2'), size: 2)"
| 22.333333 | 64 | 0.541791 |
f0cfbed17130ebdf6170fb8d9a526b6861e684ca | 520 | py | Python | FileProcessSet/move_files_to_root.py | stwrd/ImageProcessTool | 311e997a5beaf31f90fe734aacf2944084e8dd1d | [
"MIT"
] | null | null | null | FileProcessSet/move_files_to_root.py | stwrd/ImageProcessTool | 311e997a5beaf31f90fe734aacf2944084e8dd1d | [
"MIT"
] | null | null | null | FileProcessSet/move_files_to_root.py | stwrd/ImageProcessTool | 311e997a5beaf31f90fe734aacf2944084e8dd1d | [
"MIT"
] | null | null | null | #移动目标文件夹下的所有文件至根目录
import shutil
import os
from pathlib import Path
# Copy every *.jpg found recursively under target_path into dst_folder,
# flattening the directory structure.
pattern_str = '*.jpg'
target_path = '/media/hzh/docker_disk/dataset/traffic/交通违法第一次通用标注/src'
dst_folder = '/media/hzh/docker_disk/dataset/traffic/交通违法第一次通用标注/Annotations'
# NOTE(review): the module header says "move", but shutil.copy below only
# copies — source files are left in place.  Confirm which is intended.
filenames = Path(target_path).rglob(pattern_str)
for filename in filenames:
    # keep only the basename so all files land directly in dst_folder
    base_name = os.path.split(filename.as_posix())[1]
    dst_path = os.path.join(dst_folder,base_name)
    # NOTE(review): files with identical basenames in different subfolders
    # silently overwrite each other at the destination.
    shutil.copy(filename.as_posix(),dst_path)
    print('copy {} to {}'.format(filename,dst_path))
f3bfbf04f4165da3020cbdec3df696dc7a1aa1c7 | 10,764 | py | Python | tests/unit/cache/test_localfs.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:21.000Z | 2020-01-02T09:03:21.000Z | tests/unit/cache/test_localfs.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | null | null | null | tests/unit/cache/test_localfs.py | jubrad/salt | 7960334fb726cfde45e6409da79a65535c626685 | [
"Apache-2.0"
] | 1 | 2020-01-02T09:03:24.000Z | 2020-01-02T09:03:24.000Z | # -*- coding: utf-8 -*-
'''
unit tests for the localfs cache
'''
# Import Python libs
from __future__ import absolute_import
import shutil
import tempfile
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.payload
import salt.utils.files
import salt.cache.localfs as localfs
from salt.exceptions import SaltCacheError
# Import 3rd-party libs
from salt.ext import six
@skipIf(NO_MOCK, NO_MOCK_REASON)
class LocalFSTest(TestCase, LoaderModuleMockMixin):
    '''
    Validate the functions in the localfs cache.

    Each public localfs function (store, fetch, updated, flush, ls, contains)
    gets failure-path tests driven by mocks plus a success-path test that
    writes a real cache file into a temporary directory.
    '''
    def setup_loader_modules(self):
        return {localfs: {}}
    def _create_tmp_cache_file(self, tmp_dir, serializer):
        '''
        Helper function that creates a temporary cache file using localfs.store.
        This is used to create DRY unit tests for the localfs cache: it writes
        'payload data' under bank 'bank' and key 'key' in tmp_dir, and
        registers tmp_dir for cleanup.
        '''
        self.addCleanup(shutil.rmtree, tmp_dir)
        with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
            with patch.dict(localfs.__context__, {'serial': serializer}):
                localfs.store(bank='bank', key='key', data='payload data', cachedir=tmp_dir)
    # 'store' function tests: 4
    def test_store_no_base_cache_dir(self):
        '''
        Tests that a SaltCacheError is raised when the base directory doesn't exist and
        cannot be created.
        '''
        with patch('os.path.isdir', MagicMock(return_value=None)):
            with patch('os.makedirs', MagicMock(side_effect=OSError)):
                self.assertRaises(SaltCacheError, localfs.store, bank='', key='', data='', cachedir='')
    def test_store_close_mkstemp_file_handle(self):
        '''
        Tests that the file descriptor that is opened by os.open during the mkstemp call
        in localfs.store is closed before calling salt.utils.files.fopen on the filename.
        This test mocks the call to mkstemp, but forces an OSError to be raised when the
        close() function is called on a file descriptor that doesn't exist.
        '''
        with patch('os.path.isdir', MagicMock(return_value=True)):
            with patch('tempfile.mkstemp', MagicMock(return_value=(12345, 'foo'))):
                self.assertRaises(OSError, localfs.store, bank='', key='', data='', cachedir='')
    def test_store_error_writing_cache(self):
        '''
        Tests that a SaltCacheError is raised when there is a problem writing to the
        cache file.
        '''
        with patch('os.path.isdir', MagicMock(return_value=True)):
            with patch('tempfile.mkstemp', MagicMock(return_value=('one', 'two'))):
                with patch('os.close', MagicMock(return_value=None)):
                    with patch('salt.utils.files.fopen', MagicMock(side_effect=IOError)):
                        self.assertRaises(SaltCacheError, localfs.store, bank='', key='', data='', cachedir='')
    def test_store_success(self):
        '''
        Tests that the store function writes the data to the serializer for storage.
        '''
        # Create a temporary cache dir
        tmp_dir = tempfile.mkdtemp(dir=TMP)
        # Use the helper function to create the cache file using localfs.store()
        self._create_tmp_cache_file(tmp_dir, salt.payload.Serial(self))
        # Read in the contents of the key.p file and assert "payload data" was written
        with salt.utils.files.fopen(tmp_dir + '/bank/key.p', 'rb') as fh_:
            for line in fh_:
                self.assertIn(six.b('payload data'), line)
    # 'fetch' function tests: 3
    def test_fetch_return_when_cache_file_does_not_exist(self):
        '''
        Tests that the fetch function returns an empty dic when the cache key file
        doesn't exist.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            self.assertEqual(localfs.fetch(bank='', key='', cachedir=''), {})
    def test_fetch_error_reading_cache(self):
        '''
        Tests that a SaltCacheError is raised when there is a problem reading the cache
        file.
        '''
        with patch('os.path.isfile', MagicMock(return_value=True)):
            with patch('salt.utils.files.fopen', MagicMock(side_effect=IOError)):
                self.assertRaises(SaltCacheError, localfs.fetch, bank='', key='', cachedir='')
    def test_fetch_success(self):
        '''
        Tests that the fetch function is able to read the cache file and return its data.
        '''
        # Create a temporary cache dir
        tmp_dir = tempfile.mkdtemp(dir=TMP)
        # Create a new serializer object to use in function patches
        serializer = salt.payload.Serial(self)
        # Use the helper function to create the cache file using localfs.store()
        self._create_tmp_cache_file(tmp_dir, serializer)
        # Now fetch the data from the new cache key file
        with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
            with patch.dict(localfs.__context__, {'serial': serializer}):
                self.assertIn('payload data', localfs.fetch(bank='bank', key='key', cachedir=tmp_dir))
    # 'updated' function tests: 3
    def test_updated_return_when_cache_file_does_not_exist(self):
        '''
        Tests that the updated function returns None when the cache key file doesn't
        exist.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            self.assertIsNone(localfs.updated(bank='', key='', cachedir=''))
    def test_updated_error_when_reading_mtime(self):
        '''
        Tests that a SaltCacheError is raised when there is a problem reading the mtime
        of the cache file.
        '''
        with patch('os.path.isfile', MagicMock(return_value=True)):
            with patch('os.path.getmtime', MagicMock(side_effect=IOError)):
                self.assertRaises(SaltCacheError, localfs.updated, bank='', key='', cachedir='')
    def test_updated_success(self):
        '''
        Test that the updated function returns the modification time of the cache file
        '''
        # Create a temporary cache dir
        tmp_dir = tempfile.mkdtemp(dir=TMP)
        # Use the helper function to create the cache file using localfs.store()
        self._create_tmp_cache_file(tmp_dir, salt.payload.Serial(self))
        # Force the join to resolve to the real key file written above
        with patch('os.path.join', MagicMock(return_value=tmp_dir + '/bank/key.p')):
            self.assertIsInstance(localfs.updated(bank='bank', key='key', cachedir=tmp_dir), int)
    # 'flush' function tests: 4
    def test_flush_key_is_none_and_no_target_dir(self):
        '''
        Tests that the flush function returns False when no key is passed in and the
        target directory doesn't exist.
        '''
        with patch('os.path.isdir', MagicMock(return_value=False)):
            self.assertFalse(localfs.flush(bank='', key=None, cachedir=''))
    def test_flush_key_provided_and_no_key_file_false(self):
        '''
        Tests that the flush function returns False when a key file is provided but
        the target key file doesn't exist in the cache bank.
        '''
        with patch('os.path.isfile', MagicMock(return_value=False)):
            self.assertFalse(localfs.flush(bank='', key='key', cachedir=''))
    def test_flush_success(self):
        '''
        Tests that the flush function returns True when a key file is provided and
        the target key exists in the cache bank.
        '''
        with patch('os.path.isfile', MagicMock(return_value=True)):
            # Create a temporary cache dir
            tmp_dir = tempfile.mkdtemp(dir=TMP)
            # Use the helper function to create the cache file using localfs.store()
            self._create_tmp_cache_file(tmp_dir, salt.payload.Serial(self))
            # Now test the return of the flush function
            with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
                self.assertTrue(localfs.flush(bank='bank', key='key', cachedir=tmp_dir))
    def test_flush_error_raised(self):
        '''
        Tests that a SaltCacheError is raised when there is a problem removing the
        key file from the cache bank
        '''
        with patch('os.path.isfile', MagicMock(return_value=True)):
            with patch('os.remove', MagicMock(side_effect=OSError)):
                self.assertRaises(SaltCacheError, localfs.flush, bank='', key='key', cachedir='/var/cache/salt')
    # 'ls' function tests: 3
    def test_ls_no_base_dir(self):
        '''
        Tests that the ls function returns an empty list if the bank directory
        doesn't exist.
        '''
        with patch('os.path.isdir', MagicMock(return_value=False)):
            self.assertEqual(localfs.ls(bank='', cachedir=''), [])
    def test_ls_error_raised_no_bank_directory_access(self):
        '''
        Tests that a SaltCacheError is raised when there is a problem accessing the
        cache bank directory.
        '''
        with patch('os.path.isdir', MagicMock(return_value=True)):
            with patch('os.listdir', MagicMock(side_effect=OSError)):
                self.assertRaises(SaltCacheError, localfs.ls, bank='', cachedir='')
    def test_ls_success(self):
        '''
        Tests the return of the ls function containing bank entries.
        '''
        # Create a temporary cache dir
        tmp_dir = tempfile.mkdtemp(dir=TMP)
        # Use the helper function to create the cache file using localfs.store()
        self._create_tmp_cache_file(tmp_dir, salt.payload.Serial(self))
        # Now test the return of the ls function
        with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
            self.assertEqual(localfs.ls(bank='bank', cachedir=tmp_dir), ['key'])
    # 'contains' function tests: 1
    def test_contains(self):
        '''
        Test the return of the contains function when key=None and when a key
        is provided.
        '''
        # Create a temporary cache dir
        tmp_dir = tempfile.mkdtemp(dir=TMP)
        # Use the helper function to create the cache file using localfs.store()
        self._create_tmp_cache_file(tmp_dir, salt.payload.Serial(self))
        # Now test the return of the contains function when key=None
        with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
            self.assertTrue(localfs.contains(bank='bank', key=None, cachedir=tmp_dir))
        # Now test the return of the contains function when key='key'
        with patch.dict(localfs.__opts__, {'cachedir': tmp_dir}):
            self.assertTrue(localfs.contains(bank='bank', key='key', cachedir=tmp_dir))
| 40.618868 | 112 | 0.648551 |
3b2934b5d6aebc395e0d591922aa4525238be625 | 9,403 | py | Python | python_modules/dagster/dagster/core/execution/plan/objects.py | stevencasey/dagster | 1881f67f55725c40432dbc1ed99b8cfbd9629600 | [
"Apache-2.0"
] | 1 | 2019-07-15T17:34:04.000Z | 2019-07-15T17:34:04.000Z | python_modules/dagster/dagster/core/execution/plan/objects.py | stevencasey/dagster | 1881f67f55725c40432dbc1ed99b8cfbd9629600 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/execution/plan/objects.py | stevencasey/dagster | 1881f67f55725c40432dbc1ed99b8cfbd9629600 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions import SolidHandle, Materialization
from dagster.core.types.runtime import RuntimeType
from dagster.utils import merge_dicts
from dagster.utils.error import SerializableErrorInfo
from dagster.core.definitions.events import EventMetadataEntry
class StepOutputHandle(namedtuple('_StepOutputHandle', 'step_key output_name')):
    '''Addresses a single named output of a single execution step.'''

    @staticmethod
    def from_step(step, output_name='result'):
        '''Build a handle pointing at *output_name* of an ExecutionStep.'''
        check.inst_param(step, 'step', ExecutionStep)
        return StepOutputHandle(step.key, output_name)

    def __new__(cls, step_key, output_name='result'):
        # Validate both fields before constructing the tuple.
        checked_step_key = check.str_param(step_key, 'step_key')
        checked_output_name = check.str_param(output_name, 'output_name')
        return super(StepOutputHandle, cls).__new__(
            cls, step_key=checked_step_key, output_name=checked_output_name
        )
class SingleOutputStepCreationData(namedtuple('SingleOutputStepCreationData', 'step output_name')):
    '''
    It is very common for step creation to involve processing a single value (e.g. an input thunk).
    This tuple is meant to be used by those functions to return both a new step and the output
    that deals with the value in question.
    '''
    @property
    def step_output_handle(self):
        # Convenience accessor: the StepOutputHandle addressing `output_name`
        # on `step` (validated via from_step).
        return StepOutputHandle.from_step(self.step, self.output_name)
class StepInputData(namedtuple('_StepInputData', 'input_name value_repr type_check_data')):
    '''Snapshot of the value bound to a step input, used for event reporting.'''

    def __new__(cls, input_name, value_repr, type_check_data):
        # Validate everything up front, then build the tuple in one shot.
        checked_name = check.str_param(input_name, 'input_name')
        checked_repr = check.str_param(value_repr, 'value_repr')
        checked_type_check = check.opt_inst_param(
            type_check_data, 'type_check_data', TypeCheckData
        )
        return super(StepInputData, cls).__new__(
            cls,
            input_name=checked_name,
            value_repr=checked_repr,
            type_check_data=checked_type_check,
        )
class TypeCheckData(namedtuple('_TypeCheckData', 'success label description metadata_entries')):
    '''Result of a runtime type check on a value.

    Args:
        success (bool): Whether the type check passed.
        label (str): Short display label for the check.
        description (Optional[str]): Longer human-readable description.
        metadata_entries (Optional[List[EventMetadataEntry]]): Structured
            metadata attached to the check result.
    '''

    def __new__(cls, success, label, description=None, metadata_entries=None):
        return super(TypeCheckData, cls).__new__(
            cls,
            success=check.bool_param(success, 'success'),
            label=check.str_param(label, 'label'),
            description=check.opt_str_param(description, 'description'),
            # BUG FIX: the second argument to opt_list_param is the parameter
            # *name* used in its error messages; it was previously passed the
            # list object itself.
            metadata_entries=check.opt_list_param(
                metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
            ),
        )
class UserFailureData(namedtuple('_UserFailureData', 'label description metadata_entries')):
    '''User-facing description of a failure.

    Args:
        label (str): Short display label for the failure.
        description (Optional[str]): Longer human-readable description.
        metadata_entries (Optional[List[EventMetadataEntry]]): Structured
            metadata attached to the failure.
    '''

    def __new__(cls, label, description=None, metadata_entries=None):
        return super(UserFailureData, cls).__new__(
            cls,
            label=check.str_param(label, 'label'),
            description=check.opt_str_param(description, 'description'),
            # BUG FIX: the second argument to opt_list_param is the parameter
            # *name* used in its error messages; it was previously passed the
            # list object itself.
            metadata_entries=check.opt_list_param(
                metadata_entries, 'metadata_entries', of_type=EventMetadataEntry
            ),
        )
class StepOutputData(
    namedtuple(
        '_StepOutputData',
        'step_output_handle value_repr intermediate_materialization type_check_data',
    )
):
    '''Snapshot of a value produced on a step output, used for event reporting.'''

    def __new__(
        cls, step_output_handle, value_repr, intermediate_materialization=None, type_check_data=None
    ):
        # Validate all fields before constructing the tuple.
        checked_handle = check.inst_param(
            step_output_handle, 'step_output_handle', StepOutputHandle
        )
        checked_repr = check.str_param(value_repr, 'value_repr')
        checked_materialization = check.opt_inst_param(
            intermediate_materialization, 'intermediate_materialization', Materialization
        )
        checked_type_check = check.opt_inst_param(
            type_check_data, 'type_check_data', TypeCheckData
        )
        return super(StepOutputData, cls).__new__(
            cls,
            step_output_handle=checked_handle,
            value_repr=checked_repr,
            intermediate_materialization=checked_materialization,
            type_check_data=checked_type_check,
        )

    @property
    def output_name(self):
        '''Name of the output on the producing step.'''
        return self.step_output_handle.output_name
class StepFailureData(namedtuple('_StepFailureData', 'error user_failure_data')):
    '''Payload describing a step failure: serialized error plus optional user data.'''

    def __new__(cls, error, user_failure_data):
        checked_error = check.opt_inst_param(error, 'error', SerializableErrorInfo)
        checked_user_data = check.opt_inst_param(
            user_failure_data, 'user_failure_data', UserFailureData
        )
        return super(StepFailureData, cls).__new__(
            cls, error=checked_error, user_failure_data=checked_user_data
        )
class StepSuccessData(namedtuple('_StepSuccessData', 'duration_ms')):
    '''Timing payload for a successful step execution (duration in milliseconds).'''

    def __new__(cls, duration_ms):
        checked_duration = check.float_param(duration_ms, 'duration_ms')
        return super(StepSuccessData, cls).__new__(cls, duration_ms=checked_duration)
class StepKind(Enum):
    '''Enumerates the kinds of steps that can appear in an execution plan.'''
    COMPUTE = 'COMPUTE'
    INPUT_EXPECTATION = 'INPUT_EXPECTATION'
    OUTPUT_EXPECTATION = 'OUTPUT_EXPECTATION'
    JOIN = 'JOIN'
    SERIALIZE = 'SERIALIZE'
    UNMARSHAL_INPUT = 'UNMARSHAL_INPUT'
    MARSHAL_OUTPUT = 'MARSHAL_OUTPUT'
class StepInput(namedtuple('_StepInput', 'name runtime_type prev_output_handle config_data')):
    '''Definition of a single input to an execution step.'''

    def __new__(cls, name, runtime_type, prev_output_handle=None, config_data=None):
        checked_name = check.str_param(name, 'name')
        checked_type = check.inst_param(runtime_type, 'runtime_type', RuntimeType)
        checked_handle = check.opt_inst_param(
            prev_output_handle, 'prev_output_handle', StepOutputHandle
        )
        return super(StepInput, cls).__new__(
            cls,
            name=checked_name,
            runtime_type=checked_type,
            prev_output_handle=checked_handle,
            config_data=config_data,  # deliberately unchecked: can be any type
        )

    @property
    def is_from_output(self):
        '''True when this input is fed by another step's output.'''
        return self.prev_output_handle is not None
class StepOutput(namedtuple('_StepOutput', 'name runtime_type optional')):
    '''Definition of a single output of an execution step.'''

    def __new__(cls, name, runtime_type, optional):
        checked_name = check.str_param(name, 'name')
        checked_type = check.inst_param(runtime_type, 'runtime_type', RuntimeType)
        checked_optional = check.bool_param(optional, 'optional')
        return super(StepOutput, cls).__new__(
            cls, name=checked_name, runtime_type=checked_type, optional=checked_optional
        )
class ExecutionStep(
    namedtuple(
        '_ExecutionStep',
        (
            'pipeline_name key_suffix step_inputs step_input_dict step_outputs step_output_dict '
            'compute_fn kind solid_handle logging_tags metadata'
        ),
    )
):
    '''A single step in an execution plan.

    Carries the step's inputs/outputs (both as lists and name-keyed dicts),
    the compute callable, its kind, the owning solid handle, and
    logging/metadata dictionaries.  The step key is derived as
    "<solid_handle>.<key_suffix>".
    '''
    def __new__(
        cls,
        pipeline_name,
        key_suffix,
        step_inputs,
        step_outputs,
        compute_fn,
        kind,
        solid_handle,
        logging_tags=None,
        metadata=None,
    ):
        return super(ExecutionStep, cls).__new__(
            cls,
            pipeline_name=check.str_param(pipeline_name, 'pipeline_name'),
            key_suffix=check.str_param(key_suffix, 'key_suffix'),
            step_inputs=check.list_param(step_inputs, 'step_inputs', of_type=StepInput),
            # name-keyed lookup table derived from step_inputs
            step_input_dict={si.name: si for si in step_inputs},
            step_outputs=check.list_param(step_outputs, 'step_outputs', of_type=StepOutput),
            # name-keyed lookup table derived from step_outputs
            step_output_dict={so.name: so for so in step_outputs},
            compute_fn=check.callable_param(compute_fn, 'compute_fn'),
            kind=check.inst_param(kind, 'kind', StepKind),
            solid_handle=check.inst_param(solid_handle, 'solid_handle', SolidHandle),
            # caller-provided tags override/extend the standard identifying tags
            logging_tags=merge_dicts(
                {
                    'step_key': str(solid_handle) + '.' + key_suffix,
                    'pipeline': pipeline_name,
                    'solid': solid_handle.name,
                    'solid_definition': solid_handle.definition_name,
                },
                check.opt_dict_param(logging_tags, 'logging_tags'),
            ),
            metadata=check.opt_dict_param(metadata, 'metadata', key_type=str, value_type=str),
        )
    @property
    def key(self):
        # Unique step key: "<solid_handle>.<key_suffix>" (matches the
        # 'step_key' logging tag built above).
        return str(self.solid_handle) + '.' + self.key_suffix
    @property
    def solid_name(self):
        return self.solid_handle.name
    @property
    def solid_definition_name(self):
        return self.solid_handle.definition_name
    def has_step_output(self, name):
        '''Return True if this step defines an output named *name*.'''
        check.str_param(name, 'name')
        return name in self.step_output_dict
    def step_output_named(self, name):
        '''Return the StepOutput named *name* (raises KeyError if absent).'''
        check.str_param(name, 'name')
        return self.step_output_dict[name]
    def has_step_input(self, name):
        '''Return True if this step defines an input named *name*.'''
        check.str_param(name, 'name')
        return name in self.step_input_dict
    def step_input_named(self, name):
        '''Return the StepInput named *name* (raises KeyError if absent).'''
        check.str_param(name, 'name')
        return self.step_input_dict[name]
class ExecutionValueSubplan(
    namedtuple('ExecutionValueSubplan', 'steps terminal_step_output_handle')
):
    '''
    Captures a common engine pattern: a single value (an input or an output of
    a compute function) flows through a sequence of system-injected steps
    (e.g. expectations or materializations).  Holds all the steps that make up
    that subplan plus the single output handle that downstream steps may
    depend on.
    '''

    def __new__(cls, steps, terminal_step_output_handle):
        checked_steps = check.list_param(steps, 'steps', of_type=ExecutionStep)
        checked_handle = check.inst_param(
            terminal_step_output_handle, 'terminal_step_output_handle', StepOutputHandle
        )
        return super(ExecutionValueSubplan, cls).__new__(cls, checked_steps, checked_handle)

    @staticmethod
    def empty(terminal_step_output_handle):
        '''Subplan with no steps whose terminal output is the given handle.'''
        return ExecutionValueSubplan([], terminal_step_output_handle)
| 37.313492 | 100 | 0.672126 |
83bf5aa75b028c5f3330b6c6dc192cb8f63b49bc | 1,639 | py | Python | PythonScript/bigDataFinal.py | ElvaHuang/MeetupNetworkVisualAnalytics | cfcab5fc4bac8f560961e79b8215b2e9d8f8d32a | [
"Apache-2.0"
] | null | null | null | PythonScript/bigDataFinal.py | ElvaHuang/MeetupNetworkVisualAnalytics | cfcab5fc4bac8f560961e79b8215b2e9d8f8d32a | [
"Apache-2.0"
] | null | null | null | PythonScript/bigDataFinal.py | ElvaHuang/MeetupNetworkVisualAnalytics | cfcab5fc4bac8f560961e79b8215b2e9d8f8d32a | [
"Apache-2.0"
] | null | null | null | # filename: big_data_final.py
import csv
import itertools

import networkx as nx
from networkx.algorithms.approximation import average_clustering, max_clique
# Build co-membership ("online") and co-attendance ("offline") weighted
# graphs from (user, group) and (user, event) CSVs, then report approximate
# clustering coefficients and maximum cliques for both.


def _load_membership(path):
    """Read a two-column CSV of (user_id, container_id) rows.

    Returns a dict mapping container id (group or event) to the list of
    user ids that belong to it.
    """
    members = {}
    with open(path) as csv_file:
        for row in csv.reader(csv_file):
            members.setdefault(row[1], []).append(row[0])
    return members


def _build_comembership_graph(name, membership):
    """Build a weighted graph linking users who share a group/event.

    Each shared container of size n contributes 1/n to the edge weight of
    every pair of its members (same weighting as the original nested loops).
    """
    graph = nx.Graph(name=name)
    for user_ids in membership.values():
        weight = 1.0 / len(user_ids)
        for a, b in itertools.combinations(user_ids, 2):
            if graph.has_edge(a, b):
                graph[a][b]['weight'] += weight
            else:
                graph.add_edge(a, b, weight=weight)
    return graph


G1 = _build_comembership_graph('online', _load_membership('user_group.csv'))
# BUG FIX: the original event loop populated an undefined lowercase `event`
# dict (NameError on the first row); it now reuses the same helper as groups.
G2 = _build_comembership_graph('offline', _load_membership('user_event.csv'))

# average_clustering/max_clique come from networkx.algorithms.approximation
# (imported at the top); the original referenced them without any import.
print("Clustering coefficient of G1:", average_clustering(G1, 5000))
print("Clustering coefficient of G2:", average_clustering(G2, 5000))
print("G1:", max_clique(G1))
print("G2:", max_clique(G2))
| 26.868852 | 113 | 0.563758 |
1a7fec1a6ce90326b708360d29a869afc172f3c6 | 2,603 | py | Python | v1/Utilities/mnistexample.py | denfed/ACDriver | b70a0d3677df2a797a1dd377af4aca6b1837a114 | [
"MIT"
] | 11 | 2018-12-15T00:18:17.000Z | 2021-11-12T18:44:23.000Z | v1/Utilities/mnistexample.py | denfed/ACDriver | b70a0d3677df2a797a1dd377af4aca6b1837a114 | [
"MIT"
] | null | null | null | v1/Utilities/mnistexample.py | denfed/ACDriver | b70a0d3677df2a797a1dd377af4aca6b1837a114 | [
"MIT"
] | 1 | 2019-12-31T05:09:16.000Z | 2019-12-31T05:09:16.000Z | '''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import numpy as np
batch_size = 25  # samples per gradient update
epochs = 5  # full passes over the training data
# input image dimensions
# NOTE(review): 60x80 appears to describe the custom dataset loaded by
# load_data() below, not MNIST (28x28) -- confirm before reshaping MNIST
# with these values.
img_rows, img_cols = 60, 80
def load_data(path="traningdata-1.npy", test_size=50):
    """Load a saved training array and split it into train/test sets.

    The array at *path* has one sample per row, with the label in the last
    column and the flattened features in the remaining columns.

    Args:
        path: .npy file to load.  NOTE(review): the default keeps the
            original (misspelled) filename for backward compatibility.
        test_size: number of trailing rows reserved for the test split
            (default 50, matching the original behavior for 500-row files).

    Returns:
        ((x_train, y_train), (x_test, y_test)) feature/label pairs.
    """
    data = np.load(path)
    # BUG FIX: the original hard-coded train = data[:450], silently dropping
    # samples whenever the file held more than 500 rows; the train split now
    # takes everything that is not reserved for testing.
    train, test = data[:-test_size], data[-test_size:]
    x_train, y_train = train[:, :-1], train[:, -1]
    x_test, y_test = test[:, :-1], test[:, -1]
    return (x_train, y_train), (x_test, y_test)
# The project's own data can be swapped in here via load_data(); MNIST is
# used below as a stand-in example dataset.
# (x_train, y_train), (x_test, y_test) = load_data()
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# BUG FIX: derive image dimensions from the data itself.  The hard-coded
# img_rows/img_cols above (60x80) describe the custom driving dataset, not
# MNIST (28x28) -- the original reshape to 60x80 would crash on MNIST.
img_rows, img_cols = x_train.shape[1], x_train.shape[2]

# Arrange the channel axis to match the backend's preferred data layout.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel values to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# BUG FIX: convert integer class labels to one-hot vectors -- required by
# categorical_crossentropy (the original skipped this step despite the
# comment, and hard-coded a 3-unit sigmoid output that cannot represent
# MNIST's 10 classes).
num_classes = len(np.unique(y_train))
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
# Softmax over the detected number of classes (was: Dense(3, sigmoid)).
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          verbose=1,
          validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| 28.922222 | 70 | 0.695736 |
5abfa124026842a6bd7790ca100638a86a189c55 | 4,120 | py | Python | imageutil.py | jordan112/ComicTools | 1ec3403b17b46181fa70fe96ae006c9ae9789938 | [
"MIT"
] | null | null | null | imageutil.py | jordan112/ComicTools | 1ec3403b17b46181fa70fe96ae006c9ae9789938 | [
"MIT"
] | null | null | null | imageutil.py | jordan112/ComicTools | 1ec3403b17b46181fa70fe96ae006c9ae9789938 | [
"MIT"
] | null | null | null | from PIL import Image,ImageChops, ImageStat
import imagehash
import requests
from io import BytesIO
import base64
import os
def is_image(file):
    """Return True if *file* (path or file object) is readable by Pillow as an image.

    Only the header is inspected; the pixel data is not decoded.
    """
    try:
        # BUG FIX: close the image (and its underlying file handle) instead
        # of leaking an open descriptor for every probed file.
        with Image.open(file):
            pass
    except IOError:
        return False
    return True
def getImageFromUrl(url):
    """Download *url* and return it as an RGB Pillow image.

    Raises requests.HTTPError on a non-2xx response instead of handing
    PIL an error page.
    """
    response = requests.get(url)
    response.raise_for_status()
    # BUG FIX: use response.content (via BytesIO) rather than the raw socket
    # stream -- response.raw bypasses transfer/content-encoding decoding
    # (e.g. gzip), which corrupts the bytes PIL sees.
    return Image.open(BytesIO(response.content)).convert('RGB')
def convert_image(file, type):
    """Convert a single image file to *type*, written alongside the original.

    The output keeps the source's basename with the new extension; the source
    file is left in place.  Returns True on success (or when no conversion is
    needed), False when the target type is unsupported or saving fails.
    """
    type = type.lower()
    extension = os.path.splitext(file)[1].strip(".").lower()
    if extension == type:
        print(f"Source image type of {os.path.basename(file)} is the same as target type, skippping conversion.")
        return True
    print(f"Converting {os.path.basename(file)} to {type}.")
    try:
        basename = os.path.splitext(file)[0]
        # BUG FIX: close the source file handle promptly instead of leaking
        # it; convert() returns an independent in-memory copy.
        with Image.open(file) as source:
            image = source.convert('RGB')
        if type == "png":
            image.save(f"{basename}.png", save_all=True, optimize=True)
            return True
        elif type == "jpg":
            if extension == "jpeg":
                # jpeg and jpg are the same format; nothing to re-encode.
                return True
            image.save(f"{basename}.jpg", quality=95, optimize=True)
            return True
        elif type == "webp":
            image.save(f"{basename}.webp", save_all=True, lossless=True, optimize=True)
            return True
        elif type == "gif":
            image.save(f"{basename}.gif", save_all=True)
            return True
        elif type == "tiff":
            image.save(f"{basename}.tiff", quality=100, save_all=True)
            return True
        elif type == "bmp":
            image.save(f"{basename}.bmp")
            return True
        else:
            print(f"Target type {type} not supported.")
            print("Supported image target types: png, jpg, webp, bmp, gif, tiff.")
            return False
    except Exception as e:
        # BUG FIX: the failure was previously swallowed silently (the bound
        # exception was never used), making conversion errors undiagnosable.
        print(f"Failed to convert {os.path.basename(file)}: {e}")
        return False
def convert_dir_to_type(dir, type):
    """Convert every image file under *dir* (recursively) to *type*.

    Successfully converted source files are deleted; non-image files are
    ignored.  Returns False as soon as one conversion fails, True otherwise.
    """
    print(f"Converting image files to {type}.")
    type = type.lower()
    for subdir, dirs, files in os.walk(dir):
        for file in files:
            filepath = subdir + os.sep + file
            if not is_image(filepath):
                continue
            # Already the target type: nothing to convert (convert_image
            # would detect this too, but checking here skips the re-open).
            if os.path.splitext(file)[1].strip(".").lower() == type:
                print(f"Source image type of {os.path.basename(file)} is the same as target type, skippping conversion.")
                continue
            if not convert_image(filepath, type):
                return False
            # Remove the original only after a successful conversion.
            os.remove(filepath)
    return True
def compare_images(image1, image2):
    """Perceptual-hash similarity score between two Pillow images.

    Returns 100 minus the pHash Hamming distance, so higher means more
    similar (100 = identical hashes).
    """
    w1, h1 = image1.size
    w2, h2 = image2.size
    # Scale the smaller image up to the larger one so the hashes compare
    # like with like.
    if w1 * h1 > w2 * h2:
        image2 = image2.resize((w1, h1))
    else:
        image1 = image1.resize((w2, h2))
    distance = imagehash.phash(image1) - imagehash.phash(image2)
    return 100 - distance
def compare_images2(image1, image2):
    """Mean-pixel-difference similarity between two Pillow images.

    Returns a rounded percentage: 100 means every channel mean of the
    difference image is zero (identical images).
    """
    w1, h1 = image1.size
    w2, h2 = image2.size
    # Scale the smaller image up to the larger one before differencing.
    if w1 * h1 > w2 * h2:
        image2 = image2.resize((w1, h1))
    else:
        image1 = image1.resize((w2, h2))
    stat = ImageStat.Stat(ImageChops.difference(image1, image2))
    diff_ratio = sum(stat.mean) / (len(stat.mean) * 255)
    return round(100 - diff_ratio * 100)
def compare_images3(image1, image2):
    """Black-pixel-fraction similarity between two Pillow images.

    Thresholds the difference image to 1-bit and returns the rounded
    percentage of black (i.e. matching) pixels.
    """
    w1, h1 = image1.size
    w2, h2 = image2.size
    # Scale the smaller image up to the larger one before differencing.
    if w1 * h1 > w2 * h2:
        image2 = image2.resize((w1, h1))
    else:
        image1 = image1.resize((w2, h2))
    diff_img = ImageChops.difference(image1, image2)
    black_pixels = diff_img.convert('1').histogram()[0]
    total_pixels = diff_img.size[0] * diff_img.size[1]
    return round(black_pixels / total_pixels * 100)
if __name__ == "__main__":
    # Ad-hoc manual test: convert a sample folder to PNG in place.
    # NOTE(review): the backslashes in this Windows path only survive
    # because none form a recognized escape sequence; prefer a raw string.
    dir = "D:\Downloads\Way Of X #002 (2021)"
    print(convert_dir_to_type(dir,"png"))
| 32.698413 | 134 | 0.613107 |
57bf0640c47eb4553376e69460c55744d88e4c1e | 3,684 | py | Python | services/github-bots/PredictLabels/application.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | 1 | 2021-01-02T03:34:00.000Z | 2021-01-02T03:34:00.000Z | services/github-bots/PredictLabels/application.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | null | null | null | services/github-bots/PredictLabels/application.py | mseth10/incubator-mxnet-ci | 36a5050b9c7bd720a4aa87d225738400083d611d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This is a web server built based on Flask framework and AWS Elastic Beanstalk service
# It will response to http GET/POST requests
from flask import Flask, jsonify, request, send_file
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from Predictor import Predictor
from Trainer import Trainer
import plot_piechart
import timeit
import atexit
import logging
import os.path
logging.getLogger().setLevel(logging.INFO)
application = Flask(__name__)
predictor = Predictor()
# GET '/'
@application.route('/')
def index():
    """Liveness endpoint: confirms the bot server is responding."""
    return "Hello! -Bot"
# GET '/issues/<issue>'
# return predictions of an issue
@application.route('/issues/<issue>')
def get_prediction(issue):
    """Return the predicted labels for one issue as a space-joined string."""
    predictions = predictor.predict([issue])
    return " ".join(predictions[0])
# POST '/predict'
# return predictions of issues
@application.route('/predict', methods=['POST'])
def predict():
    """Return label predictions for a batch of issues.

    Expects a JSON payload of the form {"issues": [1, 2, 3]} and responds
    with a JSON list of {"number": <issue>, "predictions": [...]} objects.
    """
    issues = request.get_json()["issues"]
    # Skip the model call entirely for an empty batch.
    predictions = predictor.predict(issues) if issues else []
    # zip pairs each issue number with its prediction list, replacing the
    # original index-based range(len(...)) loop.
    response = [
        {"number": number, "predictions": preds}
        for number, preds in zip(issues, predictions)
    ]
    return jsonify(response)
# POST '/draw'
# return an image's binary code
@application.route('/draw', methods=['POST'])
def plot():
    """Render a pie chart from the posted data and return it as a PNG.

    Expects a JSON payload like {"fracs": [...], "labels": [...]}.
    """
    payload = request.get_json()
    filename = plot_piechart.draw_pie(payload["fracs"], payload["labels"])
    return send_file(filename, mimetype='image/png')
# helper function
def train_models():
    """Retrain the label-prediction models and hot-swap them into the predictor.

    Trains a fresh model set into a temporary directory, reloads the shared
    predictor from it, and logs the elapsed training time.
    """
    start = timeit.default_timer()
    trainer = Trainer()
    tmp_dir = trainer.train()
    stop = timeit.default_timer()
    # reload models
    predictor.reload(tmp_dir=tmp_dir)
    # divmod replaces the manual int(time/60) / time % 60 arithmetic; lazy
    # %-style args avoid formatting the message when INFO is disabled.
    minutes, seconds = divmod(int(stop - start), 60)
    logging.info("Training completed! Time cost: %d min, %d seconds",
                 minutes, seconds)
# Once the server is running, it will retrain ML models every 24 hours
@application.before_first_request
def initialize():
    """Schedule daily retraining and train the initial models.

    Registered as a before-first-request hook; also invoked directly at
    import time so models exist before any request arrives.
    """
    scheduler = BackgroundScheduler()
    scheduler.start()
    scheduler.add_job(
        func=train_models,
        trigger=IntervalTrigger(hours=24),
        id='Training_Job',
        name='Update models every 24 hours',
        replace_existing=True)
    # Shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())
    # train initial models
    train_models()
# Prime the app at import time: schedule retraining and train initial models.
initialize()

# run the app.
if __name__ == "__main__":
    # Set debug to True enables debug output.
    # This 'application.debug = True' should be removed before deploying a production app.
    application.debug = True
    application.threaded = True
    application.run('0.0.0.0', 8000)
| 30.446281 | 111 | 0.712541 |
b6da1ba9df5108a18b181ad9e9f8faf5e483e12b | 28,705 | py | Python | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_config_types.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_config_types.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_config_types.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | import json
from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector
import dagster._check as check
from dagster.config.config_type import ALL_CONFIG_BUILTINS
from dagster.utils import file_relative_path
from .graphql_context_test_suite import NonLaunchableGraphQLContextTestMatrix
from .setup import csv_hello_world_solids_config
CONFIG_VALIDATION_QUERY = """
query PipelineQuery(
$runConfigData: RunConfigData,
$pipeline: PipelineSelector!,
$mode: String!
) {
isPipelineConfigValid(
runConfigData: $runConfigData,
pipeline: $pipeline,
mode: $mode
) {
__typename
... on PipelineConfigValidationValid {
pipelineName
}
... on RunConfigValidationInvalid {
pipelineName
errors {
__typename
... on RuntimeMismatchConfigError {
valueRep
}
... on MissingFieldConfigError {
field { name }
}
... on MissingFieldsConfigError {
fields { name }
}
... on FieldNotDefinedConfigError {
fieldName
}
... on FieldsNotDefinedConfigError {
fieldNames
}
... on SelectorTypeConfigError {
incomingFields
}
message
reason
stack {
entries {
__typename
... on EvaluationStackPathEntry {
fieldName
}
... on EvaluationStackListItemEntry {
listIndex
}
... on EvaluationStackMapKeyEntry {
mapKey
}
... on EvaluationStackMapValueEntry {
mapKey
}
}
}
}
}
... on PipelineNotFoundError {
pipelineName
}
... on PythonError {
message
stack
}
}
}
"""
def field_stack(error_data):
    """Collect the field names from an error's evaluation-stack path entries."""
    names = []
    for entry in error_data["stack"]["entries"]:
        if entry["__typename"] == "EvaluationStackPathEntry":
            names.append(entry["fieldName"])
    return names
def single_error_data(result):
    """Assert the validation result holds exactly one error and return it."""
    errors = result.data["isPipelineConfigValid"]["errors"]
    assert len(errors) == 1
    return errors[0]
def find_error(result, field_stack_to_find, reason):
    """Return the single error matching the stack/reason pair.

    Fails the test if zero or multiple errors match.
    """
    matches = list(find_errors(result, field_stack_to_find, reason))
    assert len(matches) == 1
    return matches[0]
def find_errors(result, field_stack_to_find, reason):
    """Yield each validation error whose reason and field stack both match."""
    for err in result.data["isPipelineConfigValid"]["errors"]:
        if err["reason"] == reason and field_stack(err) == field_stack_to_find:
            yield err
def execute_config_graphql(context, pipeline_name, run_config, mode):
    """Execute the config-validation query for *pipeline_name* in *mode*."""
    variables = {
        "runConfigData": run_config,
        "pipeline": infer_pipeline_selector(context, pipeline_name),
        "mode": mode,
    }
    return execute_dagster_graphql(context, CONFIG_VALIDATION_QUERY, variables)
class TestConfigTypes(NonLaunchableGraphQLContextTestMatrix):
def test_pipeline_not_found(self, graphql_context):
result = execute_config_graphql(
graphql_context, pipeline_name="nope", run_config={}, mode="default"
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "PipelineNotFoundError"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "nope"
def test_basic_valid_config(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config=csv_hello_world_solids_config(),
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "PipelineConfigValidationValid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
def test_basic_valid_config_serialized_config(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config=json.dumps(csv_hello_world_solids_config()),
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "PipelineConfigValidationValid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
def test_basic_valid_config_empty_string_config(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config="",
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
def test_basic_valid_config_non_dict_config(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config="daggy",
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
def test_root_field_not_defined(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config={
"solids": {
"sum_solid": {
"inputs": {"num": file_relative_path(__file__, "../data/num.csv")}
}
},
"nope": {},
},
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
errors = result.data["isPipelineConfigValid"]["errors"]
assert len(errors) == 1
error = errors[0]
assert error["__typename"] == "FieldNotDefinedConfigError"
assert error["fieldName"] == "nope"
assert not error["stack"]["entries"]
def test_basic_invalid_not_defined_field(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config={"solids": {"sum_solid": {"inputs": {"num": "foo.txt", "extra": "nope"}}}},
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
assert len(result.data["isPipelineConfigValid"]["errors"]) == 1
error_data = result.data["isPipelineConfigValid"]["errors"][0]
assert ["solids", "sum_solid", "inputs"] == field_stack(error_data)
assert error_data["reason"] == "FIELD_NOT_DEFINED"
assert error_data["fieldName"] == "extra"
def test_multiple_not_defined_fields(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config={
"solids": {
"sum_solid": {
"inputs": {"num": "foo.txt", "extra_one": "nope", "extra_two": "nope"}
}
}
},
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
assert len(result.data["isPipelineConfigValid"]["errors"]) == 1
error_data = result.data["isPipelineConfigValid"]["errors"][0]
assert ["solids", "sum_solid", "inputs"] == field_stack(error_data)
assert error_data["reason"] == "FIELDS_NOT_DEFINED"
assert error_data["fieldNames"] == ["extra_one", "extra_two"]
def test_root_wrong_type(self, graphql_context):
result = execute_config_graphql(
graphql_context, pipeline_name="csv_hello_world", run_config=123, mode="default"
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
assert len(result.data["isPipelineConfigValid"]["errors"]) == 1
error_data = result.data["isPipelineConfigValid"]["errors"][0]
assert error_data["reason"] == "RUNTIME_TYPE_MISMATCH"
def test_basic_invalid_config_type_mismatch(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config={"solids": {"sum_solid": {"inputs": {"num": 123}}}},
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
assert len(result.data["isPipelineConfigValid"]["errors"]) == 1
error_data = result.data["isPipelineConfigValid"]["errors"][0]
assert error_data["message"]
assert error_data["stack"]
assert error_data["stack"]["entries"]
assert error_data["reason"] == "RUNTIME_TYPE_MISMATCH"
assert error_data["valueRep"] == "123"
assert ["solids", "sum_solid", "inputs", "num"] == field_stack(error_data)
def test_basic_invalid_config_missing_field(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="csv_hello_world",
run_config={"solids": {"sum_solid": {"inputs": {}}}},
mode="default",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "csv_hello_world"
assert len(result.data["isPipelineConfigValid"]["errors"]) == 1
error_data = result.data["isPipelineConfigValid"]["errors"][0]
assert ["solids", "sum_solid", "inputs"] == field_stack(error_data)
assert error_data["reason"] == "MISSING_REQUIRED_FIELD"
assert error_data["field"]["name"] == "num"
def test_mode_resource_config_works(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="multi_mode_with_resources",
run_config={"resources": {"op": {"config": 2}}},
mode="add_mode",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "PipelineConfigValidationValid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "multi_mode_with_resources"
result = execute_config_graphql(
graphql_context,
pipeline_name="multi_mode_with_resources",
run_config={"resources": {"op": {"config": 2}}},
mode="mult_mode",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "PipelineConfigValidationValid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "multi_mode_with_resources"
result = execute_config_graphql(
graphql_context,
pipeline_name="multi_mode_with_resources",
run_config={"resources": {"op": {"config": {"num_one": 2, "num_two": 3}}}},
mode="double_adder",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "PipelineConfigValidationValid"
assert result.data["isPipelineConfigValid"]["pipelineName"] == "multi_mode_with_resources"
def test_missing_resource(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="multi_mode_with_resources",
run_config={"resources": {}},
mode="add_mode",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
error_data = single_error_data(result)
assert error_data["reason"] == "MISSING_REQUIRED_FIELD"
assert error_data["field"]["name"] == "op"
def test_undefined_resource(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="multi_mode_with_resources",
run_config={"resources": {"nope": {}}},
mode="add_mode",
)
assert not result.errors
assert result.data
assert result.data["isPipelineConfigValid"]["__typename"] == "RunConfigValidationInvalid"
assert {"FieldNotDefinedConfigError", "MissingFieldConfigError"} == {
error_data["__typename"]
for error_data in result.data["isPipelineConfigValid"]["errors"]
}
def test_more_complicated_works(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="more_complicated_nested_config",
run_config={
"solids": {
"a_solid_with_multilayered_config": {
"config": {
"field_any": {"123": 123},
"field_one": "foo.txt",
"field_two": "yup",
"field_three": "mmmhmmm",
"nested_field": {"field_four_str": "yaya", "field_five_int": 234},
}
}
}
},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "PipelineConfigValidationValid"
assert valid_data["pipelineName"] == "more_complicated_nested_config"
def test_multiple_missing_fields(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="more_complicated_nested_config",
run_config={"solids": {"a_solid_with_multilayered_config": {"config": {}}}},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "more_complicated_nested_config"
assert len(valid_data["errors"]) == 1
error_data = valid_data["errors"][0]
missing_names = {field_data["name"] for field_data in error_data["fields"]}
assert missing_names == {"nested_field", "field_one", "field_any"}
assert field_stack(error_data) == ["solids", "a_solid_with_multilayered_config", "config"]
def test_more_complicated_multiple_errors(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="more_complicated_nested_config",
run_config={
"solids": {
"a_solid_with_multilayered_config": {
"config": {
"field_any": [],
# 'field_one': 'foo.txt', # missing
"field_two": "yup",
"field_three": "mmmhmmm",
"extra_one": "kjsdkfjd", # extra
"nested_field": {
"field_four_str": 23434, # runtime type
"field_five_int": 234,
"extra_two": "ksjdkfjd", # another extra
},
}
}
}
},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "more_complicated_nested_config"
assert len(valid_data["errors"]) == 4
missing_error_one = find_error(
result,
["solids", "a_solid_with_multilayered_config", "config"],
"MISSING_REQUIRED_FIELD",
)
assert ["solids", "a_solid_with_multilayered_config", "config"] == field_stack(
missing_error_one
)
assert missing_error_one["reason"] == "MISSING_REQUIRED_FIELD"
assert missing_error_one["field"]["name"] == "field_one"
not_defined_one = find_error(
result, ["solids", "a_solid_with_multilayered_config", "config"], "FIELD_NOT_DEFINED"
)
assert ["solids", "a_solid_with_multilayered_config", "config"] == field_stack(
not_defined_one
)
assert not_defined_one["reason"] == "FIELD_NOT_DEFINED"
assert not_defined_one["fieldName"] == "extra_one"
dagster_type_error = find_error(
result,
[
"solids",
"a_solid_with_multilayered_config",
"config",
"nested_field",
"field_four_str",
],
"RUNTIME_TYPE_MISMATCH",
)
assert [
"solids",
"a_solid_with_multilayered_config",
"config",
"nested_field",
"field_four_str",
] == field_stack(dagster_type_error)
assert dagster_type_error["reason"] == "RUNTIME_TYPE_MISMATCH"
assert dagster_type_error["valueRep"] == "23434"
not_defined_two = find_error(
result,
["solids", "a_solid_with_multilayered_config", "config", "nested_field"],
"FIELD_NOT_DEFINED",
)
assert [
"solids",
"a_solid_with_multilayered_config",
"config",
"nested_field",
] == field_stack(not_defined_two)
assert not_defined_two["reason"] == "FIELD_NOT_DEFINED"
assert not_defined_two["fieldName"] == "extra_two"
# TODO: two more errors
def test_config_list(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="pipeline_with_list",
run_config={"solids": {"solid_with_list": {"config": [1, 2]}}},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "PipelineConfigValidationValid"
assert valid_data["pipelineName"] == "pipeline_with_list"
def test_config_list_invalid(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="pipeline_with_list",
run_config={"solids": {"solid_with_list": {"config": "foo"}}},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "pipeline_with_list"
assert len(valid_data["errors"]) == 1
assert ["solids", "solid_with_list", "config"] == field_stack(valid_data["errors"][0])
def test_config_list_item_invalid(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="pipeline_with_list",
run_config={"solids": {"solid_with_list": {"config": [1, "foo"]}}},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "pipeline_with_list"
assert len(valid_data["errors"]) == 1
entries = valid_data["errors"][0]["stack"]["entries"]
assert len(entries) == 4
assert ["solids", "solid_with_list", "config"] == field_stack(valid_data["errors"][0])
last_entry = entries[3]
assert last_entry["__typename"] == "EvaluationStackListItemEntry"
assert last_entry["listIndex"] == 1
def test_config_map(self, graphql_context):
# Check validity
result = execute_config_graphql(
graphql_context,
pipeline_name="config_with_map",
run_config={
"solids": {"a_solid_with_map_config": {"config": {"field_one": {"test": 5}}}}
},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "PipelineConfigValidationValid"
assert valid_data["pipelineName"] == "config_with_map"
# Sanity check GraphQL result for types
selector = infer_pipeline_selector(graphql_context, "config_with_map")
result = execute_dagster_graphql(
graphql_context,
ALL_CONFIG_TYPES_QUERY,
{"selector": selector, "mode": "default"},
)
config_types_data = result.data["runConfigSchemaOrError"]["allConfigTypes"]
# Ensure the first config type, Map(str, int, name="username") is in the result
assert any(
config_type_data.get("keyLabelName") == "username"
and config_type_data.get("keyType", {}).get("key", "") == "String"
and config_type_data.get("valueType", {}).get("key", "") == "Int"
for config_type_data in config_types_data
)
def test_config_map_invalid(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="config_with_map",
run_config={
"solids": {"a_solid_with_map_config": {"config": {"field_one": "not_a_map"}}}
},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "config_with_map"
assert len(valid_data["errors"]) == 1
assert ["solids", "a_solid_with_map_config", "config", "field_one"] == field_stack(
valid_data["errors"][0]
)
def test_config_map_key_invalid(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="config_with_map",
run_config={"solids": {"a_solid_with_map_config": {"config": {"field_one": {5: 5}}}}},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "config_with_map"
assert len(valid_data["errors"]) == 1
entries = valid_data["errors"][0]["stack"]["entries"]
assert len(entries) == 5
assert ["solids", "a_solid_with_map_config", "config", "field_one"] == field_stack(
valid_data["errors"][0]
)
last_entry = entries[4]
assert last_entry["__typename"] == "EvaluationStackMapKeyEntry"
assert last_entry["mapKey"] == 5
def test_config_map_value_invalid(self, graphql_context):
result = execute_config_graphql(
graphql_context,
pipeline_name="config_with_map",
run_config={
"solids": {
"a_solid_with_map_config": {
"config": {"field_one": {"test": "not_a_valid_int_value"}}
}
}
},
mode="default",
)
assert not result.errors
assert result.data
valid_data = result.data["isPipelineConfigValid"]
assert valid_data["__typename"] == "RunConfigValidationInvalid"
assert valid_data["pipelineName"] == "config_with_map"
assert len(valid_data["errors"]) == 1
entries = valid_data["errors"][0]["stack"]["entries"]
assert len(entries) == 5
assert ["solids", "a_solid_with_map_config", "config", "field_one"] == field_stack(
valid_data["errors"][0]
)
last_entry = entries[4]
assert last_entry["__typename"] == "EvaluationStackMapValueEntry"
assert last_entry["mapKey"] == "test"
def test_smoke_test_config_type_system(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "more_complicated_nested_config")
result = execute_dagster_graphql(
graphql_context,
ALL_CONFIG_TYPES_QUERY,
{"selector": selector, "mode": "default"},
)
config_types_data = result.data["runConfigSchemaOrError"]["allConfigTypes"]
assert has_config_type_with_key_prefix(config_types_data, "Shape.")
for builtin_config_type in ALL_CONFIG_BUILTINS:
assert has_config_type(config_types_data, builtin_config_type.given_name)
def pipeline_named(result, name):
    """Return the pipeline node called *name*, failing the test if absent."""
    nodes = result.data["pipelines"]["nodes"]
    for node in nodes:
        if node["name"] == name:
            return node
    check.failed("Did not find")
def has_config_type_with_key_prefix(config_types_data, prefix):
    """Return True if any config type's key starts with *prefix*.

    Replaces the manual loop-and-flag pattern with the idiomatic any().
    """
    return any(
        config_type_data["key"].startswith(prefix)
        for config_type_data in config_types_data
    )
def has_config_type(config_types_data, name):
    """Return True if any config type's givenName equals *name*.

    Replaces the manual loop-and-flag pattern with the idiomatic any();
    .get() is kept because not every entry carries a givenName.
    """
    return any(
        config_type_data.get("givenName") == name
        for config_type_data in config_types_data
    )
ALL_CONFIG_TYPES_QUERY = """
fragment configTypeFragment on ConfigType {
__typename
key
description
isSelector
typeParamKeys
recursiveConfigTypes {
key
description
... on CompositeConfigType {
fields {
name
isRequired
description
}
}
... on WrappingConfigType {
ofType { key }
}
}
... on EnumConfigType {
givenName
values {
value
description
}
}
... on RegularConfigType {
givenName
}
... on CompositeConfigType {
fields {
name
isRequired
description
}
}
... on WrappingConfigType {
ofType { key }
}
... on MapConfigType {
keyType { key }
valueType { key }
keyLabelName
}
... on ScalarUnionConfigType {
scalarType { key }
nonScalarType { key }
}
}
query allConfigTypes($selector: PipelineSelector!, $mode: String!) {
runConfigSchemaOrError(selector: $selector, mode: $mode ) {
... on RunConfigSchema {
allConfigTypes {
...configTypeFragment
}
}
}
}
"""
def get_field_data(config_type_data, name):
    """Return the field dict named *name*, or None when no field matches."""
    return next(
        (field for field in config_type_data["fields"] if field["name"] == name),
        None,
    )
def get_field_names(config_type_data):
    """Return the set of field names declared by the config type (empty if none)."""
    return set(field["name"] for field in config_type_data.get("fields", []))
| 36.754161 | 100 | 0.598223 |
42938f064692148be833d1d275e2e8dcd4d6c0e6 | 1,383 | py | Python | scripts/retired/plot_bpmg_corner.py | tcrundall/chronostar | bdb5cd965e862ba5cc21bee75d5c8620e106c0cc | [
"MIT"
] | null | null | null | scripts/retired/plot_bpmg_corner.py | tcrundall/chronostar | bdb5cd965e862ba5cc21bee75d5c8620e106c0cc | [
"MIT"
] | null | null | null | scripts/retired/plot_bpmg_corner.py | tcrundall/chronostar | bdb5cd965e862ba5cc21bee75d5c8620e106c0cc | [
"MIT"
] | null | null | null | from __future__ import print_function, division
"""
Teensy script to plot corner plots for BPMG fits
"""
import numpy as np
import matplotlib.pyplot as plt
import corner
# Chain files to plot; earlier runs are kept commented out for reference.
chainfiles = [
    # '../results/em_fit/beta_Pic_solo_results/final_chain.npy',
    # '../results/em_fit/beta_Pic_results/group0/final_chain.npy',
    '../results/em_fit/beta_Pictoris_wgs_inv2/final_chain.npy',
]

# Axis labels for the 9 sampled parameters: mean position/velocity,
# two (log) dispersions, and age.
labels = [
    'X [pc]',
    'Y [pc]',
    'Z [pc]',
    'U [km/s]',
    'V [km/s]',
    'W [km/s]',
    r'$\sigma_{xyz}$',
    r'$\sigma_{uvw}$',
    't [Myr]',
]

# Output filenames, parallel to chainfiles.
plot_names = [
    # 'bpmg_solo_corner.pdf',
    # 'bpmg_corner.pdf',
    'bpmg_5B_conrer.pdf',
]

# NOTE(review): rev_flags has 3 entries while chainfiles/plot_names have 1
# (zip silently truncates), and rev_flag is never used inside the loop body
# (the corner 'reverse=True' kwarg is commented out) — confirm whether axis
# reversal was meant to be applied.
rev_flags = [
    True,
    False,
    True,
]

for chainfile, plot_name, rev_flag in zip(chainfiles, plot_names,
                                          rev_flags):
    print("Plotting {}".format(plot_name))
    chain = np.load(chainfile).reshape(-1, 9)
    # Columns 6-7 were sampled in log-space; exponentiate to physical dispersions.
    chain[:, 6:8] = np.exp(chain[:, 6:8])
    # plt.tick_params(direction='in')
    fig = corner.corner(
        chain,
        labels=labels,
        # reverse=True,
        label_kwargs={'fontsize': 'xx-large'},
        max_n_ticks=4,
    )
    print("Applying tick parameters")
    for ax in fig.axes:
        ax.tick_params(direction='in', labelsize='x-large', top=True,
                       right=True)
    print("... saving")
    plt.savefig('temp_plots/' + plot_name)
82e55f71d8cc31e9e1dab05115e986d3cbfa0a1e | 247 | py | Python | examples/context.py | satr-cowi/DynSys | a41dcf056f3104007cce87eb6eb09c7272db10f7 | [
"MIT"
] | null | null | null | examples/context.py | satr-cowi/DynSys | a41dcf056f3104007cce87eb6eb09c7272db10f7 | [
"MIT"
] | 1 | 2019-11-08T13:15:51.000Z | 2019-11-08T13:15:51.000Z | examples/context.py | dancergraham/DynSys | c7368ced667adb02506a8dc8ca7335a86ab7d5b2 | [
"MIT"
] | 1 | 2022-01-27T07:48:24.000Z | 2022-01-27T07:48:24.000Z | # -*- coding: utf-8 -*-
"""
Gives `examples` context to modules in this package
"""
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from dynsys import dynsys
from dynsys import modalsys
| 16.466667 | 82 | 0.700405 |
c44e775570a41662471d4e34d3991071cd8088ab | 22,127 | py | Python | backtrader/backtrader/stores/oandastore.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/stores/oandastore.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/stores/oandastore.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
from datetime import datetime, timedelta
import time as _time
import json
import threading
import oandapy
import requests # oandapy depdendency
import backtrader as bt
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
from backtrader.utils import AutoDict
# Extend the exceptions to support extra cases
class OandaRequestError(oandapy.OandaError):
    """Error raised when the underlying HTTP request itself fails."""
    def __init__(self):
        er = dict(code=599, message='Request Error', description='')
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(OandaRequestError, self).__init__(er)
class OandaStreamError(oandapy.OandaError):
    """Error raised when the event stream fails; *content* carries detail."""
    def __init__(self, content=''):
        er = dict(code=598, message='Failed Streaming', description=content)
        # Explicit class in super() avoids the self.__class__ subclassing trap.
        super(OandaStreamError, self).__init__(er)
class OandaTimeFrameError(oandapy.OandaError):
    """Error raised when a requested timeframe is not supported."""
    def __init__(self, content):
        # Fix: the incoming content was previously discarded (description='');
        # forward it as the description, consistent with OandaStreamError.
        er = dict(code=597, message='Not supported TimeFrame',
                  description=content)
        # Explicit class in super() avoids the self.__class__ subclassing trap.
        super(OandaTimeFrameError, self).__init__(er)
class OandaNetworkError(oandapy.OandaError):
    """Error raised for generic network-level failures."""
    def __init__(self):
        er = dict(code=596, message='Network Error', description='')
        # Explicit class in super() avoids the self.__class__ subclassing trap.
        super(OandaNetworkError, self).__init__(er)
class API(oandapy.API):
    """oandapy.API variant that reports request failures as error responses.

    The upstream implementation prints requests.RequestException; this
    override returns a structured error payload instead so callers can
    handle all failures uniformly.
    """

    def request(self, endpoint, method='GET', params=None):
        """Issue an HTTP request and return the decoded JSON (or error) payload."""
        url = '%s/%s' % (self.api_url, endpoint)

        method = method.lower()
        params = params or {}
        func = getattr(self.client, method)

        # GET sends the payload as query params; other verbs send form data.
        if method == 'get':
            request_args = {'params': params}
        else:
            request_args = {'data': params}

        # Transport-level failures become a synthetic error response rather
        # than an exception (or a bare print, as upstream does).
        try:
            response = func(url, **request_args)
        except requests.RequestException:
            return OandaRequestError().error_response

        content = json.loads(response.content.decode('utf-8'))

        # HTTP error statuses are likewise returned, not raised.
        if response.status_code >= 400:
            return oandapy.OandaError(content).error_response

        return content
class Streamer(oandapy.Streamer):
    """Streaming client that forwards ticks/transactions onto a queue.

    Overrides oandapy.Streamer to support custom headers and to surface
    stream failures through the queue instead of raising/printing.
    """

    def __init__(self, q, headers=None, *args, **kwargs):
        # Override to provide headers, which is in the standard API interface
        super(Streamer, self).__init__(*args, **kwargs)

        if headers:
            self.client.headers.update(headers)

        # Consumer-facing queue: ticks, transactions, and error payloads all
        # arrive here.
        self.q = q

    def run(self, endpoint, params=None):
        """Stream *endpoint* until disconnected, pushing events onto the queue."""
        # Override to better manage exceptions.
        # Kept as much as possible close to the original
        self.connected = True

        params = params or {}
        ignore_heartbeat = None
        if 'ignore_heartbeat' in params:
            ignore_heartbeat = params['ignore_heartbeat']

        request_args = {}
        request_args['params'] = params

        url = '%s/%s' % (self.api_url, endpoint)

        while self.connected:
            # Added exception control here
            try:
                response = self.client.get(url, **request_args)
            except requests.RequestException as e:
                self.q.put(OandaRequestError().error_response)
                break

            if response.status_code != 200:
                self.on_error(response.content)
                break  # added break here

            # Changed chunk_size 90 -> None
            try:
                for line in response.iter_lines(chunk_size=None):
                    if not self.connected:
                        break

                    if line:
                        data = json.loads(line.decode('utf-8'))
                        if not (ignore_heartbeat and 'heartbeat' in data):
                            self.on_success(data)

            except:  # socket.error has been seen
                self.q.put(OandaStreamError().error_response)
                break

    def on_success(self, data):
        # Route the two known payload kinds onto the queue; heartbeats are
        # already filtered in run(), anything else is dropped.
        if 'tick' in data:
            self.q.put(data['tick'])
        elif 'transaction' in data:
            self.q.put(data['transaction'])

    def on_error(self, data):
        # Stop streaming and surface the failure payload to the consumer.
        self.disconnect()
        self.q.put(OandaStreamError(data).error_response)
class MetaSingleton(MetaParams):
    '''Metaclass to make a metaclassed class a singleton'''

    def __init__(cls, name, bases, dct):
        super(MetaSingleton, cls).__init__(name, bases, dct)
        # Cache slot for the one and only instance of the class
        cls._singleton = None

    def __call__(cls, *args, **kwargs):
        # Instantiate lazily on first call; afterwards always hand back
        # the cached instance.
        instance = cls._singleton
        if instance is None:
            instance = super(MetaSingleton, cls).__call__(*args, **kwargs)
            cls._singleton = instance

        return instance
class OandaStore(with_metaclass(MetaSingleton, object)):
    '''Singleton class wrapping to control the connections to Oanda.

    Params:

      - ``token`` (default: ``None``): API access token

      - ``account`` (default: ``None``): account id

      - ``practice`` (default: ``False``): use the test environment

      - ``account_tmout`` (default: ``10.0``): refresh period for account
        value/cash refresh
    '''

    BrokerCls = None  # broker class will autoregister
    DataCls = None  # data class will auto register

    # (name, default) pairs consumed by the MetaParams machinery
    params = (
        ('token', ''),
        ('account', ''),
        ('practice', False),
        ('account_tmout', 10.0),  # account balance refresh timeout
    )

    # Oanda timestamps are expressed as seconds since this epoch
    _DTEPOCH = datetime(1970, 1, 1)
    _ENVPRACTICE = 'practice'  # test environment name
    _ENVLIVE = 'live'  # live environment name
    @classmethod
    def getdata(cls, *args, **kwargs):
        '''Returns ``DataCls`` with args, kwargs'''
        return cls.DataCls(*args, **kwargs)

    @classmethod
    def getbroker(cls, *args, **kwargs):
        '''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
        return cls.BrokerCls(*args, **kwargs)
    def __init__(self):
        """Set up bookkeeping structures and the Oanda REST API client."""
        super(OandaStore, self).__init__()

        self.notifs = collections.deque()  # store notifications for cerebro

        self._env = None  # reference to cerebro for general notifications
        self.broker = None  # broker instance
        self.datas = list()  # datas that have registered over start

        self._orders = collections.OrderedDict()  # map order.ref to oid
        self._ordersrev = collections.OrderedDict()  # map oid to order.ref
        # Transactions seen before their order id is known, keyed by oid
        self._transpend = collections.defaultdict(collections.deque)

        # Choose the environment from the ``practice`` parameter
        self._oenv = self._ENVPRACTICE if self.p.practice else self._ENVLIVE
        self.oapi = API(environment=self._oenv,
                        access_token=self.p.token,
                        headers={'X-Accept-Datetime-Format': 'UNIX'})

        self._cash = 0.0  # last seen cash (marginAvail)
        self._value = 0.0  # last seen account value (balance)
        self._evt_acct = threading.Event()  # set once account info arrives
    def start(self, data=None, broker=None):
        """Register a data feed or broker and kickstart the worker threads."""
        # Datas require some processing to kickstart data reception
        if data is None and broker is None:
            # NOTE(review): this sets ``self.cash`` (not ``self._cash``) --
            # confirm whether that attribute is read anywhere
            self.cash = None
            return

        if data is not None:
            self._env = data._env
            # For datas simulate a queue with None to kickstart co
            self.datas.append(data)

            if self.broker is not None:
                self.broker.data_started(data)

        elif broker is not None:
            self.broker = broker
            self.streaming_events()
            self.broker_threads()

    def stop(self):
        """Signal the broker worker threads to end."""
        # signal end of thread
        if self.broker is not None:
            self.q_ordercreate.put(None)
            self.q_orderclose.put(None)
            self.q_account.put(None)

    def put_notification(self, msg, *args, **kwargs):
        """Queue a notification (msg plus the call args) for cerebro."""
        self.notifs.append((msg, args, kwargs))

    def get_notifications(self):
        '''Return the pending "store" notifications'''
        self.notifs.append(None)  # put a mark / threads could still append
        return [x for x in iter(self.notifs.popleft, None)]
# Oanda supported granularities
_GRANULARITIES = {
(bt.TimeFrame.Seconds, 5): 'S5',
(bt.TimeFrame.Seconds, 10): 'S10',
(bt.TimeFrame.Seconds, 15): 'S15',
(bt.TimeFrame.Seconds, 30): 'S30',
(bt.TimeFrame.Minutes, 1): 'M1',
(bt.TimeFrame.Minutes, 2): 'M3',
(bt.TimeFrame.Minutes, 3): 'M3',
(bt.TimeFrame.Minutes, 4): 'M4',
(bt.TimeFrame.Minutes, 5): 'M5',
(bt.TimeFrame.Minutes, 10): 'M5',
(bt.TimeFrame.Minutes, 15): 'M5',
(bt.TimeFrame.Minutes, 30): 'M5',
(bt.TimeFrame.Minutes, 60): 'H1',
(bt.TimeFrame.Minutes, 120): 'H2',
(bt.TimeFrame.Minutes, 180): 'H3',
(bt.TimeFrame.Minutes, 240): 'H4',
(bt.TimeFrame.Minutes, 360): 'H6',
(bt.TimeFrame.Minutes, 480): 'H8',
(bt.TimeFrame.Days, 1): 'D',
(bt.TimeFrame.Weeks, 1): 'W',
(bt.TimeFrame.Months, 1): 'M',
}
    def get_positions(self):
        """Return the list of open positions, or ``None`` on API failure."""
        try:
            positions = self.oapi.get_positions(self.p.account)
        except (oandapy.OandaError, OandaRequestError,):
            return None

        poslist = positions.get('positions', [])
        return poslist

    def get_granularity(self, timeframe, compression):
        """Translate (timeframe, compression) to an Oanda granularity code.

        Returns ``None`` for unsupported combinations.
        """
        return self._GRANULARITIES.get((timeframe, compression), None)

    def get_instrument(self, dataname):
        """Return the instrument description for ``dataname`` or ``None``."""
        try:
            insts = self.oapi.get_instruments(self.p.account,
                                              instruments=dataname)
        except (oandapy.OandaError, OandaRequestError,):
            return None

        # API returns a list; fall back to None on an empty description
        i = insts.get('instruments', [{}])
        return i[0] or None
    def streaming_events(self, tmout=None):
        """Start the events stream plus its listener; return the shared queue."""
        q = queue.Queue()
        kwargs = {'q': q, 'tmout': tmout}

        t = threading.Thread(target=self._t_streaming_listener, kwargs=kwargs)
        t.daemon = True
        t.start()

        t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs)
        t.daemon = True
        t.start()
        return q

    def _t_streaming_listener(self, q, tmout=None):
        # Worker: forward every streamed transaction to the processor
        while True:
            trans = q.get()
            self._transaction(trans)

    def _t_streaming_events(self, q, tmout=None):
        # Worker: run the Oanda events streamer (a blocking call)
        if tmout is not None:
            _time.sleep(tmout)

        streamer = Streamer(q,
                            environment=self._oenv,
                            access_token=self.p.token,
                            headers={'X-Accept-Datetime-Format': 'UNIX'})

        streamer.events(ignore_heartbeat=False)
    def candles(self, dataname, dtbegin, dtend, timeframe, compression,
                candleFormat, includeFirst):
        """Download historical candles in a background thread.

        Returns a queue on which candles are delivered one by one; an empty
        dict marks the end of transmission.
        """
        kwargs = locals().copy()
        kwargs.pop('self')
        kwargs['q'] = q = queue.Queue()
        t = threading.Thread(target=self._t_candles, kwargs=kwargs)
        t.daemon = True
        t.start()
        return q

    def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression,
                   candleFormat, includeFirst, q):
        # Worker: fetch the history and feed the queue
        granularity = self.get_granularity(timeframe, compression)
        if granularity is None:
            e = OandaTimeFrameError()
            q.put(e.error_response)
            return

        # Convert the datetime boundaries to epoch seconds for the API
        dtkwargs = {}
        if dtbegin is not None:
            dtkwargs['start'] = int((dtbegin - self._DTEPOCH).total_seconds())

        if dtend is not None:
            dtkwargs['end'] = int((dtend - self._DTEPOCH).total_seconds())

        try:
            response = self.oapi.get_history(instrument=dataname,
                                             granularity=granularity,
                                             candleFormat=candleFormat,
                                             **dtkwargs)
        except oandapy.OandaError as e:
            q.put(e.error_response)
            q.put(None)
            return

        for candle in response.get('candles', []):
            q.put(candle)

        # NOTE(review): the three exit paths push different markers
        # (nothing, None, {}) -- confirm consumers handle all of them
        q.put({})  # end of transmission
    def streaming_prices(self, dataname, tmout=None):
        """Start streaming prices for ``dataname``; return the tick queue."""
        q = queue.Queue()
        kwargs = {'q': q, 'dataname': dataname, 'tmout': tmout}
        t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
        t.daemon = True
        t.start()
        return q

    def _t_streaming_prices(self, dataname, q, tmout):
        # Worker: run the Oanda rates streamer (a blocking call)
        if tmout is not None:
            _time.sleep(tmout)

        streamer = Streamer(q, environment=self._oenv,
                            access_token=self.p.token,
                            headers={'X-Accept-Datetime-Format': 'UNIX'})

        streamer.rates(self.p.account, instruments=dataname)

    def get_cash(self):
        # Last cash figure refreshed by the account thread
        return self._cash

    def get_value(self):
        # Last account value refreshed by the account thread
        return self._value
_ORDEREXECS = {
bt.Order.Market: 'market',
bt.Order.Limit: 'limit',
bt.Order.Stop: 'stop',
bt.Order.StopLimit: 'stop',
}
    def broker_threads(self):
        """Start the account refresh and order create/cancel worker threads."""
        self.q_account = queue.Queue()
        self.q_account.put(True)  # force an immediate update
        t = threading.Thread(target=self._t_account)
        t.daemon = True
        t.start()

        self.q_ordercreate = queue.Queue()
        t = threading.Thread(target=self._t_order_create)
        t.daemon = True
        t.start()

        self.q_orderclose = queue.Queue()
        t = threading.Thread(target=self._t_order_cancel)
        t.daemon = True
        t.start()

        # Wait once for the values to be set
        self._evt_acct.wait(self.p.account_tmout)
    def _t_account(self):
        # Worker: refresh cash/value every ``account_tmout`` seconds, or on
        # demand via ``q_account``; a ``None`` message ends the thread.
        while True:
            try:
                msg = self.q_account.get(timeout=self.p.account_tmout)
                if msg is None:
                    break  # end of thread
            except queue.Empty:  # tmout -> time to refresh
                pass

            try:
                accinfo = self.oapi.get_account(self.p.account)
            except Exception as e:
                self.put_notification(e)
                continue

            # Missing keys leave the previous values untouched
            try:
                self._cash = accinfo['marginAvail']
                self._value = accinfo['balance']
            except KeyError:
                pass

            # Unblock broker_threads(), which waits for the first refresh
            self._evt_acct.set()
    def order_create(self, order, stopside=None, takeside=None, **kwargs):
        """Translate a backtrader order into Oanda kwargs and queue it.

        ``stopside``/``takeside`` optionally attach stop-loss/take-profit
        prices; any extra ``kwargs`` are forwarded to the API untouched.
        Returns the order for chaining.
        """
        okwargs = dict()
        okwargs['instrument'] = order.data._dataname
        okwargs['units'] = abs(order.created.size)
        okwargs['side'] = 'buy' if order.isbuy() else 'sell'
        okwargs['type'] = self._ORDEREXECS[order.exectype]
        if order.exectype != bt.Order.Market:
            okwargs['price'] = order.created.price
            if order.valid is None:
                # 1 year and datetime.max fail ... 1 month works
                valid = datetime.utcnow() + timedelta(days=30)
            else:
                valid = order.data.num2date(order.valid)
            # To timestamp with seconds precision
            okwargs['expiry'] = int((valid - self._DTEPOCH).total_seconds())

        if order.exectype == bt.Order.StopLimit:
            # Oanda has no native stop-limit: emulate with price bounds
            okwargs['lowerBound'] = order.created.pricelimit
            okwargs['upperBound'] = order.created.pricelimit

        if order.exectype == bt.Order.StopTrail:
            okwargs['trailingStop'] = order.trailamount

        if stopside is not None:
            okwargs['stopLoss'] = stopside.price

        if takeside is not None:
            okwargs['takeProfit'] = takeside.price

        okwargs.update(**kwargs)  # anything from the user

        self.q_ordercreate.put((order.ref, okwargs,))
        return order
_OIDSINGLE = ['orderOpened', 'tradeOpened', 'tradeReduced']
_OIDMULTIPLE = ['tradesClosed']
    def _t_order_create(self):
        # Worker: submit queued orders to Oanda; ``None`` ends the thread.
        while True:
            msg = self.q_ordercreate.get()
            if msg is None:
                break

            oref, okwargs = msg
            try:
                o = self.oapi.create_order(self.p.account, **okwargs)
            except Exception as e:
                self.put_notification(e)
                self.broker._reject(oref)
                # NOTE(review): ``return`` terminates the worker thread, so
                # orders queued afterwards will never be sent -- confirm
                # whether ``continue`` was intended here.
                return

            # Ids are delivered in different fields and all must be fetched to
            # match them (as executions) to the order generated here
            oids = list()
            for oidfield in self._OIDSINGLE:
                if oidfield in o and 'id' in o[oidfield]:
                    oids.append(o[oidfield]['id'])

            for oidfield in self._OIDMULTIPLE:
                if oidfield in o:
                    for suboidfield in o[oidfield]:
                        oids.append(suboidfield['id'])

            if not oids:
                self.broker._reject(oref)
                # NOTE(review): same thread-terminating ``return`` as above
                return

            self._orders[oref] = oids[0]
            self.broker._submit(oref)
            if okwargs['type'] == 'market':
                self.broker._accept(oref)  # taken immediately

            for oid in oids:
                self._ordersrev[oid] = oref  # maps ids to backtrader order

                # A transaction may have happened and was stored; drain the
                # pending queue up to the end-of-marker appended here
                tpending = self._transpend[oid]
                tpending.append(None)  # eom marker
                while True:
                    trans = tpending.popleft()
                    if trans is None:
                        break
                    self._process_transaction(oid, trans)
    def order_cancel(self, order):
        """Queue a cancellation request for ``order``; returns the order."""
        self.q_orderclose.put(order.ref)
        return order

    def _t_order_cancel(self):
        # Worker: cancel queued order refs; ``None`` ends the thread.
        while True:
            oref = self.q_orderclose.get()
            if oref is None:
                break

            oid = self._orders.get(oref, None)
            if oid is None:
                continue  # the order is no longer there

            try:
                o = self.oapi.close_order(self.p.account, oid)
            except Exception as e:
                continue  # not cancelled - FIXME: notify

            self.broker._cancel(oref)
_X_ORDER_CREATE = ('STOP_ORDER_CREATE',
'LIMIT_ORDER_CREATE', 'MARKET_IF_TOUCHED_ORDER_CREATE',)
    def _transaction(self, trans):
        """Dispatch a streamed transaction by type.

        Invoked from Streaming Events. May actually receive an event for an
        oid which has not yet been returned after creating an order. Hence
        store if not yet seen, else forward to processer.
        """
        # Each transaction type carries its order id in a different field
        ttype = trans['type']
        if ttype == 'MARKET_ORDER_CREATE':
            try:
                oid = trans['tradeReduced']['id']
            except KeyError:
                try:
                    oid = trans['tradeOpened']['id']
                except KeyError:
                    return  # cannot do anything else

        elif ttype in self._X_ORDER_CREATE:
            oid = trans['id']

        elif ttype == 'ORDER_FILLED':
            oid = trans['orderId']

        elif ttype == 'ORDER_CANCEL':
            oid = trans['orderId']

        elif ttype == 'TRADE_CLOSE':
            oid = trans['id']
            pid = trans['tradeId']
            # Deliberately disabled check ("and False") kept as-is
            if pid in self._orders and False:  # Know nothing about trade
                return  # can do nothing

            # Skip above - at the moment do nothing
            # Received directly from an event in the WebGUI for example which
            # closes an existing position related to order with id -> pid
            # COULD BE DONE: Generate a fake counter order to gracefully
            # close the existing position
            msg = ('Received TRADE_CLOSE for unknown order, possibly generated'
                   ' over a different client or GUI')
            self.put_notification(msg, trans)
            return

        else:  # Go aways gracefully
            try:
                oid = trans['id']
            except KeyError:
                oid = 'None'

            msg = 'Received {} with oid {}. Unknown situation'
            msg = msg.format(ttype, oid)
            self.put_notification(msg, trans)
            return

        # Forward if the id is already known; the lookup itself raises the
        # KeyError that routes unseen ids into the pending store
        try:
            oref = self._ordersrev[oid]
            self._process_transaction(oid, trans)
        except KeyError:  # not yet seen, keep as pending
            self._transpend[oid].append(trans)
_X_ORDER_FILLED = ('MARKET_ORDER_CREATE',
'ORDER_FILLED', 'TAKE_PROFIT_FILLED',
'STOP_LOSS_FILLED', 'TRAILING_STOP_FILLED',)
def _process_transaction(self, oid, trans):
try:
oref = self._ordersrev.pop(oid)
except KeyError:
return
ttype = trans['type']
if ttype in self._X_ORDER_FILLED:
size = trans['units']
if trans['side'] == 'sell':
size = -size
price = trans['price']
self.broker._fill(oref, size, price, ttype=ttype)
elif ttype in self._X_ORDER_CREATE:
self.broker._accept(oref)
self._ordersrev[oid] = oref
elif ttype in 'ORDER_CANCEL':
reason = trans['reason']
if reason == 'ORDER_FILLED':
pass # individual execs have done the job
elif reason == 'TIME_IN_FORCE_EXPIRED':
self.broker._expire(oref)
elif reason == 'CLIENT_REQUEST':
self.broker._cancel(oref)
else: # default action ... if nothing else
self.broker._reject(oref)
| 33.525758 | 79 | 0.568853 |
7a539c7fd7cc2300cec9aa3b5c0541583e786722 | 1,143 | py | Python | teamcat_service/doraemon/doraemon/api/project/serializer/project_issue_statistics_serializer.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 6 | 2018-11-26T08:42:52.000Z | 2020-06-01T08:33:48.000Z | teamcat_service/doraemon/doraemon/api/project/serializer/project_issue_statistics_serializer.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | null | null | null | teamcat_service/doraemon/doraemon/api/project/serializer/project_issue_statistics_serializer.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 1 | 2019-01-22T06:45:36.000Z | 2019-01-22T06:45:36.000Z | #coding=utf-8
'''
Created on 2016-10-12
@author: Administrator
'''
from rest_framework import serializers
from doraemon.project.models import ProjectIssueDailyStatistics,ProjectIssueVersionStatistics
class IssueTrendStatisticsSerializer(serializers.Serializer):
    """Read-only serializer describing an issue-trend statistics chart."""

    chart_id = serializers.IntegerField()
    project_id = serializers.IntegerField()
    version_id = serializers.IntegerField()
    chart_type = serializers.CharField()
    chart_title = serializers.CharField()
    chart_sub_title = serializers.CharField()
    xaxis = serializers.ListField()
    yaxis = serializers.ListField()
    tooltip = serializers.CharField()
    series_data = serializers.ListField()

    def save(self):
        # Serializer is for GET responses only; persisting is unsupported
        raise Exception("only get request")
class IssueDailyStatisticsSerializer(serializers.ModelSerializer):
    """Model serializer for per-day project issue statistics."""

    class Meta:
        model = ProjectIssueDailyStatistics
        exclude = ('IsActive',)  # flag not exposed through the API
        read_only_fields = ('id',)


class IssueVersionStatisticsSerializer(serializers.ModelSerializer):
    """Model serializer for per-version project issue statistics."""

    class Meta:
        model = ProjectIssueVersionStatistics
        exclude = ('IsActive',)  # flag not exposed through the API
        read_only_fields = ('id',)
| 27.878049 | 93 | 0.733158 |
1e4588816496ab14b7efe2b8cbadfa390897add2 | 2,486 | py | Python | interactive_predict.py | AmmarHarrat/Test_code2vec | fc09b7c1db950bfe3a8f7991460d515a8c24005f | [
"MIT"
] | 2 | 2021-02-03T08:50:00.000Z | 2021-04-05T18:36:43.000Z | interactive_predict.py | AmmarHarrat/Test_code2vec | fc09b7c1db950bfe3a8f7991460d515a8c24005f | [
"MIT"
] | 2 | 2020-12-03T13:15:55.000Z | 2022-02-10T02:06:22.000Z | interactive_predict.py | AmmarHarrat/Test_code2vec | fc09b7c1db950bfe3a8f7991460d515a8c24005f | [
"MIT"
] | 2 | 2021-04-08T14:51:01.000Z | 2021-09-22T07:57:37.000Z | import traceback
from keras.models import Model
from common import common
from extractor import Extractor
# Number of top attention contexts shown per prediction
SHOW_TOP_CONTEXTS = 10
# AST path extraction limits passed to the extractor
MAX_PATH_LENGTH = 8
MAX_PATH_WIDTH = 1
# Path to the CLI jar that performs the path extraction
JAR_PATH = 'cd2vec/cli.jar'
class InteractivePredictor:
    """Interactive loop predicting method names for a user-edited file."""

    exit_keywords = ['exit', 'quit', 'q']

    def __init__(self, config, model):
        self.model = model
        self.config = config
        self.path_extractor = Extractor(config,
                                        jar_path=JAR_PATH,
                                        max_path_length=MAX_PATH_LENGTH,
                                        max_path_width=MAX_PATH_WIDTH)

    def read_file(self, input_filename):
        """Return the lines of ``input_filename``."""
        with open(input_filename, 'r') as file:
            return file.readlines()

    def predict(self):
        """Repeatedly extract paths from the input file and print predictions.

        The loop ends when the user types one of ``exit_keywords``.
        """
        input_filename = 'pred_files/Input.py'
        print('Starting interactive prediction...')
        while True:
            print(
                'Modify the file: "%s" and press any key when ready, or "q" / "quit" / "exit" to exit' % input_filename)
            user_input = input()
            if user_input.lower() in self.exit_keywords:
                print('Exiting...')
                return
            try:
                predict_lines, hash_to_string_dict = self.path_extractor.extract_paths(input_filename)
            except ValueError as e:
                print(e)
                # Fixed: without this ``continue`` the loop fell through and
                # raised a NameError on the undefined ``predict_lines``.
                continue
            raw_prediction_results = self.model.predict(predict_lines)
            method_prediction_results = common.parse_prediction_results(
                raw_prediction_results, hash_to_string_dict,
                self.model.vocabs.target_vocab.special_words, topk=SHOW_TOP_CONTEXTS)
            for raw_prediction, method_prediction in zip(raw_prediction_results, method_prediction_results):
                print('Original name:\t' + method_prediction.original_name)
                for name_prob_pair in method_prediction.predictions:
                    print('\t(%f) predicted: %s' % (name_prob_pair['probability'], name_prob_pair['name']))
                print('Attention:')
                for attention_obj in method_prediction.attention_paths:
                    print('%f\tcontext: %s,%s,%s' % (
                        attention_obj['score'], attention_obj['token1'], attention_obj['path'], attention_obj['token2']))
                if self.config.EXPORT_CODE_VECTORS:
                    print('Code vector:')
                    print(' '.join(map(str, raw_prediction.code_vector)))
| 42.135593 | 120 | 0.597345 |
388e82aead6c1929c82cd66074f113f438892e8d | 2,705 | py | Python | kde/pim/kalarm/kalarm.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-09-04T09:01:03.000Z | 2022-01-04T20:09:00.000Z | kde/pim/kalarm/kalarm.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 14 | 2017-12-15T08:11:22.000Z | 2020-12-29T19:11:13.000Z | kde/pim/kalarm/kalarm.py | KDE/craft-blueprints-kde | 14932d4b95ce0070ab8ae5669411c62ffa304c9b | [
"BSD-2-Clause"
] | 19 | 2017-09-05T19:16:21.000Z | 2020-10-18T12:46:06.000Z | import info
class subinfo(info.infoclass):
    """Craft blueprint metadata for KAlarm (KDE PIM alarm application)."""

    def setTargets(self):
        # Use the standard KDE release version scheme
        self.versionInfo.setDefaultValues()

        self.description = "KAlarm"

    def setDependencies(self):
        # Build-time tooling
        self.runtimeDependencies["virtual/base"] = None
        self.buildDependencies["kde/frameworks/extra-cmake-modules"] = None
        # Qt base libraries
        self.runtimeDependencies["libs/qt5/qtbase"] = None
        # KDE Frameworks
        self.runtimeDependencies["kde/frameworks/tier1/ki18n"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kcodecs"] = None
        self.runtimeDependencies["kde/frameworks/tier2/kcompletion"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kcoreaddons"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kconfig"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kdbusaddons"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kidletime"] = None
        self.runtimeDependencies["kde/frameworks/tier3/kiconthemes"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kitemmodels"] = None
        self.runtimeDependencies["kde/frameworks/tier3/kio"] = None
        self.runtimeDependencies["kde/frameworks/tier2/kjobwidgets"] = None
        self.runtimeDependencies["kde/frameworks/tier3/knewstuff"] = None
        self.runtimeDependencies["kde/frameworks/tier3/kservice"] = None
        self.runtimeDependencies["kde/frameworks/tier3/kwallet"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kwidgetsaddons"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kwindowsystem"] = None
        self.runtimeDependencies["kde/frameworks/tier3/kxmlgui"] = None
        self.runtimeDependencies["kde/frameworks/tier4/kdelibs4support"] = None
        # KDE PIM stack
        self.runtimeDependencies["kde/pim/kalarmcal"] = None
        self.runtimeDependencies["kde/pim/kmime"] = None
        self.runtimeDependencies["kde/pim/akonadi"] = None
        self.runtimeDependencies["kde/pim/akonadi-contacts"] = None
        self.runtimeDependencies["kde/pim/akonadi-mime"] = None
        self.runtimeDependencies["kde/pim/kpimtextedit"] = None
        self.runtimeDependencies["kde/frameworks/tier1/kholidays"] = None
        self.runtimeDependencies["kde/pim/kidentitymanagement"] = None
        self.runtimeDependencies["kde/pim/kimap"] = None
        self.runtimeDependencies["kde/pim/mailcommon"] = None
        self.runtimeDependencies["kde/pim/kmailtransport"] = None
        self.runtimeDependencies["kde/pim/kdepim-runtime"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
    """CMake-based build definition for KAlarm."""

    def __init__(self):
        CMakePackageBase.__init__(self)
        # Enable unity (jumbo) build support in the CMake configure step
        self.subinfo.options.configure.args += "-DUSE_UNITY_CMAKE_SUPPORT=ON "
| 49.181818 | 79 | 0.716081 |
038b4b77acdef4a63765718ed80fb7086f733922 | 512 | py | Python | ecommercejockey/premier/migrations/0005_premierproduct_primary_image.py | anniethiessen/dieselr-ecommerce | 9268b72553845a4650cdfe7c88b398db3cf92258 | [
"MIT"
] | null | null | null | ecommercejockey/premier/migrations/0005_premierproduct_primary_image.py | anniethiessen/dieselr-ecommerce | 9268b72553845a4650cdfe7c88b398db3cf92258 | [
"MIT"
] | 11 | 2020-06-06T00:04:26.000Z | 2022-03-12T00:57:41.000Z | ecommercejockey/premier/migrations/0005_premierproduct_primary_image.py | anniethiessen/ecommerce-jockey | 9268b72553845a4650cdfe7c88b398db3cf92258 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-24 23:05
from django.db import migrations, models
import premier.utils
class Migration(migrations.Migration):
    """Alter ``PremierProduct.primary_image`` to an optional ImageField."""

    dependencies = [
        ('premier', '0004_premiermanufacturer_is_relevant'),
    ]

    operations = [
        migrations.AlterField(
            model_name='premierproduct',
            name='primary_image',
            # blank/null allow products without an uploaded image
            field=models.ImageField(blank=True, max_length=200, null=True, upload_to=premier.utils.premier_product_image_path),
        ),
    ]
| 25.6 | 127 | 0.669922 |
8718703dd695350a4a3687075701a28bab73dddd | 3,439 | py | Python | examples/quiz/schema.py | toluaina/essync | 4a0119d99760eaa193f4ae60abd2b5f38482b280 | [
"BSD-3-Clause"
] | 1 | 2019-09-26T21:05:37.000Z | 2019-09-26T21:05:37.000Z | examples/quiz/schema.py | toluaina/essync | 4a0119d99760eaa193f4ae60abd2b5f38482b280 | [
"BSD-3-Clause"
] | null | null | null | examples/quiz/schema.py | toluaina/essync | 4a0119d99760eaa193f4ae60abd2b5f38482b280 | [
"BSD-3-Clause"
] | 1 | 2019-08-27T16:19:09.000Z | 2019-08-27T16:19:09.000Z | import json
import click
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import ForeignKeyConstraint, UniqueConstraint
from pgsync.base import create_database, pg_engine
from pgsync.helper import teardown
from pgsync.utils import get_config
Base = declarative_base()
class Category(Base):
    """A quiz category with a composite (id, uid) primary key."""

    __tablename__ = "category"
    __table_args__ = (UniqueConstraint("text"),)
    id = sa.Column(sa.Integer, primary_key=True)
    uid = sa.Column(sa.String, primary_key=True)
    text = sa.Column(sa.String, nullable=False)
class Question(Base):
    """A quiz question, linked to its category by a composite foreign key."""

    __tablename__ = "question"
    __table_args__ = (
        UniqueConstraint("text"),
        ForeignKeyConstraint(
            ["category_id", "category_uid"], ["category.id", "category.uid"]
        ),
    )
    id = sa.Column(sa.Integer, primary_key=True)
    uid = sa.Column(sa.String, primary_key=True)
    category_id = sa.Column(sa.Integer)
    category_uid = sa.Column(sa.String)
    text = sa.Column(sa.String, nullable=False)
class Answer(Base):
    """An answer option with a composite (id, uid) primary key."""

    __tablename__ = "answer"
    __table_args__ = (UniqueConstraint("text"),)
    id = sa.Column(sa.Integer, primary_key=True)
    uid = sa.Column(sa.String, primary_key=True)
    text = sa.Column(sa.String, nullable=False)
class PossibleAnswer(Base):
    """Association of a question with one of its possible answers."""

    __tablename__ = "possible_answer"
    __table_args__ = (
        # A (question, answer) pair may only appear once
        UniqueConstraint(
            "question_id",
            "question_uid",
            "answer_id",
            "answer_uid",
        ),
        ForeignKeyConstraint(
            ["answer_id", "answer_uid"],
            ["answer.id", "answer.uid"],
        ),
        ForeignKeyConstraint(
            ["question_id", "question_uid"],
            ["question.id", "question.uid"],
        ),
    )
    question_id = sa.Column(sa.Integer, primary_key=True)
    question_uid = sa.Column(sa.String, primary_key=True)
    answer_id = sa.Column(sa.Integer, primary_key=True)
    answer_uid = sa.Column(sa.String, primary_key=True)

    answer = sa.orm.relationship(Answer, backref=sa.orm.backref("answer"))
class RealAnswer(Base):
    """Association of a question with its correct (real) answer."""

    __tablename__ = "real_answer"
    __table_args__ = (
        # A (question, answer) pair may only appear once
        UniqueConstraint(
            "question_id",
            "question_uid",
            "answer_id",
            "answer_uid",
        ),
        ForeignKeyConstraint(
            ["answer_id", "answer_uid"],
            ["answer.id", "answer.uid"],
        ),
        ForeignKeyConstraint(
            ["question_id", "question_uid"],
            ["question.id", "question.uid"],
        ),
    )
    question_id = sa.Column(
        sa.Integer,
        primary_key=True,
    )
    question_uid = sa.Column(
        sa.String,
        primary_key=True,
    )
    answer_id = sa.Column(
        sa.Integer,
        primary_key=True,
    )
    answer_uid = sa.Column(
        sa.String,
        primary_key=True,
    )
def setup(config=None):
    """Create and (re)initialize a database for every document in ``config``.

    ``config`` is the path to a JSON file holding a list of documents; each
    document names its database (falling back to the index name).
    """
    # Fixed: use a context manager so the config file handle is closed
    # instead of leaking from the bare ``open`` inside ``json.load``.
    with open(config) as fp:
        documents = json.load(fp)
    for document in documents:
        database = document.get("database", document["index"])
        create_database(database)
        engine = pg_engine(database=database)
        # Rebuild the schema from scratch
        Base.metadata.drop_all(engine)
        Base.metadata.create_all(engine)
@click.command()
@click.option(
    "--config",
    "-c",
    help="Schema config",
    type=click.Path(exists=True),
)
def main(config):
    """CLI entry point: tear down existing state, then build the schema."""
    config = get_config(config)
    teardown(config=config)
    setup(config)


if __name__ == "__main__":
    main()
| 26.05303 | 76 | 0.62082 |
17ebbe2de10bed978371557c00d94e2fc880d271 | 61,391 | py | Python | brutus/plotting.py | catherinezucker/brutus | 28d0a953e5f05b3295aab23cebc974ece7a1e300 | [
"MIT"
] | null | null | null | brutus/plotting.py | catherinezucker/brutus | 28d0a953e5f05b3295aab23cebc974ece7a1e300 | [
"MIT"
] | null | null | null | brutus/plotting.py | catherinezucker/brutus | 28d0a953e5f05b3295aab23cebc974ece7a1e300 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plotting utilities.
"""
from __future__ import (print_function, division)
from six.moves import range
import warnings
import logging
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator, NullLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
from scipy.ndimage import gaussian_filter as norm_kde
import copy
from .pdf import gal_lnprior, parallax_lnprior
from .utils import quantile, draw_sar, get_seds, magnitude, phot_loglike
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
str_type = str
float_type = float
int_type = int
__all__ = ["cornerplot", "dist_vs_red", "posterior_predictive",
"photometric_offsets", "photometric_offsets_2d", "_hist2d"]
def cornerplot(idxs, data, params, lndistprior=None, coord=None,
avlim=(0., 6.), rvlim=(1., 8.), weights=None, parallax=None,
parallax_err=None, Nr=500, applied_parallax=True,
pcolor='blue', parallax_kwargs=None, span=None,
quantiles=[0.025, 0.5, 0.975], color='black', smooth=10,
hist_kwargs=None, hist2d_kwargs=None, labels=None,
label_kwargs=None, show_titles=False, title_fmt=".2f",
title_kwargs=None, title_quantiles=[0.025, 0.5, 0.975],
truths=None, truth_color='red',
truth_kwargs=None, max_n_ticks=5, top_ticks=False,
use_math_text=False, verbose=False, fig=None, rstate=None):
"""
Generate a corner plot of the 1-D and 2-D marginalized posteriors.
Parameters
----------
idxs : `~numpy.ndarray` of shape `(Nsamps)`
An array of resampled indices corresponding to the set of models used
to fit the data.
data : 3-tuple or 4-tuple containing `~numpy.ndarray`s of shape `(Nsamps)`
The data that will be plotted. Either a collection of
`(dists, reds, dreds)` that were saved, or a collection of
`(scales, avs, rvs, covs_sar)` that will be used to regenerate
`(dists, reds, dreds)` in conjunction with any applied distance
and/or parallax priors.
params : structured `~numpy.ndarray` with shape `(Nmodels,)`
Set of parameters corresponding to the input set of models. Note that
`'agewt'` will always be ignored.
lndistprior : func, optional
The log-distsance prior function used. If not provided, the galactic
model from Green et al. (2014) will be assumed.
coord : 2-tuple, optional
The galactic `(l, b)` coordinates for the object, which is passed to
`lndistprior`.
avlim : 2-tuple, optional
The Av limits used to truncate results. Default is `(0., 6.)`.
rvlim : 2-tuple, optional
The Rv limits used to truncate results. Default is `(1., 8.)`.
weights : `~numpy.ndarray` of shape `(Nsamps)`, optional
An optional set of importance weights used to reweight the samples.
parallax : float, optional
The parallax estimate for the source.
parallax_err : float, optional
The parallax error.
Nr : int, optional
The number of Monte Carlo realizations used when sampling using the
provided parallax prior. Default is `500`.
applied_parallax : bool, optional
Whether the parallax was applied when initially computing the fits.
Default is `True`.
pcolor : str, optional
Color used when plotting the parallax prior. Default is `'blue'`.
parallax_kwargs : kwargs, optional
Keyword arguments used when plotting the parallax prior passed to
`fill_between`.
span : iterable with shape `(ndim,)`, optional
A list where each element is either a length-2 tuple containing
lower and upper bounds or a float from `(0., 1.]` giving the
fraction of (weighted) samples to include. If a fraction is provided,
the bounds are chosen to be equal-tailed. An example would be::
span = [(0., 10.), 0.95, (5., 6.)]
Default is `0.99` (99% credible interval).
quantiles : iterable, optional
A list of fractional quantiles to overplot on the 1-D marginalized
posteriors as vertical dashed lines. Default is `[0.025, 0.5, 0.975]`
(spanning the 95%/2-sigma credible interval).
color : str or iterable with shape `(ndim,)`, optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting the histograms.
Default is `'black'`.
smooth : float or iterable with shape `(ndim,)`, optional
The standard deviation (either a single value or a different value for
each subplot) for the Gaussian kernel used to smooth the 1-D and 2-D
marginalized posteriors, expressed as a fraction of the span.
If an integer is provided instead, this will instead default
to a simple (weighted) histogram with `bins=smooth`.
Default is `10` (10 bins).
hist_kwargs : dict, optional
Extra keyword arguments to send to the 1-D (smoothed) histograms.
hist2d_kwargs : dict, optional
Extra keyword arguments to send to the 2-D (smoothed) histograms.
labels : iterable with shape `(ndim,)`, optional
A list of names for each parameter. If not provided, the names will
be taken from `params.dtype.names`.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
show_titles : bool, optional
Whether to display a title above each 1-D marginalized posterior
showing the quantiles specified by `title_quantiles`. By default,
This will show the median (0.5 quantile) along with the upper/lower
bounds associated with the 0.025 and 0.975 (95%/2-sigma credible
interval) quantiles.
Default is `True`.
title_fmt : str, optional
The format string for the quantiles provided in the title. Default is
`'.2f'`.
title_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_title` command.
title_quantiles : iterable, optional
A list of 3 fractional quantiles displayed in the title, ordered
from lowest to highest. Default is `[0.025, 0.5, 0.975]`
(spanning the 95%/2-sigma credible interval).
truths : iterable with shape `(ndim,)`, optional
A list of reference values that will be overplotted on the traces and
marginalized 1-D posteriors as solid horizontal/vertical lines.
Individual values can be exempt using `None`. Default is `None`.
truth_color : str or iterable with shape `(ndim,)`, optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting `truths`.
Default is `'red'`.
truth_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the vertical
and horizontal lines with `truths`.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
top_ticks : bool, optional
Whether to label the top (rather than bottom) ticks. Default is
`False`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
verbose : bool, optional
Whether to print the values of the computed quantiles associated with
each parameter. Default is `False`.
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the traces and marginalized 1-D posteriors
onto the provided figure. Otherwise, by default an
internal figure is generated.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
Returns
-------
cornerplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output corner plot.
"""
# Initialize values.
if quantiles is None:
quantiles = []
if truth_kwargs is None:
truth_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
if title_kwargs is None:
title_kwargs = dict()
if hist_kwargs is None:
hist_kwargs = dict()
if hist2d_kwargs is None:
hist2d_kwargs = dict()
if weights is None:
weights = np.ones_like(idxs, dtype='float')
if rstate is None:
rstate = np.random
if applied_parallax:
if parallax is None or parallax_err is None:
raise ValueError("`parallax` and `parallax_err` must be provided "
"together.")
if parallax_kwargs is None:
parallax_kwargs = dict()
if lndistprior is None:
lndistprior = gal_lnprior
# Set defaults.
hist_kwargs['alpha'] = hist_kwargs.get('alpha', 0.6)
hist2d_kwargs['alpha'] = hist2d_kwargs.get('alpha', 0.6)
truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)
parallax_kwargs['alpha'] = parallax_kwargs.get('alpha', 0.3)
# Ignore age weights.
labels = [l for l in params.dtype.names if l != 'agewt']
# Deal with 1D results.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
samples = params[idxs][labels]
samples = samples.view((np.float64, len(samples.dtype.names)))
samples = np.atleast_1d(samples)
if len(samples.shape) == 1:
samples = np.atleast_2d(samples)
else:
assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
samples = samples.T
assert samples.shape[0] <= samples.shape[1], "There are more " \
"dimensions than samples!"
try:
# Grab distance and reddening samples.
ddraws, adraws, rdraws = copy.deepcopy(data)
pdraws = 1. / ddraws
except:
# Regenerate distance and reddening samples from inputs.
scales, avs, rvs, covs_sar = copy.deepcopy(data)
if lndistprior == gal_lnprior and coord is None:
raise ValueError("`coord` must be passed if the default distance "
"prior was used.")
# Add in scale/parallax/distance, Av, and Rv realizations.
nsamps = len(idxs)
sdraws, adraws, rdraws = draw_sar(scales, avs, rvs, covs_sar,
ndraws=Nr, avlim=avlim, rvlim=rvlim,
rstate=rstate)
pdraws = np.sqrt(sdraws)
ddraws = 1. / pdraws
# Re-apply distance and parallax priors to realizations.
lnp_draws = lndistprior(ddraws, coord)
if applied_parallax:
lnp_draws += parallax_lnprior(pdraws, parallax, parallax_err)
# Resample draws.
lnp = logsumexp(lnp_draws, axis=1)
pwt = np.exp(lnp_draws - lnp[:, None])
pwt /= pwt.sum(axis=1)[:, None]
ridx = [rstate.choice(Nr, p=pwt[i]) for i in range(nsamps)]
pdraws = pdraws[np.arange(nsamps), ridx]
ddraws = ddraws[np.arange(nsamps), ridx]
adraws = adraws[np.arange(nsamps), ridx]
rdraws = rdraws[np.arange(nsamps), ridx]
# Append to samples.
samples = np.c_[samples.T, adraws, rdraws, pdraws, ddraws].T
ndim, nsamps = samples.shape
# Check weights.
if weights.ndim != 1:
raise ValueError("Weights must be 1-D.")
if nsamps != weights.shape[0]:
raise ValueError("The number of weights and samples disagree!")
# Determine plotting bounds.
if span is None:
span = [0.99 for i in range(ndim)]
span = list(span)
if len(span) != ndim:
raise ValueError("Dimension mismatch between samples and span.")
for i, _ in enumerate(span):
try:
xmin, xmax = span[i]
except:
q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
span[i] = quantile(samples[i], q, weights=weights)
# Set labels
if labels is None:
labels = list(params.dtype.names)
labels.append('Av')
labels.append('Rv')
labels.append('Parallax')
labels.append('Distance')
# Setting up smoothing.
if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
smooth = [smooth for i in range(ndim)]
# Setup axis layout (from `corner.py`).
factor = 2.0 # size of side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # size of width/height margin
plotdim = factor * ndim + factor * (ndim - 1.) * whspace # plot size
dim = lbdim + plotdim + trdim # total size
# Initialize figure.
if fig is None:
fig, axes = plt.subplots(ndim, ndim, figsize=(dim, dim))
else:
try:
fig, axes = fig
axes = np.array(axes).reshape((ndim, ndim))
except:
raise ValueError("Mismatch between axes and dimension.")
# Format figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Plotting.
for i, x in enumerate(samples):
if np.shape(samples)[0] == 1:
ax = axes
else:
ax = axes[i, i]
# Plot the 1-D marginalized posteriors.
# Setup axes
ax.set_xlim(span[i])
if max_n_ticks == 0:
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
else:
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
ax.yaxis.set_major_locator(NullLocator())
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
ax.xaxis.set_major_formatter(sf)
if i < ndim - 1:
if top_ticks:
ax.xaxis.set_ticks_position("top")
[l.set_rotation(45) for l in ax.get_xticklabels()]
else:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_xlabel(labels[i], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
# Generate distribution.
sx = smooth[i]
if isinstance(sx, int_type):
# If `sx` is an integer, plot a weighted histogram with
# `sx` bins within the provided bounds.
n, b, _ = ax.hist(x, bins=sx, weights=weights, color=color,
range=np.sort(span[i]), **hist_kwargs)
else:
# If `sx` is a float, oversample the data relative to the
# smoothing filter by a factor of 10, then use a Gaussian
# filter to smooth the results.
bins = int(round(10. / sx))
n, b = np.histogram(x, bins=bins, weights=weights,
range=np.sort(span[i]))
n = norm_kde(n, 10.)
b0 = 0.5 * (b[1:] + b[:-1])
n, b, _ = ax.hist(b0, bins=b, weights=n,
range=np.sort(span[i]), color=color,
**hist_kwargs)
ax.set_ylim([0., max(n) * 1.05])
# Plot quantiles.
if quantiles is not None and len(quantiles) > 0:
qs = quantile(x, quantiles, weights=weights)
for q in qs:
ax.axvline(q, lw=2, ls="dashed", color=color)
if verbose:
print("Quantiles:")
print(labels[i], [blob for blob in zip(quantiles, qs)])
# Add truth value(s).
if truths is not None and truths[i] is not None:
try:
[ax.axvline(t, color=truth_color, **truth_kwargs)
for t in truths[i]]
except:
ax.axvline(truths[i], color=truth_color, **truth_kwargs)
# Set titles.
if show_titles:
title = None
if title_fmt is not None:
ql, qm, qh = quantile(x, title_quantiles, weights=weights)
q_minus, q_plus = qm - ql, qh - qm
fmt = "{{0:{0}}}".format(title_fmt).format
title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
title = title.format(fmt(qm), fmt(q_minus), fmt(q_plus))
title = "{0} = {1}".format(labels[i], title)
ax.set_title(title, **title_kwargs)
# Add parallax prior.
if i == ndim - 2 and parallax is not None and parallax_err is not None:
parallax_logpdf = parallax_lnprior(b, parallax, parallax_err)
parallax_pdf = np.exp(parallax_logpdf - max(parallax_logpdf))
parallax_pdf *= max(n) / max(parallax_pdf)
ax.fill_between(b, parallax_pdf, color=pcolor, **parallax_kwargs)
for j, y in enumerate(samples):
if np.shape(samples)[0] == 1:
ax = axes
else:
ax = axes[i, j]
# Plot the 2-D marginalized posteriors.
# Setup axes.
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
elif j == i:
continue
if max_n_ticks == 0:
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
else:
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
ax.xaxis.set_major_formatter(sf)
ax.yaxis.set_major_formatter(sf)
if i < ndim - 1:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
ax.set_ylabel(labels[i], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# Generate distribution.
sy = smooth[j]
check_ix = isinstance(sx, int_type)
check_iy = isinstance(sy, int_type)
if check_ix and check_iy:
fill_contours = False
plot_contours = False
else:
fill_contours = True
plot_contours = True
hist2d_kwargs['fill_contours'] = hist2d_kwargs.get('fill_contours',
fill_contours)
hist2d_kwargs['plot_contours'] = hist2d_kwargs.get('plot_contours',
plot_contours)
_hist2d(y, x, ax=ax, span=[span[j], span[i]],
weights=weights, color=color, smooth=[sy, sx],
**hist2d_kwargs)
# Add truth values
if truths is not None:
if truths[j] is not None:
try:
[ax.axvline(t, color=truth_color, **truth_kwargs)
for t in truths[j]]
except:
ax.axvline(truths[j], color=truth_color,
**truth_kwargs)
if truths[i] is not None:
try:
[ax.axhline(t, color=truth_color, **truth_kwargs)
for t in truths[i]]
except:
ax.axhline(truths[i], color=truth_color,
**truth_kwargs)
return (fig, axes)
def dist_vs_red(data, ebv=None, dist_type='distance_modulus',
                lndistprior=None, coord=None, avlim=(0., 6.), rvlim=(1., 8.),
                weights=None, parallax=None, parallax_err=None, Nr=300,
                cmap='Blues', bins=300, span=None, smooth=0.01,
                plot_kwargs=None, truths=None, truth_color='red',
                truth_kwargs=None, rstate=None):
    """
    Generate a 2-D plot of distance vs reddening.

    Parameters
    ----------
    data : 3-tuple or 4-tuple containing `~numpy.ndarray`s of shape `(Nsamps)`
        The data that will be plotted. Either a collection of
        `(dists, reds, dreds)` that were saved, or a collection of
        `(scales, avs, rvs, covs_sar)` that will be used to regenerate
        `(dists, reds)` in conjunction with any applied distance
        and/or parallax priors.
    ebv : bool, optional
        If provided, will convert from Av to E(B-V) when plotting using
        the provided Rv values. Default is `False`.
    dist_type : str, optional
        The distance format to be plotted. Options include `'parallax'`,
        `'scale'`, `'distance'`, and `'distance_modulus'`.
        Default is `'distance_modulus'`.
    lndistprior : func, optional
        The log-distsance prior function used. If not provided, the galactic
        model from Green et al. (2014) will be assumed.
    coord : 2-tuple, optional
        The galactic `(l, b)` coordinates for the object, which is passed to
        `lndistprior`.
    avlim : 2-tuple, optional
        The Av limits used to truncate results. Default is `(0., 6.)`.
    rvlim : 2-tuple, optional
        The Rv limits used to truncate results. Default is `(1., 8.)`.
    weights : `~numpy.ndarray` of shape `(Nsamps)`, optional
        An optional set of importance weights used to reweight the samples.
    parallax : float, optional
        The parallax estimate for the source.
    parallax_err : float, optional
        The parallax error.
    Nr : int, optional
        The number of Monte Carlo realizations used when sampling using the
        provided parallax prior. Default is `300`.
    cmap : str, optional
        The colormap used when plotting. Default is `'Blues'`.
    bins : int or list of ints with length `(ndim,)`, optional
        The number of bins to be used in each dimension. Default is `300`.
    span : iterable with shape `(2, 2)`, optional
        A list where each element is a length-2 tuple containing
        lower and upper bounds. If not provided, the x-axis will use the
        provided Av bounds while the y-axis will span `(4., 19.)` in
        distance modulus (both appropriately transformed).
    smooth : int/float or list of ints/floats with shape `(ndim,)`, optional
        The standard deviation (either a single value or a different value for
        each axis) for the Gaussian kernel used to smooth the 2-D
        marginalized posteriors. If an int is passed, the smoothing will
        be applied in units of the binning in that dimension. If a float
        is passed, it is expressed as a fraction of the span.
        Default is `0.01` (1% smoothing).
        **Cannot smooth by more than the provided parallax will allow.**
    plot_kwargs : dict, optional
        Extra keyword arguments to be used when plotting the smoothed
        2-D histograms.
    truths : iterable with shape `(ndim,)`, optional
        A list of reference values that will be overplotted on the traces and
        marginalized 1-D posteriors as solid horizontal/vertical lines.
        Individual values can be exempt using `None`. Default is `None`.
    truth_color : str or iterable with shape `(ndim,)`, optional
        A `~matplotlib`-style color (either a single color or a different
        value for each subplot) used when plotting `truths`.
        Default is `'red'`.
    truth_kwargs : dict, optional
        Extra keyword arguments that will be used for plotting the vertical
        and horizontal lines with `truths`.
    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.

    Returns
    -------
    hist2d : (counts, xedges, yedges, `~matplotlib.figure.Image`)
        Output 2-D histogram.

    """
    # Initialize values.
    if truth_kwargs is None:
        truth_kwargs = dict()
    if plot_kwargs is None:
        plot_kwargs = dict()
    if weights is None:
        weights = np.ones_like(data[0], dtype='float')
    if rstate is None:
        rstate = np.random
    if lndistprior is None:
        lndistprior = gal_lnprior
    if parallax is None or parallax_err is None:
        # Use NaNs internally to flag "no parallax provided".
        parallax, parallax_err = np.nan, np.nan
    # NOTE(review): `truths`/`truth_color` are accepted (and defaults are
    # set below) but never drawn in this function -- confirm intended.

    # Establish minimum smoothing scales in each distance parameterization
    # based on the 1-sigma parallax interval.
    p1sig = np.array([parallax + parallax_err,
                      max(parallax - parallax_err, 1e-10)])
    p_min_smooth = abs(np.diff(p1sig)) / 2.
    s_min_smooth = abs(np.diff(p1sig**2)) / 2.
    d_min_smooth = abs(np.diff(1. / p1sig)) / 2.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        dm_min_smooth = abs(np.diff(5. * np.log10(1. / p1sig) + 10.)) / 2.

    # Set up axes and labels.
    if dist_type not in ['parallax', 'scale', 'distance', 'distance_modulus']:
        raise ValueError("The provided `dist_type` is not valid.")
    if span is None:
        avlims = avlim
        dlims = 10**(np.array([4., 19.]) / 5. - 2.)
    else:
        avlims, dlims = span
    try:
        xbin, ybin = bins
    except (TypeError, ValueError):
        # A single bin count was provided; share it across both axes.
        xbin = ybin = bins
    if ebv:
        ylabel = r'$E(B-V)$ [mag]'
        ylims = avlims  # default Rv goes from [1., 8.] -> min(Rv) = 1.
    else:
        ylabel = r'$A_v$ [mag]'
        ylims = avlims
    if dist_type == 'scale':
        xlabel = r'$s$'
        xlims = (1. / dlims[::-1])**2
        x_min_smooth = s_min_smooth
    elif dist_type == 'parallax':
        xlabel = r'$\pi$ [mas]'
        xlims = 1. / dlims[::-1]
        x_min_smooth = p_min_smooth
    elif dist_type == 'distance':
        xlabel = r'$d$ [kpc]'
        xlims = dlims
        x_min_smooth = d_min_smooth
    elif dist_type == 'distance_modulus':
        xlabel = r'$\mu$'
        xlims = 5. * np.log10(dlims) + 10.
        x_min_smooth = dm_min_smooth
    xbins = np.linspace(xlims[0], xlims[1], xbin + 1)
    ybins = np.linspace(ylims[0], ylims[1], ybin + 1)
    dx, dy = xbins[1] - xbins[0], ybins[1] - ybins[0]
    xspan, yspan = xlims[1] - xlims[0], ylims[1] - ylims[0]

    # Set smoothing: fractions of the span if < 1, bin units otherwise.
    try:
        if smooth[0] < 1:
            xsmooth = smooth[0] * xspan
        else:
            xsmooth = smooth[0] * dx
        if smooth[1] < 1:
            ysmooth = smooth[1] * yspan
        else:
            ysmooth = smooth[1] * dy
    except (TypeError, IndexError):
        # `smooth` is a single value shared by both axes.
        if smooth < 1:
            xsmooth, ysmooth = smooth * xspan, smooth * yspan
        else:
            xsmooth, ysmooth = smooth * dx, smooth * dy
    if np.isfinite(x_min_smooth):
        # Never smooth by more than the parallax uncertainty allows.
        xsmooth = min(x_min_smooth, xsmooth)
    try:
        xsmooth = xsmooth[0]  # catch possible array from `np.diff` above
    except (TypeError, IndexError):
        pass

    # Set defaults.
    truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
    truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
    truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)

    try:
        # Grab distance and reddening samples.
        ddraws, adraws, rdraws = copy.deepcopy(data)
        pdraws = 1. / ddraws
        sdraws = pdraws**2
        dmdraws = 5. * np.log10(ddraws) + 10.
    except ValueError:
        # Regenerate distance and reddening samples from inputs.
        scales, avs, rvs, covs_sar = copy.deepcopy(data)
        # BUGFIX: `lndistprior` was defaulted above and can never be `None`
        # here; check against the default prior instead (as in `cornerplot`).
        if lndistprior == gal_lnprior and coord is None:
            raise ValueError("`coord` must be passed if the default distance "
                             "prior was used.")
        # Generate parallax and Av realizations.
        sdraws, adraws, rdraws = draw_sar(scales, avs, rvs, covs_sar,
                                          ndraws=Nr, avlim=avlim, rvlim=rvlim,
                                          rstate=rstate)
        pdraws = np.sqrt(sdraws)
        ddraws = 1. / pdraws
        dmdraws = 5. * np.log10(ddraws) + 10.

        # Re-apply distance and parallax priors to realizations.
        lnp_draws = lndistprior(ddraws, coord)
        # BUGFIX: `parallax` was replaced with NaN above when missing, so a
        # `None` check always passed and NaNs poisoned the weights; only
        # apply the parallax prior when the values are actually finite.
        if np.isfinite(parallax) and np.isfinite(parallax_err):
            lnp_draws += parallax_lnprior(pdraws, parallax, parallax_err)
        lnp = logsumexp(lnp_draws, axis=1)
        pwt = np.exp(lnp_draws - lnp[:, None])
        pwt /= pwt.sum(axis=1)[:, None]
        weights = np.repeat(weights, Nr)
        weights *= pwt.flatten()

    # Grab draws.
    ydraws = adraws.flatten()
    if ebv:
        # Convert Av -> E(B-V) = Av / Rv.
        ydraws /= rdraws.flatten()
    if dist_type == 'scale':
        xdraws = sdraws.flatten()
    elif dist_type == 'parallax':
        xdraws = pdraws.flatten()
    elif dist_type == 'distance':
        xdraws = ddraws.flatten()
    elif dist_type == 'distance_modulus':
        xdraws = dmdraws.flatten()

    # Generate 2-D histogram.
    H, xedges, yedges = np.histogram2d(xdraws, ydraws, bins=(xbins, ybins),
                                       weights=weights)

    # Apply smoothing (in units of the binning).
    H = norm_kde(H, (xsmooth / dx, ysmooth / dy))

    # Plot the smoothed 2-D histogram.
    img = plt.imshow(H.T, cmap=cmap, aspect='auto',
                     interpolation=None, origin='lower',
                     extent=[xlims[0], xlims[1], ylims[0], ylims[1]],
                     **plot_kwargs)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)

    return H, xedges, yedges, img
def posterior_predictive(models, idxs, reds, dreds, dists, weights=None,
                         flux=False, data=None, data_err=None, data_mask=None,
                         offset=None, vcolor='blue', pcolor='red', labels=None,
                         rstate=None, psig=3., fig=None):
    """
    Plot the posterior predictive SED.

    Parameters
    ----------
    models : `~numpy.ndarray` of shape `(Nmodels, Nfilts, Ncoeffs)`
        Array of magnitude polynomial coefficients used to generate
        reddened photometry.
    idxs : `~numpy.ndarray` of shape `(Nsamps)`
        An array of resampled indices corresponding to the set of models used
        to fit the data.
    reds : `~numpy.ndarray` of shape `(Nsamps)`
        Reddening samples (in Av) associated with the model indices.
    dreds : `~numpy.ndarray` of shape `(Nsamps)`
        "Differential" reddening samples (in Rv) associated with
        the model indices.
    dists : `~numpy.ndarray` of shape `(Nsamps)`
        Distance samples (in kpc) associated with the model indices.
    weights : `~numpy.ndarray` of shape `(Nsamps)`, optional
        An optional set of importance weights used to reweight the samples.
    flux : bool, optional
        Whether to plot the SEDs in flux space rather than magniude space.
        Default is `False`.
    data : `~numpy.ndarray` of shape `(Nfilt)`, optional
        Observed data values (fluxes). If provided, these will be overplotted.
    data_err : `~numpy.ndarray` of shape `(Nfilt)`
        Associated 1-sigma errors on the data values. If provided,
        these will be overplotted as **3-sigma** error bars.
    data_mask : `~numpy.ndarray` of shape `(Nfilt)`
        Binary mask (0/1) indicating whether the data value was observed.
        If provided, these will be used to mask missing/bad data values.
    offset : `~numpy.ndarray` of shape `(Nfilt)`, optional
        Multiplicative photometric offsets that will be applied to
        the data (i.e. `data_new = data * phot_offsets`) and errors
        when provided.
    vcolor : str, optional
        Color used when plotting the violin plots that comprise the
        SED posterior predictive distribution. Default is `'blue'`.
    pcolor : str, optional
        Color used when plotting the provided data values. Default is `'red'`.
    labels : iterable with shape `(ndim,)`, optional
        A list of names corresponding to each filter. If not provided,
        an ascending set of integers `(0, 1, 2, ...)` will be used.
    rstate : `~numpy.random.RandomState`, optional
        `~numpy.random.RandomState` instance.
    psig : float, optional
        The number of sigma to plot when showcasing the error bars
        from any provided `data_err`. Default is `3.`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the traces and marginalized 1-D posteriors
        onto the provided figure. Otherwise, by default an
        internal figure is generated.

    Returns
    -------
    postpredplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`, dict)
        The associated figure, axes, and violinplot dictionary for the
        posterior predictive distribution.

    """
    # Initialize values.
    nmodels, nfilt, ncoeff = models.shape
    nsamps = len(idxs)
    if rstate is None:
        rstate = np.random
    if weights is None:
        weights = np.ones_like(idxs, dtype='float')
    if weights.ndim != 1:
        raise ValueError("Weights must be 1-D.")
    if nsamps != weights.shape[0]:
        raise ValueError("The number of weights and samples disagree!")
    if data_err is None:
        data_err = np.zeros(nfilt)
    if data_mask is None:
        data_mask = np.ones(nfilt, dtype='bool')
    if offset is None:
        offset = np.ones(nfilt)

    # Generate SEDs.
    seds = get_seds(models[idxs], av=reds, rv=dreds, return_flux=flux)
    if flux:
        # SEDs are in flux space: apply inverse-square distance dimming.
        seds /= dists[:, None]**2
    else:
        # SEDs are in magnitude space: apply the distance modulus.
        seds += 5. * np.log10(dists)[:, None]

    # Generate figure.
    if fig is None:
        fig, ax = plt.subplots(1, 1, figsize=(nfilt * 1.5, 10))
    else:
        fig, ax = fig

    # Plot posterior predictive SED distribution.
    if np.any(weights != weights[0]):
        # If weights are non-uniform, oversample indices proportional to
        # the weights so the violins reflect the weighted posterior.
        samp = rstate.choice(nsamps, p=weights / weights.sum(),
                             size=nsamps * 10)
    else:
        samp = np.arange(nsamps)
    # BUGFIX: previously the (re)sampled indices were computed but never
    # used, so importance weights had no effect on the plotted violins.
    parts = ax.violinplot(seds[samp], positions=np.arange(nfilt),
                          showextrema=False)
    for pc in parts['bodies']:
        pc.set_facecolor(vcolor)
        pc.set_edgecolor('none')
        pc.set_alpha(0.4)

    # Plot photometry (with `psig`-sigma error bars) if provided.
    if data is not None:
        if flux:
            m = data[data_mask] * offset[data_mask]
            e = data_err[data_mask] * offset[data_mask]
        else:
            m, e = magnitude(data[data_mask] * offset[data_mask],
                             data_err[data_mask] * offset[data_mask])
        ax.errorbar(np.arange(nfilt)[data_mask], m, yerr=psig * e,
                    marker='o', color=pcolor, linestyle='none',
                    ms=7, lw=3)

    # Label axes.
    ax.set_xticks(np.arange(nfilt))
    if labels is not None:
        ax.set_xticklabels(labels, rotation='vertical')
    if flux:
        ax.set_ylabel('Flux')
    else:
        ax.set_ylabel('Magnitude')
        ax.set_ylim(ax.get_ylim()[::-1])  # flip axis (brighter is up)
    plt.tight_layout()

    return fig, ax, parts
def photometric_offsets(phot, err, mask, models, idxs, reds, dreds, dists,
                        x=None, flux=True, weights=None, bins=100,
                        offset=None, dim_prior=True,
                        plot_thresh=0., cmap='viridis', xspan=None, yspan=None,
                        titles=None, xlabel=None, plot_kwargs=None, fig=None):
    """
    Plot photometric offsets (`mag_pred - mag_obs`).

    Parameters
    ----------
    phot : `~numpy.ndarray` of shape `(Nobj, Nfilt)`, optional
        Observed data values (fluxes). If provided, these will be overplotted.
    err : `~numpy.ndarray` of shape `(Nobj, Nfilt)`
        Associated errors on the data values. If provided, these will be
        overplotted as error bars.
    mask : `~numpy.ndarray` of shape `(Nobj, Nfilt)`
        Binary mask (0/1) indicating whether the data value was observed.
        If provided, these will be used to mask missing/bad data values.
    models : `~numpy.ndarray` of shape `(Nmodels, Nfilts, Ncoeffs)`
        Array of magnitude polynomial coefficients used to generate
        reddened photometry.
    idxs : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
        An array of resampled indices corresponding to the set of models used
        to fit the data.
    reds : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
        Reddening samples (in Av) associated with the model indices.
    dreds : `~numpy.ndarray` of shape `(Nsamps)`
        "Differential" reddening samples (in Rv) associated with
        the model indices.
    dists : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
        Distance samples (in kpc) associated with the model indices.
    x : `~numpy.ndarray` with shape `(Nobj)` or `(Nobj, Nsamps)`, optional
        Corresponding values to be plotted on the `x` axis. If not provided,
        the default behavior is to plot as a function of observed magnitude.
    flux : bool, optional
        Whether the photometry provided is in fluxes (instead of magnitudes).
        Default is `True`.
    weights : `~numpy.ndarray` of shape `(Nobj)` or `(Nobj, Nsamps)`, optional
        An optional set of importance weights used to reweight the samples.
    bins : single value or iterable of length `Nfilt`, optional
        The number of bins to be used in each band. Default is `100`.
    offset : `~numpy.ndarray` of shape `(Nfilt)`, optional
        Multiplicative photometric offsets that will be applied to
        the data (i.e. `data_new = data * phot_offsets`) and errors
        when provided.
    dim_prior : bool, optional
        Whether to apply a dimensional-based correction (prior) to the
        log-likelihood when reweighting the data while cycling through each
        band. Transforms the likelihood to a chi2 distribution
        with `Nfilt - 3` degrees of freedom. Default is `True`.
    plot_thresh : float, optional
        The threshold used to threshold the colormap when plotting.
        Default is `0.`.
    cmap : colormap, optional
        The colormap used when plotting results. Default is `'viridis'`.
    xspan : iterable with shape `(nfilt, 2)`, optional
        A list where each element is a length-2 tuple containing
        lower and upper bounds for the x-axis for each plot.
    yspan : iterable with shape `(nfilt, 2)`, optional
        A list where each element is a length-2 tuple containing
        lower and upper bounds for the y-axis for each plot.
    titles : iterable of str of length `Nfilt`, optional
        Titles for each of the subplots corresponding to each band.
        If not provided `Band #` will be used.
    xlabel : str, optional
        Labels for the x-axis of each subplot. If not provided,
        these will default to the titles.
    plot_kwargs : kwargs, optional
        Keyword arguments to be passed to `~matplotlib.pyplot.imshow`.
    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, overplot the traces and marginalized 1-D posteriors
        onto the provided figure. Otherwise, by default an
        internal figure is generated.

    Returns
    -------
    postpredplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        The associated figure and axes for the photometric offsets.

    """
    # Initialize values.
    nmodels, nfilt, ncoeff = models.shape
    nobj, nsamps = idxs.shape
    if plot_kwargs is None:
        plot_kwargs = dict()
    if weights is None:
        weights = np.ones((nobj, nsamps))
    elif weights.shape != (nobj, nsamps):
        # Broadcast per-object weights over the posterior samples.
        weights = np.repeat(weights, nsamps).reshape(nobj, nsamps)
    # Standardize `bins` to one bin count per band.
    # BUGFIX: previously a length-2 iterable was replicated `nfilt` times,
    # making `bins[i]` a tuple and crashing `np.linspace(..., bins[i] + 1)`.
    try:
        bins = list(bins)
        if len(bins) != nfilt:
            raise ValueError("The number of `bins` provided does not match "
                             "the number of bands.")
    except TypeError:
        # A single value was provided; share it across all bands.
        bins = [bins for i in range(nfilt)]
    if titles is None:
        titles = ['Band {0}'.format(i) for i in range(nfilt)]
    if xlabel is None:
        if x is None:
            xlabel = titles
        else:
            xlabel = ['Label' for i in range(nfilt)]
    else:
        xlabel = [xlabel for i in range(nfilt)]
    if offset is None:
        offset = np.ones(nfilt)

    # Compute posterior predictive SED magnitudes.
    mpred = get_seds(models[idxs.flatten()],
                     av=reds.flatten(), rv=dreds.flatten())
    mpred += 5. * np.log10(dists.flatten())[:, None]  # distance modulus
    mpred = mpred.reshape(nobj, nsamps, nfilt)

    # Convert observed data to magnitudes.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if flux:
            magobs, mageobs = magnitude(phot * offset, err * offset)
        else:
            # Already magnitudes; multiplicative flux offsets are additive.
            magobs, mageobs = phot + offset, err

    # Generate figure.
    if fig is None:
        ncols = 5
        nrows = (nfilt - 1) // ncols + 1
        fig, axes = plt.subplots(nrows, ncols,
                                 figsize=(ncols * 6, nrows * 5))
    else:
        fig, axes = fig
        nrows, ncols = axes.shape
    ax = axes.flatten()

    # Plot offsets.
    for i in range(nfilt):
        # Select objects observed in band `i` that also have more than 3
        # other observed bands (needed for the reweighting below).
        mtemp = np.array(mask)
        mtemp[:, i] = False
        s = (mask[:, i] & (np.sum(mtemp, axis=1) > 3) &
             (np.all(np.isfinite(magobs), axis=1)))
        # Recompute sample weights with band `i` masked out so the
        # prediction in band `i` is not conditioned on its own data.
        lnl = np.array([phot_loglike(mo, me, mt, mp, dim_prior=dim_prior)
                        for mo, me, mt, mp in zip(magobs[s], mageobs[s],
                                                  mtemp[s], mpred[s])])
        levid = logsumexp(lnl, axis=1)
        logwt = lnl - levid[:, None]
        wt = np.exp(logwt)
        wt /= wt.sum(axis=1)[:, None]
        # Repeat observed values to match up with `nsamps`.
        mobs = np.repeat(magobs[s, i], nsamps)
        if x is None:
            xp = mobs
        else:
            if x.shape == (nobj, nsamps):
                xp = x[s].flatten()
            else:
                xp = np.repeat(x[s], nsamps)
        # Plot 2-D histogram of (x, predicted - observed).
        mp = mpred[s, :, i].flatten()
        w = weights[s].flatten() * wt.flatten()
        if xspan is None:
            xlow, xhigh = quantile(xp, [0.02, 0.98], weights=w)
            bx = np.linspace(xlow, xhigh, bins[i] + 1)
        else:
            bx = np.linspace(xspan[i][0], xspan[i][1], bins[i] + 1)
        if yspan is None:
            ylow, yhigh = quantile(mp - mobs, [0.02, 0.98], weights=w)
            by = np.linspace(ylow, yhigh, bins[i] + 1)
        else:
            by = np.linspace(yspan[i][0], yspan[i][1], bins[i] + 1)
        ax[i].hist2d(xp, mp - mobs, bins=(bx, by), weights=w,
                     cmin=plot_thresh, cmap=cmap, **plot_kwargs)
        ax[i].set_xlabel(xlabel[i])
        ax[i].set_title(titles[i])
        ax[i].set_ylabel(r'$\Delta\,$mag')

    # Clear any leftover (unused) subplots.
    for i in range(nfilt, nrows * ncols):
        ax[i].set_frame_on(False)
        ax[i].set_xticks([])
        ax[i].set_yticks([])
    plt.tight_layout()

    return fig, axes
def photometric_offsets_2d(phot, err, mask, models, idxs, reds, dreds, dists,
                           x, y, flux=True, weights=None, bins=100,
                           offset=None, dim_prior=True, plot_thresh=10.,
                           cmap='coolwarm', clims=(-0.05, 0.05),
                           xspan=None, yspan=None, titles=None, show_off=True,
                           xlabel=None, ylabel=None, plot_kwargs=None,
                           fig=None):
    """
    Plot 2-D binned photometric offsets (`mag_pred - mag_obs`).

    Parameters
    ----------
    phot : `~numpy.ndarray` of shape `(Nobj, Nfilt)`
        Observed data values (fluxes by default; see `flux`).

    err : `~numpy.ndarray` of shape `(Nobj, Nfilt)`
        Associated errors on the data values.

    mask : `~numpy.ndarray` of shape `(Nobj, Nfilt)`
        Binary mask (0/1) indicating whether the data value was observed.

    models : `~numpy.ndarray` of shape `(Nmodels, Nfilts, Ncoeffs)`
        Array of magnitude polynomial coefficients used to generate
        reddened photometry.

    idxs : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
        An array of resampled indices corresponding to the set of models used
        to fit the data.

    reds : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
        Reddening samples (in Av) associated with the model indices.

    dreds : `~numpy.ndarray` of shape `(Nsamps)`
        "Differential" reddening samples (in Rv) associated with
        the model indices.

    dists : `~numpy.ndarray` of shape `(Nobj, Nsamps)`
        Distance samples (in kpc) associated with the model indices.

    x : `~numpy.ndarray` with shape `(Nobj)` or `(Nobj, Nsamps)`
        Values to be binned over on the x-axis.

    y : `~numpy.ndarray` with shape `(Nobj)` or `(Nobj, Nsamps)`
        Values to be binned over on the y-axis.

    flux : bool, optional
        Whether the photometry provided is in fluxes (instead of magnitudes).
        Default is `True`.

    weights : `~numpy.ndarray` of shape `(Nobj)` or `(Nobj, Nsamps)`, optional
        An optional set of importance weights used to reweight the samples.

    bins : single value or iterable of length `Nfilt`, optional
        The number of bins to use per band. Passed to
        `~numpy.histogram2d`. Default is `100`.

    offset : `~numpy.ndarray` of shape `(Nfilt)`, optional
        Multiplicative photometric offsets that will be applied to
        the data (i.e. `data_new = data * phot_offsets`) and errors
        when provided.

    dim_prior : bool, optional
        Whether to apply a dimensional-based correction (prior) to the
        log-likelihood when reweighting the data while cycling through each
        band. Transforms the likelihood to a chi2 distribution
        with `Nfilt - 3` degrees of freedom. Default is `True`.

    plot_thresh : float, optional
        Minimum number of objects required in a 2-D bin for its median
        offset to be computed and plotted. Default is `10.`.

    cmap : colormap, optional
        The colormap used when plotting results. Default is `'coolwarm'`.

    clims : 2-tuple, optional
        Plotting bounds for the colorbar. Default is `(-0.05, 0.05)`.

    xspan : iterable with shape `(nfilt, 2)`, optional
        Lower and upper display bounds for the x-axis of each plot.

    yspan : iterable with shape `(nfilt, 2)`, optional
        Lower and upper display bounds for the y-axis of each plot.

    titles : iterable of str of length `Nfilt`, optional
        Titles for each of the subplots corresponding to each band.
        If not provided `Band #` will be used.

    show_off : bool, optional
        Whether to include the offsets in the titles. Default is `True`.

    xlabel : str, optional
        Label for the x-axis of each subplot. Defaults to `X`.

    ylabel : str, optional
        Label for the y-axis of each subplot. Defaults to `Y`.

    plot_kwargs : kwargs, optional
        Keyword arguments to be passed to `~matplotlib.pyplot.imshow`.

    fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
        If provided, plot onto the provided figure/axes. Otherwise an
        internal figure is generated.

    Returns
    -------
    postpredplot : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
        The associated figure and axes for the photometric offsets.

    """

    # Initialize values.
    nmodels, nfilt, ncoeff = models.shape
    nobj, nsamps = idxs.shape
    if plot_kwargs is None:
        plot_kwargs = dict()
    if weights is None:
        weights = np.ones((nobj, nsamps))
    elif weights.shape != (nobj, nsamps):
        weights = np.repeat(weights, nsamps).reshape(nobj, nsamps)
    try:
        nbins = len(bins)
        if nbins != 2:
            bins = [b for b in bins]
        else:
            # A length-2 spec is a single (nx, ny) pair shared by all bands.
            bins = [bins for i in range(nfilt)]
    except TypeError:
        # `bins` is a scalar; replicate it for every band.
        # (Was a bare `except:` which could mask unrelated errors.)
        bins = [bins for i in range(nfilt)]
    if titles is None:
        titles = ['Band {0}'.format(i) for i in range(nfilt)]
    if show_off and offset is not None:
        titles = [t + ' ({:2.2}% offset)'.format(100. * (off - 1.))
                  for t, off in zip(titles, offset)]
    if xlabel is None:
        xlabel = 'X'
    if ylabel is None:
        ylabel = 'Y'
    if offset is None:
        offset = np.ones(nfilt)

    # Compute posterior predictive SED magnitudes (absolute + distance
    # modulus, with dust parameters applied).
    mpred = get_seds(models[idxs.flatten()],
                     av=reds.flatten(), rv=dreds.flatten())
    mpred += 5. * np.log10(dists.flatten())[:, None]
    mpred = mpred.reshape(nobj, nsamps, nfilt)

    # Convert observed data to magnitudes.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if flux:
            magobs, mageobs = magnitude(phot * offset, err * offset)
        else:
            magobs, mageobs = phot + offset, err

    # Magnitude offsets; unobserved bands are masked out with NaNs.
    dm = mpred - magobs[:, None]
    for i in range(nfilt):
        dm[~mask[:, i], :, i] = np.nan

    # Generate figure.
    if fig is None:
        ncols = 5
        nrows = (nfilt - 1) // ncols + 1
        fig, axes = plt.subplots(nrows, ncols,
                                 figsize=(ncols * 15, nrows * 12))
    else:
        fig, axes = fig
        nrows, ncols = axes.shape
    ax = axes.flatten()

    # Plot offsets.
    for i in range(nfilt):
        # Bin in 2-D.
        _, xbins, ybins = np.histogram2d(x, y, bins=bins[i])
        xcent = 0.5 * (xbins[1:] + xbins[:-1])
        ycent = 0.5 * (ybins[1:] + ybins[:-1])
        # Fix: must be a list (was a tuple) so the `xspan`/`yspan`
        # overrides below can assign into it without a TypeError.
        bounds = [xcent[0], xcent[-1], ycent[0], ycent[-1]]  # default size
        # Digitize values. Fix: np.digitize returns 1-based indices for
        # in-range values (bins[i-1] <= x < bins[i]), so subtract 1 to
        # align with the 0-based histogram bins iterated below.
        xloc = np.digitize(x, xbins) - 1
        yloc = np.digitize(y, ybins) - 1
        # Compute selection ignoring current band.
        mtemp = np.array(mask)
        mtemp[:, i] = False
        s = (mask[:, i] & (np.sum(mtemp, axis=1) > 3) &
             (np.all(np.isfinite(magobs), axis=1)))
        # Compute weights from ignoring current band.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            lnl = np.array([phot_loglike(mo, me, mt, mp, dim_prior=dim_prior)
                            for mo, me, mt, mp in zip(magobs, mageobs,
                                                      mtemp, mpred)])
        levid = logsumexp(lnl, axis=1)
        logwt = lnl - levid[:, None]
        wt = np.exp(logwt)
        wt /= wt.sum(axis=1)[:, None]
        # Compute weighted median offsets per 2-D bin.
        offset2d = np.zeros((len(xbins) - 1, len(ybins) - 1))
        for xidx in range(len(xbins) - 1):
            for yidx in range(len(ybins) - 1):
                bsel = np.where((xloc == xidx) & (yloc == yidx) & s)[0]
                if len(bsel) >= plot_thresh:
                    # If we have enough objects, compute weighted median.
                    off, w = dm[bsel, :, i], wt[bsel] * weights[bsel]
                    off_med = quantile(off.flatten(), [0.5], w.flatten())[0]
                    offset2d[xidx, yidx] = off_med
                else:
                    # If we don't have enough objects, mask the bin.
                    offset2d[xidx, yidx] = np.nan
        # Plot offsets over 2-D histogram.
        if xspan is not None:
            bounds[:2] = xspan[i]
        if yspan is not None:
            bounds[2:] = yspan[i]
        img = ax[i].imshow(offset2d.T, origin='lower', extent=bounds,
                           vmin=clims[0], vmax=clims[1], aspect='auto',
                           cmap=cmap, **plot_kwargs)
        ax[i].set_xlabel(xlabel)
        ax[i].set_ylabel(ylabel)
        ax[i].set_title(titles[i])
        plt.colorbar(img, ax=ax[i], label=r'$\Delta\,$mag')

    # Clear other axes.
    for i in range(nfilt, nrows * ncols):
        ax[i].set_frame_on(False)
        ax[i].set_xticks([])
        ax[i].set_yticks([])

    plt.tight_layout()

    return fig, axes
def _hist2d(x, y, smooth=0.02, span=None, weights=None, levels=None,
            ax=None, color='gray', plot_datapoints=False, plot_density=True,
            plot_contours=True, no_fill_contours=False, fill_contours=True,
            contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
            **kwargs):
    """
    Internal function called by :meth:`cornerplot` used to generate a
    a 2-D histogram/contour of samples.

    Parameters
    ----------
    x : iterable with shape `(nsamps,)`
        Sample positions in the first dimension.

    y : iterable with shape `(nsamps,)`
        Sample positions in the second dimension.

    span : iterable with shape `(ndim,)`, optional
        A list where each element is either a length-2 tuple containing
        lower and upper bounds or a float from `(0., 1.]` giving the
        fraction of (weighted) samples to include. If a fraction is provided,
        the bounds are chosen to be equal-tailed. An example would be::

            span = [(0., 10.), 0.95, (5., 6.)]

        Default is `0.99` (99% credible interval).

    weights : iterable with shape `(nsamps,)`
        Weights associated with the samples. Default is `None` (no weights).

    levels : iterable, optional
        The contour levels to draw. Default are `[0.5, 1, 1.5, 2]`-sigma.

    ax : `~matplotlib.axes.Axes`, optional
        An `~matplotlib.axes.axes` instance on which to add the 2-D histogram.
        If not provided, the current active axes will be used.

    color : str, optional
        The `~matplotlib`-style color used to draw lines and color cells
        and contours. Default is `'gray'`.

    plot_datapoints : bool, optional
        Whether to plot the individual data points. Default is `False`.

    plot_density : bool, optional
        Whether to draw the density colormap. Default is `True`.

    plot_contours : bool, optional
        Whether to draw the contours. Default is `True`.

    no_fill_contours : bool, optional
        Whether to add absolutely no filling to the contours. This differs
        from `fill_contours=False`, which still adds a white fill at the
        densest points. Default is `False`.

    fill_contours : bool, optional
        Whether to fill the contours. Default is `True`.

    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.

    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.

    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.

    """

    if ax is None:
        ax = plt.gca()

    # Determine plotting bounds.
    data = [x, y]
    if span is None:
        span = [0.99 for i in range(2)]
    span = list(span)
    if len(span) != 2:
        raise ValueError("Dimension mismatch between samples and span.")
    for i, _ in enumerate(span):
        try:
            # Explicit (min, max) bounds were provided.
            xmin, xmax = span[i]
        except (TypeError, ValueError):
            # A float was provided: interpret it as a credible-interval
            # fraction. (Was a bare `except:`.)
            q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
            span[i] = quantile(data[i], q, weights=weights)

    # The default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)

    # Color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list(
        "density_cmap", [color, (1, 1, 1, 0)])

    # Color map used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list(
        "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)

    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)

    # Initialize smoothing.
    if (isinstance(smooth, int_type) or isinstance(smooth, float_type)):
        smooth = [smooth, smooth]
    bins = []
    svalues = []
    for s in smooth:
        if isinstance(s, int_type):
            # If `s` is an integer, the weighted histogram has
            # `s` bins within the provided bounds.
            bins.append(s)
            svalues.append(0.)
        else:
            # If `s` is a float, oversample the data relative to the
            # smoothing filter by a factor of 2, then use a Gaussian
            # filter to smooth the results.
            bins.append(int(round(2. / s)))
            svalues.append(2.)

    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
                                 range=list(map(np.sort, span)),
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range.")

    # Smooth the results.
    if not np.all(svalues == 0.):
        H = norm_kde(H, svalues)

    # Compute the density levels.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except IndexError:
            # No cell sits below this level; fall back to the densest cell.
            # (Was a bare `except:`.)
            V[i] = Hflat[0]
    V.sort()
    m = (np.diff(V) == 0)
    if np.any(m) and plot_contours:
        logging.warning("Too few points to create valid contours.")
    # Nudge duplicated levels apart so contouring remains valid.
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = (np.diff(V) == 0)
    V.sort()

    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])

    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([X1[0] + np.array([-2, -1]) * np.diff(X1[:2]), X1,
                         X1[-1] + np.array([1, 2]) * np.diff(X1[-2:])])
    Y2 = np.concatenate([Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]), Y1,
                         Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:])])

    # Plot the data points.
    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)

    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
                    cmap=white_cmap, antialiased=False)

    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
                                                             False)
        ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max() * (1 + 1e-4)]]),
                    **contourf_kwargs)
    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)

    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)

    ax.set_xlim(span[0])
    ax.set_ylim(span[1])
| 38.369375 | 79 | 0.590738 |
cccad837e6301b13b0d507c5f85f3147c5bf118d | 2,220 | py | Python | deployment/cfn/utils/cfn.py | project-icp/bee-pollinator-app | 9357755e6d78e1bf8594de1b777d02318bb3e54f | [
"Apache-2.0"
] | 6 | 2016-10-14T18:54:39.000Z | 2021-06-03T21:04:27.000Z | deployment/cfn/utils/cfn.py | project-icp/bee-pollinator-app | 9357755e6d78e1bf8594de1b777d02318bb3e54f | [
"Apache-2.0"
] | 528 | 2016-10-14T17:38:54.000Z | 2022-02-26T10:53:21.000Z | deployment/cfn/utils/cfn.py | project-icp/bee-pollinator-app | 9357755e6d78e1bf8594de1b777d02318bb3e54f | [
"Apache-2.0"
] | 2 | 2016-10-17T18:06:38.000Z | 2020-10-23T09:48:24.000Z | import boto
from boto.ec2 import get_region
class AvailabilityZone(object):
    """Helper class that represents an availability zone

    We often only want 2 things from an AZ - a slug and name.
    This class keeps those in one location.
    """
    def __init__(self, availability_zone):
        """Creates an AvailabilityZone wrapper

        Args:
          availability_zone (AvailabilityZone): boto availability zone object
        """
        self.availability_zone = availability_zone

    @property
    def cfn_name(self):
        """
        Utility method to return a string appropriate for CloudFormation
        name of a resource (e.g. ``us-east-1a`` -> ``UsEast1A``)
        """
        # .title() capitalizes every dash-separated word, then the dashes
        # are stripped out.
        return self.availability_zone.name.title().replace('-', '')

    @property
    def name(self):
        """Utility function to return the name of an availability zone"""
        return self.availability_zone.name
def get_availability_zones(aws_profile):
    """Return the availability zones visible to the given AWS profile.

    Returns:
        (list of AvailabilityZone): one wrapper per zone reported by EC2
        for the profile's region.
    """
    ec2_connection = boto.connect_ec2(profile_name=aws_profile)
    zones = ec2_connection.get_all_zones()
    return list(map(AvailabilityZone, zones))
def get_subnet_cidr_block():
    """Yield the 256 unique /24 subnet CIDR blocks inside 10.0.0.0/16."""
    for third_octet in range(256):
        yield '10.0.%s.0/24' % third_octet
def get_recent_ami(aws_profile, filters=None, owner='self',
                   region='us-east-1', executable_by='self'):
    """Return the id of the most recently created matching AMI.

    Args:
        aws_profile (str): name of the AWS profile used to connect.
        filters (dict): optional EC2 image filters. Defaults to no
            filtering. (The previous mutable-`{}` default argument was
            replaced with the `None` sentinel idiom.)
        owner (str): image owner queried first.
        region (str): EC2 region to query.
        executable_by (str): fallback query used when `owner` has no
            matching images.

    Raises:
        IndexError: if no matching non release-candidate image exists.
    """
    if filters is None:
        filters = {}
    conn = boto.connect_ec2(profile_name=aws_profile,
                            region=get_region(region))

    # Filter images by owned by self first.
    images = conn.get_all_images(owners=owner, filters=filters)

    # If no images are owned by self, look for images self can execute.
    if not images:
        images = conn.get_all_images(executable_by=executable_by,
                                     filters=filters)

    # Make sure RC images are omitted from results.
    images = [image for image in images if '.rc-' not in image.name]

    return sorted(images, key=lambda i: i.creationDate, reverse=True)[0].id
| 30.410959 | 78 | 0.648649 |
a3db005ac749123a16c6303cd46c532acd7b5ccd | 3,148 | py | Python | examples/tempy_examples.py | Jai2305/TemPy | f64cd91d5c83724d37e92f4ab60103244922cccc | [
"Apache-2.0"
] | 154 | 2017-06-14T09:44:01.000Z | 2021-11-17T01:13:25.000Z | examples/tempy_examples.py | Jai2305/TemPy | f64cd91d5c83724d37e92f4ab60103244922cccc | [
"Apache-2.0"
] | 95 | 2017-08-12T12:03:40.000Z | 2022-03-31T09:02:14.000Z | examples/tempy_examples.py | Jai2305/TemPy | f64cd91d5c83724d37e92f4ab60103244922cccc | [
"Apache-2.0"
] | 88 | 2017-08-12T16:13:30.000Z | 2022-03-30T04:11:32.000Z | # -*- coding: utf-8 -*-
import json
import os
from flask import Flask, request, redirect, url_for, session
# Flask application serving the TemPy examples.
app = Flask(__name__)
# In an actual project, this secret key would be hidden.
# It is mostly used so we can create an example login session.
app.secret_key = '&Gp07(pz#oIA]jQ'
# Token that a logged-in admin session carries (see the login handlers).
super_secret_token = 'admin_token'
@app.route('/')
def none_handler():
    # Root URL renders the homepage template.
    from templates.homepage import page
    return page.render()
@app.route('/hello_world')
def hello_world_handler():
    # Render the hello-world example template.
    from templates.hello_world import page
    return page.render()
@app.route('/star_wars')
def star_wars_handler():
    # Render the Star Wars character list loaded from the bundled JSON file.
    from templates.star_wars import page
    json_filename = os.path.join(app.static_folder, 'sw-people.json')
    # Read explicitly as UTF-8 so decoding does not depend on the host's
    # locale default encoding.
    with open(json_filename, 'r', encoding='utf-8') as f:
        people = json.load(f)['characters']
    return page.render(characters=people)
@app.route('/list')
def render_list_handler():
    # Render the list example template.
    from templates.render_list import page
    return page.render()
@app.route('/static')
def static_files_handler():
    # Render the static-files example template.
    from templates.static_files import page
    return page.render()
@app.route('/table')
def table_handler():
    # Render the table example template.
    from templates.table_example import page
    return page.render()
@app.route('/css')
def css_handler():
    # Render the CSS example template.
    from templates.css_example import page
    return page.render()
@app.route('/user_login')
def login_handler():
    # Show the login page, or skip straight to the user page when the
    # session already carries the admin token.
    from templates.session_example.login_page import get_page
    if session.get('messages'):
        # Flash messages queued by other handlers are displayed once.
        page = get_page(messages=session.pop('messages'))
    elif session.get('token') == super_secret_token:
        return redirect(url_for('user_page_handler'))
    else:
        page = get_page()
    return page.render()
@app.route('/user_logout', methods=['POST'])
def logout_handler():
    # Drop the session token (if any) and bounce back to the login page.
    if session.get('token'):
        session.pop('token')
        # Fix: corrected "Successfuly" typo in the user-facing message.
        session['messages'] = ['Successfully logged out!']
    return redirect(url_for('login_handler'))
@app.route('/user_page', methods=['GET', 'POST'])
def user_page_handler():
    # Validate posted credentials (or an existing session token) and
    # render the user page on success; otherwise queue a message and
    # redirect back to the login page.
    from templates.session_example.user_page import page
    data = dict(request.form)
    # In an actual project, you will probably want to use a db :)
    login_credentials = {'username': 'admin', 'password': 'admin'}
    if session.get('token') == super_secret_token:
        # Already logged in.
        success = True
    elif not data:
        session['messages'] = ['You must login first. ']
        success = False
    elif data['username'] != login_credentials['username'] or data['password'] != login_credentials['password']:
        session['messages'] = ['Invalid credentials. ']
        success = False
    else:
        # Credentials match: establish the admin session.
        session['token'] = 'admin_token'
        success = True
    if not success:
        return redirect(url_for('login_handler'))
    return page.render()
@app.route('/video_tag')
def video_tag_handler():
    # Render the video-tag example template.
    from templates.video_tag import page
    return page.render()
@app.route('/form')
def form_handler():
    # Render the form-components example template.
    from templates.form_components import page
    return page.render()
@app.route('/homepage')
def homepage_handler():
    # Explicit /homepage alias of the root handler.
    from templates.homepage import page
    return page.render()
if __name__ == '__main__':
    # Run the example app locally on port 8888 with debug enabled.
    app.run(port=8888, debug=True)
| 24.984127 | 112 | 0.681703 |
fa131f3e1106dc243fa62890d761bed4782906bc | 2,924 | py | Python | tests/src/tm_json/gen.py | to-miz/tm | c97a3c14aa769b5a8f94b394b4535cd42eeb31d2 | [
"MIT"
] | 51 | 2016-09-02T16:21:24.000Z | 2021-08-12T01:30:45.000Z | tests/src/tm_json/gen.py | to-miz/tm | c97a3c14aa769b5a8f94b394b4535cd42eeb31d2 | [
"MIT"
] | 3 | 2018-12-29T15:10:41.000Z | 2019-10-29T23:21:57.000Z | tests/src/tm_json/gen.py | to-miz/tm | c97a3c14aa769b5a8f94b394b4535cd42eeb31d2 | [
"MIT"
] | 5 | 2016-11-21T08:48:41.000Z | 2021-06-24T20:21:10.000Z | import os
def process_file(filename, tests):
    """Print a doctest TEST_CASE that embeds `filename` as a C string.

    The file is read as raw bytes: quotes, backslashes and newlines are
    escaped, and control or non-ASCII bytes are emitted as ``\\xNN""``
    (the trailing ``""`` breaks the hex escape so a following literal hex
    digit is not swallowed into it by the C lexer).

    Args:
        filename: path of the JSON(5) test input to embed.
        tests: list of ``[ignore, expected, ex, flag]`` entries; one CHECK
            line is printed per entry. ``ignore`` of the first entry marks
            the whole TEST_CASE as ``may_fail``.
    """
    with open(filename, 'rb') as opened_file:
        data = opened_file.read()
    data_str = ""
    for byte in data:
        if byte < 127:
            c = chr(byte)
            if c == '"':
                data_str += "\\\""
            elif c == '\\':
                data_str += "\\\\"
            elif c == '\n':
                data_str += "\\n"
            elif byte < 32:
                data_str += "\\x{:X}\"\"".format(byte)
            else:
                data_str += c
        else:
            data_str += "\\x{:X}\"\"".format(byte)
    ignore_str = " * doctest::may_fail(true)" if tests[0][0] else ""
    # Bug fix: derive the test name from the `filename` parameter instead
    # of the global loop variable `file` the original accidentally used
    # (which made the function fail standalone with a NameError).
    test_name = os.path.splitext(os.path.split(filename)[1])[0]
    print('TEST_CASE("{}"{}) {{'.format(test_name, ignore_str))
    print('    const char* json = "{}";'.format(data_str))
    for ignore, expected, ex, flag in tests:
        result_str = "true" if expected else "false"
        ex_str = "_ex" if ex else ""
        print('    CHECK(check_json{}(json, {}, {}) == {});'
              .format(ex_str, len(data), flag, result_str))
    print('}')
# Generate TEST_CASEs for the JSONTestSuite corpus.
# File-name prefixes encode expectations: y_* must parse, n_* must fail,
# i_* is implementation-defined (ignored / may_fail).
for file in os.listdir("../../external/data/test_parsing"):
    if file.endswith(".json"):
        if file == "n_structure_100000_opening_arrays.json" or file == "n_structure_open_array_object.json":
            # These two tests are implemented manually because the json files are too big for a generated source file.
            continue
        if ("UTF16" in file) or ("utf16" in file) or ("UTF-16" in file) or ("BOM" in file) or ("UTF8_surrogate" in file):
            # Encodings other than Utf-8 not supported.
            continue;
        ignore = file.startswith("i")
        expected = file.startswith("y") or (ignore and ("surrogate" not in file))
        concat_file = os.path.join("../../external/data/test_parsing/", file)
        tests = [[ignore, expected, False, "JSON_READER_STRICT"]]
        if expected:
            # Anything valid in strict JSON must also pass in JSON5 mode.
            tests.append([ignore, expected, False, "JSON_READER_JSON5"])
        process_file(concat_file, tests)

# Generate TEST_CASEs for the json5-tests corpus; here the file extension
# encodes the expectation (.json5 valid only in JSON5 mode, .json valid in
# both, .js/.txt invalid in both).
# from https://stackoverflow.com/a/19309964
files = [os.path.join(dp, f) for dp, dn, fn in os.walk("../../external/data/json5-tests") for f in fn]
for file in files:
    ignore = file.find("unicode") != -1
    if file.endswith(".json5"):
        process_file(file, [[ignore, False, False, "JSON_READER_STRICT"], [ignore, True, True, "JSON_READER_JSON5"]])
    elif file.endswith(".json"):
        process_file(file, [[ignore, True, False, "JSON_READER_STRICT"], [ignore, True, True, "JSON_READER_JSON5"]])
    elif file.endswith(".js"):
        process_file(file, [[ignore, False, False, "JSON_READER_STRICT"], [ignore, False, False, "JSON_READER_JSON5"]])
    elif file.endswith(".txt"):
        process_file(file, [[ignore, False, False, "JSON_READER_STRICT"], [ignore, False, False, "JSON_READER_JSON5"]])
34da0190c8d70102dc6da9c7d6c88149e624daff | 64,522 | py | Python | tests/test_connector.py | luizalabs/aiohttp | e57ef8cdbf16f029310ccc3ff93f8e5eff64c2f6 | [
"Apache-2.0"
] | 1 | 2019-06-05T16:56:59.000Z | 2019-06-05T16:56:59.000Z | tests/test_connector.py | luizalabs/aiohttp | e57ef8cdbf16f029310ccc3ff93f8e5eff64c2f6 | [
"Apache-2.0"
] | null | null | null | tests/test_connector.py | luizalabs/aiohttp | e57ef8cdbf16f029310ccc3ff93f8e5eff64c2f6 | [
"Apache-2.0"
] | null | null | null | """Tests of http client with custom Connector"""
import asyncio
import gc
import hashlib
import os.path
import platform
import socket
import ssl
import uuid
from collections import deque
from unittest import mock
import pytest
from yarl import URL
import aiohttp
from aiohttp import client, web
from aiohttp.client import ClientRequest, ClientTimeout
from aiohttp.client_reqrep import ConnectionKey
from aiohttp.connector import Connection, _DNSCacheTable
from aiohttp.helpers import PY_37
from aiohttp.test_utils import make_mocked_coro, unused_port
from aiohttp.tracing import Trace
@pytest.fixture()
def key():
    """Connection key for a plain-HTTP localhost connection."""
    host, port, is_ssl = 'localhost', 80, False
    return ConnectionKey(host, port, is_ssl, None, None, None, None)
@pytest.fixture
def key2():
    """Second (equal-valued) plain-HTTP localhost connection key."""
    host, port, is_ssl = 'localhost', 80, False
    return ConnectionKey(host, port, is_ssl, None, None, None, None)
@pytest.fixture
def ssl_key():
    """Connection key for an SSL localhost connection."""
    host, port, is_ssl = 'localhost', 80, True
    return ConnectionKey(host, port, is_ssl, None, None, None, None)
@pytest.fixture
def unix_sockname(shorttmpdir):
    # Path for a throwaway unix domain socket; presumably placed in a
    # short tmp dir because unix socket paths have a small OS length
    # limit — see the `shorttmpdir` fixture.
    sock_path = shorttmpdir / 'socket.sock'
    return str(sock_path)
@pytest.fixture
def unix_server(loop, unix_sockname):
    # Factory fixture: `go(app)` starts the app on the unix socket; every
    # started runner is cleaned up after the test finishes.
    runners = []

    async def go(app):
        runner = web.AppRunner(app)
        runners.append(runner)
        await runner.setup()
        site = web.UnixSite(runner, unix_sockname)
        await site.start()

    yield go

    for runner in runners:
        loop.run_until_complete(runner.cleanup())
def test_connection_del(loop) -> None:
    connector = mock.Mock()
    key = mock.Mock()
    protocol = mock.Mock()
    loop.set_debug(0)
    conn = Connection(connector, key, protocol, loop=loop)
    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)

    # Dropping the last reference must release the protocol back to the
    # connector (forcing a close) and warn about the unclosed connection.
    with pytest.warns(ResourceWarning):
        del conn
        gc.collect()

    connector._release.assert_called_with(
        key,
        protocol,
        should_close=True
    )
    msg = {
        'message': mock.ANY,
        'client_connection': mock.ANY,
    }
    exc_handler.assert_called_with(loop, msg)
def test_connection_del_loop_debug(loop) -> None:
    connector = mock.Mock()
    key = mock.Mock()
    protocol = mock.Mock()
    loop.set_debug(1)
    conn = Connection(connector, key, protocol, loop=loop)
    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)

    with pytest.warns(ResourceWarning):
        del conn
        gc.collect()

    # In loop debug mode the handler payload also carries the creation
    # traceback.
    msg = {
        'message': mock.ANY,
        'client_connection': mock.ANY,
        'source_traceback': mock.ANY
    }
    exc_handler.assert_called_with(loop, msg)
def test_connection_del_loop_closed(loop) -> None:
    connector = mock.Mock()
    key = mock.Mock()
    protocol = mock.Mock()
    loop.set_debug(1)
    conn = Connection(connector, key, protocol, loop=loop)
    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)
    loop.close()

    with pytest.warns(ResourceWarning):
        del conn
        gc.collect()

    # With the loop already closed nothing is released or reported.
    assert not connector._release.called
    assert not exc_handler.called
def test_del(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)
    proto = mock.Mock(should_close=False)
    conn._release('a', proto)
    conns_impl = conn._conns

    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)

    # Deleting a connector that still holds pooled connections must close
    # them and report an "Unclosed connector" message.
    with pytest.warns(ResourceWarning):
        del conn
        gc.collect()

    assert not conns_impl
    proto.close.assert_called_with()
    msg = {'connector': mock.ANY,  # conn was deleted
           'connections': mock.ANY,
           'message': 'Unclosed connector'}
    if loop.get_debug():
        msg['source_traceback'] = mock.ANY
    exc_handler.assert_called_with(loop, msg)
@pytest.mark.xfail
async def test_del_with_scheduled_cleanup(loop) -> None:
    loop.set_debug(True)
    conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=0.01)
    transp = mock.Mock()
    conn._conns['a'] = [(transp, 123)]

    conns_impl = conn._conns
    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)

    with pytest.warns(ResourceWarning):
        # `del` alone does not trigger deletion here because the loop's
        # scheduled cleanup callback keeps a strong reference to the
        # connector's bound method (hence the xfail mark).
        del conn
        await asyncio.sleep(0.01, loop=loop)
        gc.collect()

    assert not conns_impl
    transp.close.assert_called_with()
    msg = {'connector': mock.ANY,  # conn was deleted
           'message': 'Unclosed connector'}
    if loop.get_debug():
        msg['source_traceback'] = mock.ANY
    exc_handler.assert_called_with(loop, msg)
def test_del_with_closed_loop(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)
    transp = mock.Mock()
    conn._conns['a'] = [(transp, 123)]

    conns_impl = conn._conns
    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)
    loop.close()

    with pytest.warns(ResourceWarning):
        del conn
        gc.collect()

    assert not conns_impl
    # Transports cannot be closed once the loop is gone, but the
    # unclosed-connector condition is still reported.
    assert not transp.close.called
    assert exc_handler.called
def test_del_empty_connector(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)

    exc_handler = mock.Mock()
    loop.set_exception_handler(exc_handler)

    del conn

    # A connector with no live connections is collected silently.
    assert not exc_handler.called
async def test_create_conn(loop) -> None:
    # BaseConnector is abstract: _create_connection must be overridden.
    conn = aiohttp.BaseConnector(loop=loop)
    with pytest.raises(NotImplementedError):
        await conn._create_connection(object(), [], object())
def test_context_manager(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)
    conn.close = mock.Mock()

    # The connector acts as its own context manager and closes on exit.
    with conn as c:
        assert conn is c

    assert conn.close.called
def test_ctor_loop() -> None:
    # Without an explicit loop the connector falls back to
    # asyncio.get_event_loop().
    with mock.patch('aiohttp.connector.asyncio') as m_asyncio:
        session = aiohttp.BaseConnector()
        assert session._loop is m_asyncio.get_event_loop.return_value
def test_close(loop) -> None:
    proto = mock.Mock()

    conn = aiohttp.BaseConnector(loop=loop)
    assert not conn.closed
    conn._conns[('host', 8080, False)] = [(proto, object())]
    conn.close()

    # close() empties the pool, closes pooled protocols and flips the flag.
    assert not conn._conns
    assert proto.close.called
    assert conn.closed
def test_get(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)
    assert conn._get(1) is None

    proto = mock.Mock()
    conn._conns[1] = [(proto, loop.time())]
    # A freshly pooled protocol is handed back as-is.
    assert conn._get(1) == proto
    conn.close()
def test_get_expired(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)
    key = ConnectionKey('localhost', 80, False, None, None, None, None)
    assert conn._get(key) is None

    proto = mock.Mock()
    # A pooled entry far past the keepalive deadline is dropped on lookup.
    conn._conns[key] = [(proto, loop.time() - 1000)]
    assert conn._get(key) is None
    assert not conn._conns
    conn.close()
def test_get_expired_ssl(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
    key = ConnectionKey('localhost', 80, True, None, None, None, None)
    assert conn._get(key) is None

    proto = mock.Mock()
    conn._conns[key] = [(proto, loop.time() - 1000)]
    assert conn._get(key) is None
    assert not conn._conns
    # With enable_cleanup_closed the expired SSL transport is parked for
    # delayed cleanup instead of being dropped outright.
    assert conn._cleanup_closed_transports == [proto.close.return_value]
    conn.close()
def test_release_acquired(loop, key) -> None:
    proto = mock.Mock()
    conn = aiohttp.BaseConnector(loop=loop, limit=5)
    conn._release_waiter = mock.Mock()

    conn._acquired.add(proto)
    conn._acquired_per_host[key].add(proto)
    conn._release_acquired(key, proto)
    # Releasing removes the protocol from both tracking sets and wakes
    # a waiter.
    assert 0 == len(conn._acquired)
    assert 0 == len(conn._acquired_per_host)
    assert conn._release_waiter.called

    # Releasing the same protocol twice is a harmless no-op.
    conn._release_acquired(key, proto)
    assert 0 == len(conn._acquired)
    assert 0 == len(conn._acquired_per_host)

    conn.close()
def test_release_acquired_closed(loop, key) -> None:
    proto = mock.Mock()
    conn = aiohttp.BaseConnector(loop=loop, limit=5)
    conn._release_waiter = mock.Mock()

    conn._acquired.add(proto)
    conn._acquired_per_host[key].add(proto)
    conn._closed = True
    # Once the connector is marked closed, releasing leaves the
    # bookkeeping untouched and wakes no waiters.
    conn._release_acquired(key, proto)
    assert 1 == len(conn._acquired)
    assert 1 == len(conn._acquired_per_host[key])
    assert not conn._release_waiter.called
    conn.close()
def test_release(loop, key) -> None:
    conn = aiohttp.BaseConnector(loop=loop)
    conn._release_waiter = mock.Mock()

    proto = mock.Mock(should_close=False)

    conn._acquired.add(proto)
    conn._acquired_per_host[key].add(proto)

    conn._release(key, proto)
    assert conn._release_waiter.called
    # A keep-alive protocol goes back into the pool with a fresh timestamp.
    assert conn._conns[key][0][0] == proto
    assert conn._conns[key][0][1] == pytest.approx(loop.time(), abs=0.1)
    assert not conn._cleanup_closed_transports
    conn.close()
def test_release_ssl_transport(loop, ssl_key) -> None:
    conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
    conn._release_waiter = mock.Mock()

    proto = mock.Mock()
    conn._acquired.add(proto)
    conn._acquired_per_host[ssl_key].add(proto)

    # With enable_cleanup_closed, an SSL transport released with
    # should_close=True is parked for delayed cleanup.
    conn._release(ssl_key, proto, should_close=True)
    assert conn._cleanup_closed_transports == [proto.close.return_value]
    conn.close()
def test_release_already_closed(loop) -> None:
    conn = aiohttp.BaseConnector(loop=loop)

    proto = mock.Mock()
    key = 1
    conn._acquired.add(proto)
    conn.close()

    # After close(), _release must not touch waiters or bookkeeping.
    conn._release_waiters = mock.Mock()
    conn._release_acquired = mock.Mock()

    conn._release(key, proto)
    assert not conn._release_waiters.called
    assert not conn._release_acquired.called
def test_release_waiter_no_limit(loop, key, key2) -> None:
    """With no connection limit a pending waiter is woken immediately."""
    # limit is 0
    conn = aiohttp.BaseConnector(limit=0, loop=loop)
    w = mock.Mock()
    w.done.return_value = False
    conn._waiters[key].append(w)
    conn._release_waiter()
    assert len(conn._waiters[key]) == 0
    assert w.done.called
    conn.close()
def test_release_waiter_first_available(loop, key, key2) -> None:
    """Exactly one waiter (from either host key) is woken per release."""
    conn = aiohttp.BaseConnector(loop=loop)
    w1, w2 = mock.Mock(), mock.Mock()
    w1.done.return_value = False
    w2.done.return_value = False
    conn._waiters[key].append(w2)
    conn._waiters[key2].append(w1)
    conn._release_waiter()
    assert (w1.set_result.called and not w2.set_result.called or
            not w1.set_result.called and w2.set_result.called)
    conn.close()
def test_release_waiter_release_first(loop, key, key2) -> None:
    """Waiters on the same key are woken in FIFO order."""
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    w1, w2 = mock.Mock(), mock.Mock()
    w1.done.return_value = False
    w2.done.return_value = False
    conn._waiters[key] = deque([w1, w2])
    conn._release_waiter()
    assert w1.set_result.called
    assert not w2.set_result.called
    conn.close()
def test_release_waiter_skip_done_waiter(loop, key, key2) -> None:
    """Already-done (e.g. cancelled) waiters are skipped over."""
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    w1, w2 = mock.Mock(), mock.Mock()
    w1.done.return_value = True
    w2.done.return_value = False
    conn._waiters[key] = deque([w1, w2])
    conn._release_waiter()
    assert not w1.set_result.called
    assert w2.set_result.called
    conn.close()
def test_release_waiter_per_host(loop, key, key2) -> None:
    """With only a per-host limit, one waiter is woken regardless of its key."""
    # no limit
    conn = aiohttp.BaseConnector(loop=loop, limit=0, limit_per_host=2)
    w1, w2 = mock.Mock(), mock.Mock()
    w1.done.return_value = False
    w2.done.return_value = False
    conn._waiters[key] = deque([w1])
    conn._waiters[key2] = deque([w2])
    conn._release_waiter()
    assert ((w1.set_result.called and not w2.set_result.called) or
            (not w1.set_result.called and w2.set_result.called))
    conn.close()
def test_release_waiter_no_available(loop, key, key2) -> None:
    """No waiter is woken when no connection slots are available."""
    # limit is 0
    conn = aiohttp.BaseConnector(limit=0, loop=loop)
    w = mock.Mock()
    w.done.return_value = False
    conn._waiters[key].append(w)
    conn._available_connections = mock.Mock(return_value=0)
    conn._release_waiter()
    assert len(conn._waiters) == 1
    assert not w.done.called
    conn.close()
def test_release_close(loop, key) -> None:
    """A proto flagged should_close is closed, not returned to the pool."""
    conn = aiohttp.BaseConnector(loop=loop)
    proto = mock.Mock(should_close=True)
    conn._acquired.add(proto)
    conn._release(key, proto)
    assert not conn._conns
    assert proto.close.called
def test__drop_acquire_per_host1(loop) -> None:
    """Dropping an unknown key/proto pair is a no-op."""
    conn = aiohttp.BaseConnector(loop=loop)
    conn._drop_acquired_per_host(123, 456)
    assert len(conn._acquired_per_host) == 0
def test__drop_acquire_per_host2(loop) -> None:
    """Dropping the last proto for a key removes the key entirely."""
    conn = aiohttp.BaseConnector(loop=loop)
    conn._acquired_per_host[123].add(456)
    conn._drop_acquired_per_host(123, 456)
    assert len(conn._acquired_per_host) == 0
def test__drop_acquire_per_host3(loop) -> None:
    """Dropping one proto keeps the key alive while others remain."""
    conn = aiohttp.BaseConnector(loop=loop)
    conn._acquired_per_host[123].add(456)
    conn._acquired_per_host[123].add(789)
    conn._drop_acquired_per_host(123, 456)
    assert len(conn._acquired_per_host) == 1
    assert conn._acquired_per_host[123] == {789}
async def test_tcp_connector_certificate_error(loop) -> None:
    """ssl.CertificateError during connect is wrapped in ClientConnectorCertificateError."""
    req = ClientRequest('GET', URL('https://127.0.0.1:443'), loop=loop)
    async def certificate_error(*args, **kwargs):
        raise ssl.CertificateError
    conn = aiohttp.TCPConnector(loop=loop)
    conn._loop.create_connection = certificate_error
    with pytest.raises(aiohttp.ClientConnectorCertificateError) as ctx:
        await conn.connect(req, [], ClientTimeout())
    # The wrapper must still be catchable as the original error types.
    assert isinstance(ctx.value, ssl.CertificateError)
    assert isinstance(ctx.value.certificate_error, ssl.CertificateError)
    assert isinstance(ctx.value, aiohttp.ClientSSLError)
async def test_tcp_connector_multiple_hosts_errors(loop) -> None:
    """The connector tries each resolved IP in order, surviving OS, SSL,
    certificate and fingerprint failures until one address succeeds."""
    conn = aiohttp.TCPConnector(loop=loop)
    ip1 = '192.168.1.1'
    ip2 = '192.168.1.2'
    ip3 = '192.168.1.3'
    ip4 = '192.168.1.4'
    ip5 = '192.168.1.5'
    ips = [ip1, ip2, ip3, ip4, ip5]
    ips_tried = []
    fingerprint = hashlib.sha256(b'foo').digest()
    req = ClientRequest('GET', URL('https://mocked.host'),
                        ssl=aiohttp.Fingerprint(fingerprint),
                        loop=loop)
    async def _resolve_host(host, port, traces=None):
        return [{
            'hostname': host,
            'host': ip,
            'port': port,
            'family': socket.AF_INET,
            'proto': 0,
            'flags': socket.AI_NUMERICHOST}
            for ip in ips]
    conn._resolve_host = _resolve_host
    os_error = certificate_error = ssl_error = fingerprint_error = False
    connected = False
    async def create_connection(*args, **kwargs):
        # Each IP simulates a distinct failure mode; only ip5 succeeds.
        nonlocal os_error, certificate_error, ssl_error, fingerprint_error
        nonlocal connected
        ip = args[1]
        ips_tried.append(ip)
        if ip == ip1:
            os_error = True
            raise OSError
        if ip == ip2:
            certificate_error = True
            raise ssl.CertificateError
        if ip == ip3:
            ssl_error = True
            raise ssl.SSLError
        if ip == ip4:
            # Connects but the peer certificate does not match the fingerprint.
            fingerprint_error = True
            tr, pr = mock.Mock(), mock.Mock()
            def get_extra_info(param):
                if param == 'sslcontext':
                    return True
                if param == 'ssl_object':
                    s = mock.Mock()
                    s.getpeercert.return_value = b'not foo'
                    return s
                if param == 'peername':
                    return ('192.168.1.5', 12345)
                assert False, param
            tr.get_extra_info = get_extra_info
            return tr, pr
        if ip == ip5:
            # Successful connection with a matching certificate.
            connected = True
            tr, pr = mock.Mock(), mock.Mock()
            def get_extra_info(param):
                if param == 'sslcontext':
                    return True
                if param == 'ssl_object':
                    s = mock.Mock()
                    s.getpeercert.return_value = b'foo'
                    return s
                assert False
            tr.get_extra_info = get_extra_info
            return tr, pr
        assert False
    conn._loop.create_connection = create_connection
    await conn.connect(req, [], ClientTimeout())
    assert ips == ips_tried
    assert os_error
    assert certificate_error
    assert ssl_error
    assert fingerprint_error
    assert connected
async def test_tcp_connector_resolve_host(loop) -> None:
    """_resolve_host returns usable IPv4/IPv6 records for localhost."""
    conn = aiohttp.TCPConnector(loop=loop, use_dns_cache=True)
    res = await conn._resolve_host('localhost', 8080)
    assert res
    for rec in res:
        if rec['family'] == socket.AF_INET:
            assert rec['host'] == '127.0.0.1'
            assert rec['hostname'] == 'localhost'
            assert rec['port'] == 8080
        elif rec['family'] == socket.AF_INET6:
            assert rec['hostname'] == 'localhost'
            assert rec['port'] == 8080
            # macOS may report scoped link-local addresses for localhost.
            if platform.system() == 'Darwin':
                assert rec['host'] in ('::1', 'fe80::1', 'fe80::1%lo0')
            else:
                assert rec['host'] == '::1'
@pytest.fixture
def dns_response(loop):
    """Fixture: factory for a coroutine that fakes one async DNS lookup."""
    async def coro():
        # simulates a network operation
        await asyncio.sleep(0, loop=loop)
        return ["127.0.0.1"]
    return coro
async def test_tcp_connector_dns_cache_not_expired(loop, dns_response) -> None:
    """Within the TTL a second lookup is served from the DNS cache."""
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=10
        )
        m_resolver().resolve.return_value = dns_response()
        await conn._resolve_host('localhost', 8080)
        await conn._resolve_host('localhost', 8080)
        # Only the first call should hit the resolver.
        m_resolver().resolve.assert_called_once_with(
            'localhost',
            8080,
            family=0
        )
async def test_tcp_connector_dns_cache_forever(loop, dns_response) -> None:
    """With ``ttl_dns_cache=None`` cache entries never expire, so the
    resolver is consulted exactly once.

    Fix: this test previously passed ``ttl_dns_cache=10``, which made it a
    byte-for-byte duplicate of ``test_tcp_connector_dns_cache_not_expired``
    and never exercised the "cache forever" code path its name promises.
    """
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=None  # None == keep entries forever
        )
        m_resolver().resolve.return_value = dns_response()
        await conn._resolve_host('localhost', 8080)
        await conn._resolve_host('localhost', 8080)
        m_resolver().resolve.assert_called_once_with(
            'localhost',
            8080,
            family=0
        )
async def test_tcp_connector_use_dns_cache_disabled(loop,
                                                    dns_response) -> None:
    """With the DNS cache disabled every lookup hits the resolver."""
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(loop=loop, use_dns_cache=False)
        m_resolver().resolve.side_effect = [dns_response(), dns_response()]
        await conn._resolve_host('localhost', 8080)
        await conn._resolve_host('localhost', 8080)
        m_resolver().resolve.assert_has_calls([
            mock.call('localhost', 8080, family=0),
            mock.call('localhost', 8080, family=0)
        ])
async def test_tcp_connector_dns_throttle_requests(loop, dns_response) -> None:
    """Concurrent lookups for the same host are coalesced into one resolve."""
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=10
        )
        m_resolver().resolve.return_value = dns_response()
        loop.create_task(conn._resolve_host('localhost', 8080))
        loop.create_task(conn._resolve_host('localhost', 8080))
        await asyncio.sleep(0, loop=loop)
        m_resolver().resolve.assert_called_once_with(
            'localhost',
            8080,
            family=0
        )
async def test_tcp_connector_dns_throttle_requests_exception_spread(
        loop) -> None:
    """A resolver failure is propagated to every coalesced waiter."""
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=10
        )
        e = Exception()
        m_resolver().resolve.side_effect = e
        r1 = loop.create_task(conn._resolve_host('localhost', 8080))
        r2 = loop.create_task(conn._resolve_host('localhost', 8080))
        await asyncio.sleep(0, loop=loop)
        assert r1.exception() == e
        assert r2.exception() == e
async def test_tcp_connector_dns_throttle_requests_cancelled_when_close(
        loop,
        dns_response):
    """Closing the connector cancels in-flight coalesced lookups."""
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=10
        )
        m_resolver().resolve.return_value = dns_response()
        loop.create_task(conn._resolve_host('localhost', 8080))
        f = loop.create_task(conn._resolve_host('localhost', 8080))
        await asyncio.sleep(0, loop=loop)
        conn.close()
        with pytest.raises(asyncio.futures.CancelledError):
            await f
async def test_tcp_connector_dns_tracing(loop, dns_response) -> None:
    """DNS trace hooks fire: resolve start/end + cache miss on the first
    lookup, cache hit on the second."""
    session = mock.Mock()
    trace_config_ctx = mock.Mock()
    on_dns_resolvehost_start = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_dns_resolvehost_end = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_dns_cache_hit = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_dns_cache_miss = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    trace_config = aiohttp.TraceConfig(
        trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
    )
    trace_config.on_dns_resolvehost_start.append(on_dns_resolvehost_start)
    trace_config.on_dns_resolvehost_end.append(on_dns_resolvehost_end)
    trace_config.on_dns_cache_hit.append(on_dns_cache_hit)
    trace_config.on_dns_cache_miss.append(on_dns_cache_miss)
    trace_config.freeze()
    traces = [
        Trace(
            session,
            trace_config,
            trace_config.trace_config_ctx()
        )
    ]
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=10
        )
        m_resolver().resolve.return_value = dns_response()
        # First lookup: cold cache -> start/end + cache-miss events.
        await conn._resolve_host(
            'localhost',
            8080,
            traces=traces
        )
        on_dns_resolvehost_start.assert_called_once_with(
            session,
            trace_config_ctx,
            aiohttp.TraceDnsResolveHostStartParams('localhost')
        )
        on_dns_resolvehost_end.assert_called_once_with(
            session,
            trace_config_ctx,
            aiohttp.TraceDnsResolveHostEndParams('localhost')
        )
        on_dns_cache_miss.assert_called_once_with(
            session,
            trace_config_ctx,
            aiohttp.TraceDnsCacheMissParams('localhost')
        )
        assert not on_dns_cache_hit.called
        # Second lookup: warm cache -> cache-hit event only.
        await conn._resolve_host(
            'localhost',
            8080,
            traces=traces
        )
        on_dns_cache_hit.assert_called_once_with(
            session,
            trace_config_ctx,
            aiohttp.TraceDnsCacheHitParams('localhost')
        )
async def test_tcp_connector_dns_tracing_cache_disabled(loop,
                                                        dns_response) -> None:
    """With caching off, resolve start/end hooks fire on every lookup."""
    session = mock.Mock()
    trace_config_ctx = mock.Mock()
    on_dns_resolvehost_start = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_dns_resolvehost_end = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    trace_config = aiohttp.TraceConfig(
        trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
    )
    trace_config.on_dns_resolvehost_start.append(on_dns_resolvehost_start)
    trace_config.on_dns_resolvehost_end.append(on_dns_resolvehost_end)
    trace_config.freeze()
    traces = [
        Trace(
            session,
            trace_config,
            trace_config.trace_config_ctx()
        )
    ]
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=False
        )
        m_resolver().resolve.side_effect = [
            dns_response(),
            dns_response()
        ]
        await conn._resolve_host(
            'localhost',
            8080,
            traces=traces
        )
        await conn._resolve_host(
            'localhost',
            8080,
            traces=traces
        )
        on_dns_resolvehost_start.assert_has_calls([
            mock.call(
                session,
                trace_config_ctx,
                aiohttp.TraceDnsResolveHostStartParams('localhost')
            ),
            mock.call(
                session,
                trace_config_ctx,
                aiohttp.TraceDnsResolveHostStartParams('localhost')
            )
        ])
        on_dns_resolvehost_end.assert_has_calls([
            mock.call(
                session,
                trace_config_ctx,
                aiohttp.TraceDnsResolveHostEndParams('localhost')
            ),
            mock.call(
                session,
                trace_config_ctx,
                aiohttp.TraceDnsResolveHostEndParams('localhost')
            )
        ])
async def test_tcp_connector_dns_tracing_throttle_requests(
        loop, dns_response) -> None:
    """Of two coalesced lookups, one reports a cache miss and the other a hit."""
    session = mock.Mock()
    trace_config_ctx = mock.Mock()
    on_dns_cache_hit = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_dns_cache_miss = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    trace_config = aiohttp.TraceConfig(
        trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
    )
    trace_config.on_dns_cache_hit.append(on_dns_cache_hit)
    trace_config.on_dns_cache_miss.append(on_dns_cache_miss)
    trace_config.freeze()
    traces = [
        Trace(
            session,
            trace_config,
            trace_config.trace_config_ctx()
        )
    ]
    with mock.patch('aiohttp.connector.DefaultResolver') as m_resolver:
        conn = aiohttp.TCPConnector(
            loop=loop,
            use_dns_cache=True,
            ttl_dns_cache=10
        )
        m_resolver().resolve.return_value = dns_response()
        loop.create_task(conn._resolve_host('localhost', 8080, traces=traces))
        loop.create_task(conn._resolve_host('localhost', 8080, traces=traces))
        await asyncio.sleep(0, loop=loop)
        on_dns_cache_hit.assert_called_once_with(
            session,
            trace_config_ctx,
            aiohttp.TraceDnsCacheHitParams('localhost')
        )
        on_dns_cache_miss.assert_called_once_with(
            session,
            trace_config_ctx,
            aiohttp.TraceDnsCacheMissParams('localhost')
        )
def test_dns_error(loop) -> None:
    """A resolver OSError is wrapped in ClientConnectorError."""
    connector = aiohttp.TCPConnector(loop=loop)
    connector._resolve_host = make_mocked_coro(
        raise_exception=OSError('dont take it serious'))
    req = ClientRequest(
        'GET', URL('http://www.python.org'),
        loop=loop)
    with pytest.raises(aiohttp.ClientConnectorError):
        loop.run_until_complete(connector.connect(req, [], ClientTimeout()))
def test_get_pop_empty_conns(loop) -> None:
    """_get on a key with an empty list returns None and prunes the key."""
    # see issue #473
    conn = aiohttp.BaseConnector(loop=loop)
    key = ('127.0.0.1', 80, False)
    conn._conns[key] = []
    proto = conn._get(key)
    assert proto is None
    assert not conn._conns
def test_release_close_do_not_add_to_pool(loop, key) -> None:
    """A should_close proto is never pooled on release."""
    # see issue #473
    conn = aiohttp.BaseConnector(loop=loop)
    proto = mock.Mock(should_close=True)
    conn._acquired.add(proto)
    conn._release(key, proto)
    assert not conn._conns
def test_release_close_do_not_delete_existing_connections(loop, key) -> None:
    """Closing one proto must not evict other pooled protos for the key."""
    proto1 = mock.Mock()
    conn = aiohttp.BaseConnector(loop=loop)
    conn._conns[key] = [(proto1, 1)]
    proto = mock.Mock(should_close=True)
    conn._acquired.add(proto)
    conn._release(key, proto)
    assert conn._conns[key] == [(proto1, 1)]
    assert proto.close.called
    conn.close()
def test_release_not_started(loop) -> None:
    """A healthy proto is pooled with the current loop time and kept open."""
    conn = aiohttp.BaseConnector(loop=loop)
    proto = mock.Mock(should_close=False)
    key = 1
    conn._acquired.add(proto)
    conn._release(key, proto)
    # assert conn._conns == {1: [(proto, 10)]}
    rec = conn._conns[1]
    assert rec[0][0] == proto
    assert rec[0][1] == pytest.approx(loop.time(), abs=0.05)
    assert not proto.close.called
    conn.close()
def test_release_not_opened(loop, key) -> None:
    """A bare mock proto (should_close truthy by default) is closed on release."""
    conn = aiohttp.BaseConnector(loop=loop)
    proto = mock.Mock()
    conn._acquired.add(proto)
    conn._release(key, proto)
    assert proto.close.called
async def test_connect(loop, key) -> None:
    """connect() reuses a pooled connection instead of creating a new one."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    connection = await conn.connect(req, [], ClientTimeout())
    assert not conn._create_connection.called
    assert connection._protocol is proto
    assert connection.transport is proto.transport
    assert isinstance(connection, Connection)
    connection.close()
async def test_connect_tracing(loop) -> None:
    """Creating a fresh connection fires the create start/end trace hooks."""
    session = mock.Mock()
    trace_config_ctx = mock.Mock()
    on_connection_create_start = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_connection_create_end = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    trace_config = aiohttp.TraceConfig(
        trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
    )
    trace_config.on_connection_create_start.append(on_connection_create_start)
    trace_config.on_connection_create_end.append(on_connection_create_end)
    trace_config.freeze()
    traces = [
        Trace(
            session,
            trace_config,
            trace_config.trace_config_ctx()
        )
    ]
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop)
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    conn2 = await conn.connect(req, traces, ClientTimeout())
    conn2.release()
    on_connection_create_start.assert_called_with(
        session,
        trace_config_ctx,
        aiohttp.TraceConnectionCreateStartParams()
    )
    on_connection_create_end.assert_called_with(
        session,
        trace_config_ctx,
        aiohttp.TraceConnectionCreateEndParams()
    )
async def test_close_during_connect(loop) -> None:
    """Closing the connector mid-connect raises and closes the late proto."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    fut = loop.create_future()
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop)
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = fut
    task = loop.create_task(conn.connect(req, None, ClientTimeout()))
    await asyncio.sleep(0, loop=loop)
    conn.close()
    # The connection only completes after close(); it must be discarded.
    fut.set_result(proto)
    with pytest.raises(aiohttp.ClientConnectionError):
        await task
    assert proto.close.called
def test_ctor_cleanup() -> None:
    """The ctor schedules the closed-transport sweep but not the idle sweep."""
    loop = mock.Mock()
    loop.time.return_value = 1.5
    conn = aiohttp.BaseConnector(
        loop=loop, keepalive_timeout=10, enable_cleanup_closed=True)
    assert conn._cleanup_handle is None
    assert conn._cleanup_closed_handle is not None
def test_cleanup(key) -> None:
    """_cleanup evicts expired and disconnected protos and reschedules itself."""
    testset = {
        key: [(mock.Mock(), 10),
              (mock.Mock(), 300)],
    }
    testset[key][0][0].is_connected.return_value = True
    testset[key][1][0].is_connected.return_value = False
    loop = mock.Mock()
    loop.time.return_value = 300
    conn = aiohttp.BaseConnector(loop=loop)
    conn._conns = testset
    existing_handle = conn._cleanup_handle = mock.Mock()
    conn._cleanup()
    assert existing_handle.cancel.called
    assert conn._conns == {}
    assert conn._cleanup_handle is not None
def test_cleanup_close_ssl_transport(ssl_key) -> None:
    """Expired SSL transports are moved to the deferred-abort list."""
    proto = mock.Mock()
    testset = {ssl_key: [(proto, 10)]}
    loop = mock.Mock()
    loop.time.return_value = 300
    conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
    conn._conns = testset
    existing_handle = conn._cleanup_handle = mock.Mock()
    conn._cleanup()
    assert existing_handle.cancel.called
    assert conn._conns == {}
    assert conn._cleanup_closed_transports == [proto.close.return_value]
def test_cleanup2() -> None:
    """A still-fresh connection survives _cleanup; next sweep is at t+keepalive."""
    testset = {1: [(mock.Mock(), 300)]}
    testset[1][0][0].is_connected.return_value = True
    loop = mock.Mock()
    loop.time.return_value = 300
    conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
    conn._conns = testset
    conn._cleanup()
    assert conn._conns == testset
    assert conn._cleanup_handle is not None
    loop.call_at.assert_called_with(310, mock.ANY, mock.ANY)
    conn.close()
def test_cleanup3(key) -> None:
    """Only entries past the keepalive deadline are evicted."""
    testset = {key: [(mock.Mock(), 290.1),
                     (mock.Mock(), 305.1)]}
    testset[key][0][0].is_connected.return_value = True
    loop = mock.Mock()
    loop.time.return_value = 308.5
    conn = aiohttp.BaseConnector(loop=loop, keepalive_timeout=10)
    conn._conns = testset
    conn._cleanup()
    assert conn._conns == {key: [testset[key][1]]}
    assert conn._cleanup_handle is not None
    loop.call_at.assert_called_with(319, mock.ANY, mock.ANY)
    conn.close()
def test_cleanup_closed(loop, mocker) -> None:
    """_cleanup_closed aborts queued transports and reschedules itself."""
    if not hasattr(loop, '__dict__'):
        pytest.skip("can not override loop attributes")
    mocker.spy(loop, 'call_at')
    conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
    tr = mock.Mock()
    conn._cleanup_closed_handle = cleanup_closed_handle = mock.Mock()
    conn._cleanup_closed_transports = [tr]
    conn._cleanup_closed()
    assert tr.abort.called
    assert not conn._cleanup_closed_transports
    assert loop.call_at.called
    assert cleanup_closed_handle.cancel.called
def test_cleanup_closed_disabled(loop, mocker) -> None:
    """With the feature disabled, transports are aborted but nothing is rescheduled."""
    conn = aiohttp.BaseConnector(
        loop=loop, enable_cleanup_closed=False)
    tr = mock.Mock()
    conn._cleanup_closed_transports = [tr]
    conn._cleanup_closed()
    assert tr.abort.called
    assert not conn._cleanup_closed_transports
def test_tcp_connector_ctor(loop) -> None:
    """A default TCPConnector has no ssl override, DNS caching on, family 0."""
    connector = aiohttp.TCPConnector(loop=loop)
    assert connector._ssl is None
    assert connector.use_dns_cache
    assert connector.family == 0
def test_tcp_connector_ctor_fingerprint_valid(loop) -> None:
    """A sha256 fingerprint is accepted and stored as-is."""
    fp = aiohttp.Fingerprint(hashlib.sha256(b"foo").digest())
    connector = aiohttp.TCPConnector(ssl=fp, loop=loop)
    assert connector._ssl is fp
def test_insecure_fingerprint_md5(loop) -> None:
    """md5 fingerprints are rejected as insecure."""
    digest = hashlib.md5(b"foo").digest()
    with pytest.raises(ValueError):
        aiohttp.TCPConnector(ssl=aiohttp.Fingerprint(digest), loop=loop)
def test_insecure_fingerprint_sha1(loop) -> None:
    """sha1 fingerprints are rejected as insecure."""
    digest = hashlib.sha1(b"foo").digest()
    with pytest.raises(ValueError):
        aiohttp.TCPConnector(ssl=aiohttp.Fingerprint(digest), loop=loop)
def test_tcp_connector_clear_dns_cache(loop) -> None:
    """clear_dns_cache removes one entry or the whole cache."""
    conn = aiohttp.TCPConnector(loop=loop)
    hosts = ['a', 'b']
    conn._cached_hosts.add(('localhost', 123), hosts)
    conn._cached_hosts.add(('localhost', 124), hosts)
    conn.clear_dns_cache('localhost', 123)
    with pytest.raises(KeyError):
        conn._cached_hosts.next_addrs(('localhost', 123))
    assert conn._cached_hosts.next_addrs(('localhost', 124)) == hosts
    # Removing an already-removed entry is OK (idempotent).
    conn.clear_dns_cache('localhost', 123)
    with pytest.raises(KeyError):
        conn._cached_hosts.next_addrs(('localhost', 123))
    conn.clear_dns_cache()
    with pytest.raises(KeyError):
        conn._cached_hosts.next_addrs(('localhost', 124))
def test_tcp_connector_clear_dns_cache_bad_args(loop) -> None:
    """Passing a host without a port is a ValueError."""
    conn = aiohttp.TCPConnector(loop=loop)
    with pytest.raises(ValueError):
        conn.clear_dns_cache('localhost')
def test_dont_recreate_ssl_context(loop) -> None:
    """The verified SSL context is created once and cached."""
    conn = aiohttp.TCPConnector(loop=loop)
    ctx = conn._make_ssl_context(True)
    assert ctx is conn._make_ssl_context(True)
def test_dont_recreate_ssl_context2(loop) -> None:
    """The non-verified SSL context is created once and cached."""
    conn = aiohttp.TCPConnector(loop=loop)
    ctx = conn._make_ssl_context(False)
    assert ctx is conn._make_ssl_context(False)
def test___get_ssl_context1(loop) -> None:
    """A plain-HTTP request gets no SSL context at all."""
    conn = aiohttp.TCPConnector(loop=loop)
    request = mock.Mock()
    request.is_ssl.return_value = False
    assert conn._get_ssl_context(request) is None
def test___get_ssl_context2(loop) -> None:
    """A per-request SSLContext takes precedence."""
    per_request_ctx = ssl.SSLContext()
    conn = aiohttp.TCPConnector(loop=loop)
    request = mock.Mock()
    request.is_ssl.return_value = True
    request.ssl = per_request_ctx
    assert conn._get_ssl_context(request) is per_request_ctx
def test___get_ssl_context3(loop) -> None:
    """With no per-request setting the connector-level context is used."""
    connector_ctx = ssl.SSLContext()
    conn = aiohttp.TCPConnector(loop=loop, ssl=connector_ctx)
    request = mock.Mock()
    request.is_ssl.return_value = True
    request.ssl = None
    assert conn._get_ssl_context(request) is connector_ctx
def test___get_ssl_context4(loop) -> None:
    """ssl=False on the request overrides the connector context (no verify)."""
    connector_ctx = ssl.SSLContext()
    conn = aiohttp.TCPConnector(loop=loop, ssl=connector_ctx)
    request = mock.Mock()
    request.is_ssl.return_value = True
    request.ssl = False
    assert conn._get_ssl_context(request) is conn._make_ssl_context(False)
def test___get_ssl_context5(loop) -> None:
    """A fingerprint on the request selects the non-verifying context."""
    connector_ctx = ssl.SSLContext()
    conn = aiohttp.TCPConnector(loop=loop, ssl=connector_ctx)
    request = mock.Mock()
    request.is_ssl.return_value = True
    request.ssl = aiohttp.Fingerprint(hashlib.sha256(b'1').digest())
    assert conn._get_ssl_context(request) is conn._make_ssl_context(False)
def test___get_ssl_context6(loop) -> None:
    """With nothing configured the verifying default context is used."""
    conn = aiohttp.TCPConnector(loop=loop)
    request = mock.Mock()
    request.is_ssl.return_value = True
    request.ssl = None
    assert conn._get_ssl_context(request) is conn._make_ssl_context(True)
def test_close_twice(loop) -> None:
    """close() is idempotent even if internal state was corrupted after it."""
    proto = mock.Mock()
    conn = aiohttp.BaseConnector(loop=loop)
    conn._conns[1] = [(proto, object())]
    conn.close()
    assert not conn._conns
    assert proto.close.called
    assert conn.closed
    conn._conns = 'Invalid'  # fill with garbage
    conn.close()
    assert conn.closed
def test_close_cancels_cleanup_handle(loop) -> None:
    """close() cancels the scheduled idle-connection sweep."""
    conn = aiohttp.BaseConnector(loop=loop)
    conn._release(1, mock.Mock(should_close=False))
    assert conn._cleanup_handle is not None
    conn.close()
    assert conn._cleanup_handle is None
def test_close_abort_closed_transports(loop) -> None:
    """close() aborts transports queued for deferred cleanup."""
    tr = mock.Mock()
    conn = aiohttp.BaseConnector(loop=loop)
    conn._cleanup_closed_transports.append(tr)
    conn.close()
    assert not conn._cleanup_closed_transports
    assert tr.abort.called
    assert conn.closed
def test_close_cancels_cleanup_closed_handle(loop) -> None:
    """close() cancels the scheduled closed-transport sweep."""
    conn = aiohttp.BaseConnector(loop=loop, enable_cleanup_closed=True)
    assert conn._cleanup_closed_handle is not None
    conn.close()
    assert conn._cleanup_closed_handle is None
def test_ctor_with_default_loop() -> None:
    """Without an explicit loop the connector picks up the current event loop."""
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    conn = aiohttp.BaseConnector()
    assert loop is conn._loop
    loop.close()
async def test_connect_with_limit(loop, key) -> None:
    """With limit=1 a second connect() blocks until the first is released."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost:80'),
                        loop=loop,
                        response_class=mock.Mock())
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    connection1 = await conn.connect(req, None, ClientTimeout())
    assert connection1._protocol == proto
    assert 1 == len(conn._acquired)
    assert proto in conn._acquired
    assert key in conn._acquired_per_host
    assert proto in conn._acquired_per_host[key]
    acquired = False
    async def f():
        nonlocal acquired
        connection2 = await conn.connect(req, None, ClientTimeout())
        acquired = True
        assert 1 == len(conn._acquired)
        assert 1 == len(conn._acquired_per_host[key])
        connection2.release()
    task = loop.create_task(f())
    await asyncio.sleep(0.01, loop=loop)
    assert not acquired
    connection1.release()
    await asyncio.sleep(0, loop=loop)
    assert acquired
    await task
    conn.close()
async def test_connect_queued_operation_tracing(loop, key) -> None:
    """A connect() forced to wait fires the queued start/end trace hooks."""
    session = mock.Mock()
    trace_config_ctx = mock.Mock()
    on_connection_queued_start = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    on_connection_queued_end = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    trace_config = aiohttp.TraceConfig(
        trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
    )
    trace_config.on_connection_queued_start.append(on_connection_queued_start)
    trace_config.on_connection_queued_end.append(on_connection_queued_end)
    trace_config.freeze()
    traces = [
        Trace(
            session,
            trace_config,
            trace_config.trace_config_ctx()
        )
    ]
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost1:80'),
                        loop=loop,
                        response_class=mock.Mock())
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    connection1 = await conn.connect(req, traces, ClientTimeout())
    async def f():
        connection2 = await conn.connect(req, traces, ClientTimeout())
        on_connection_queued_start.assert_called_with(
            session,
            trace_config_ctx,
            aiohttp.TraceConnectionQueuedStartParams()
        )
        on_connection_queued_end.assert_called_with(
            session,
            trace_config_ctx,
            aiohttp.TraceConnectionQueuedEndParams()
        )
        connection2.release()
    task = asyncio.ensure_future(f(), loop=loop)
    await asyncio.sleep(0.01, loop=loop)
    connection1.release()
    await task
    conn.close()
async def test_connect_reuseconn_tracing(loop, key) -> None:
    """Reusing a pooled connection fires the reuseconn trace hook."""
    session = mock.Mock()
    trace_config_ctx = mock.Mock()
    on_connection_reuseconn = mock.Mock(
        side_effect=asyncio.coroutine(mock.Mock())
    )
    trace_config = aiohttp.TraceConfig(
        trace_config_ctx_factory=mock.Mock(return_value=trace_config_ctx)
    )
    trace_config.on_connection_reuseconn.append(on_connection_reuseconn)
    trace_config.freeze()
    traces = [
        Trace(
            session,
            trace_config,
            trace_config.trace_config_ctx()
        )
    ]
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost:80'),
                        loop=loop,
                        response_class=mock.Mock())
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    conn._conns[key] = [(proto, loop.time())]
    conn2 = await conn.connect(req, traces, ClientTimeout())
    conn2.release()
    on_connection_reuseconn.assert_called_with(
        session,
        trace_config_ctx,
        aiohttp.TraceConnectionReuseconnParams()
    )
    conn.close()
async def test_connect_with_limit_and_limit_per_host(loop, key) -> None:
    """A high global limit still blocks when limit_per_host=1 is exhausted."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=1000, limit_per_host=1)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    acquired = False
    connection1 = await conn.connect(req, None, ClientTimeout())
    async def f():
        nonlocal acquired
        connection2 = await conn.connect(req, None, ClientTimeout())
        acquired = True
        assert 1 == len(conn._acquired)
        assert 1 == len(conn._acquired_per_host[key])
        connection2.release()
    task = loop.create_task(f())
    await asyncio.sleep(0.01, loop=loop)
    assert not acquired
    connection1.release()
    await asyncio.sleep(0, loop=loop)
    assert acquired
    await task
    conn.close()
async def test_connect_with_no_limit_and_limit_per_host(loop, key) -> None:
    """limit=0 (unbounded) still honors limit_per_host."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost1:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=0, limit_per_host=1)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    acquired = False
    connection1 = await conn.connect(req, None, ClientTimeout())
    async def f():
        nonlocal acquired
        connection2 = await conn.connect(req, None, ClientTimeout())
        acquired = True
        connection2.release()
    task = loop.create_task(f())
    await asyncio.sleep(0.01, loop=loop)
    assert not acquired
    connection1.release()
    await asyncio.sleep(0, loop=loop)
    assert acquired
    await task
    conn.close()
async def test_connect_with_no_limits(loop, key) -> None:
    """With both limits disabled a second connect() never blocks."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://localhost:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=0, limit_per_host=0)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    acquired = False
    connection1 = await conn.connect(req, None, ClientTimeout())
    async def f():
        nonlocal acquired
        connection2 = await conn.connect(req, None, ClientTimeout())
        acquired = True
        assert 1 == len(conn._acquired)
        assert 1 == len(conn._acquired_per_host[key])
        connection2.release()
    task = loop.create_task(f())
    await asyncio.sleep(0.01, loop=loop)
    assert acquired
    connection1.release()
    await task
    conn.close()
async def test_connect_with_limit_cancelled(loop) -> None:
    """A connect() waiting on an exhausted limit can be cancelled via timeout."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    key = ('host', 80, False)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    connection = await conn.connect(req, None, ClientTimeout())
    assert connection._protocol == proto
    assert connection.transport == proto.transport
    assert 1 == len(conn._acquired)
    with pytest.raises(asyncio.TimeoutError):
        # limit exhausted
        await asyncio.wait_for(conn.connect(req, None, ClientTimeout()),
                               0.01, loop=loop)
    connection.close()
async def test_connect_with_capacity_release_waiters(loop) -> None:
    """A failing _create_connection must leave no stale entries in _waiters.

    Fix: ``check_with_exc`` previously was a plain ``def`` containing
    ``yield from`` — i.e. a generator function.  Calling it merely created a
    generator that was never iterated, so none of the checks ever executed
    and the test passed vacuously.  It is now a coroutine that is awaited.
    """
    async def check_with_exc(err):
        conn = aiohttp.BaseConnector(limit=1, loop=loop)
        conn._create_connection = mock.Mock()
        conn._create_connection.return_value = \
            loop.create_future()
        conn._create_connection.return_value.set_exception(err)
        with pytest.raises(Exception):
            req = mock.Mock()
            await conn.connect(req, None, ClientTimeout())
        # The failed attempt must not leave a dangling waiter behind.
        assert not conn._waiters
    await check_with_exc(OSError(1, 'permission error'))
    await check_with_exc(RuntimeError())
    await check_with_exc(asyncio.TimeoutError())
async def test_connect_with_limit_concurrent(loop) -> None:
    """Under concurrent load the connector never opens more than `limit`
    simultaneous connections, even when _create_connection truly yields."""
    proto = mock.Mock()
    proto.should_close = False
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    max_connections = 2
    num_connections = 0
    conn = aiohttp.BaseConnector(limit=max_connections, loop=loop)
    # Use a real coroutine for _create_connection; a mock would mask
    # problems that only happen when the method yields.
    async def create_connection(req, traces, timeout):
        nonlocal num_connections
        num_connections += 1
        await asyncio.sleep(0, loop=loop)
        # Make a new transport mock each time because acquired
        # transports are stored in a set. Reusing the same object
        # messes with the count.
        proto = mock.Mock(should_close=False)
        proto.is_connected.return_value = True
        return proto
    conn._create_connection = create_connection
    # Simulate something like a crawler. It opens a connection, does
    # something with it, closes it, then creates tasks that make more
    # connections and waits for them to finish. The crawler is started
    # with multiple concurrent requests and stops when it hits a
    # predefined maximum number of requests.
    max_requests = 10
    num_requests = 0
    start_requests = max_connections + 1
    async def f(start=True):
        nonlocal num_requests
        if num_requests == max_requests:
            return
        num_requests += 1
        if not start:
            connection = await conn.connect(req, None, ClientTimeout())
            await asyncio.sleep(0, loop=loop)
            connection.release()
        tasks = [
            loop.create_task(f(start=False))
            for i in range(start_requests)
        ]
        await asyncio.wait(tasks, loop=loop)
    await f()
    conn.close()
    # Every request reused a pooled connection once the limit was reached.
    assert max_connections == num_connections
async def test_connect_waiters_cleanup(loop) -> None:
    """Cancelling a blocked connect removes its entry from conn._waiters."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    # Force the connect to queue up as a waiter.
    conn._available_connections = mock.Mock(return_value=0)
    t = loop.create_task(conn.connect(req, None, ClientTimeout()))
    await asyncio.sleep(0, loop=loop)
    assert conn._waiters.keys()
    t.cancel()
    await asyncio.sleep(0, loop=loop)
    assert not conn._waiters.keys()
async def test_connect_waiters_cleanup_key_error(loop) -> None:
    """Cancelling a waiter whose key was already dropped must not raise.

    Bug fix: the final check used to read
    ``assert not conn._waiters.keys() == []`` which parses as
    ``not (keys == [])``; a dict view is never equal to a list, so the
    assertion always passed regardless of the connector's state. It now
    asserts that the waiter table is actually empty.
    """
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    # Force the connect to queue up as a waiter.
    conn._available_connections = mock.Mock(return_value=0)
    t = loop.create_task(conn.connect(req, None, ClientTimeout()))
    await asyncio.sleep(0, loop=loop)
    assert conn._waiters.keys()
    # we delete the entry explicitly before the
    # canceled connection grabs the loop again, we
    # must expect a none failure termination
    conn._waiters.clear()
    t.cancel()
    await asyncio.sleep(0, loop=loop)
    assert not conn._waiters.keys()
async def test_close_with_acquired_connection(loop) -> None:
    """Closing the connector drops tracking of acquired connections but the
    Connection wrapper itself stays open until explicitly closed."""
    proto = mock.Mock()
    proto.is_connected.return_value = True
    req = ClientRequest('GET', URL('http://host:80'), loop=loop)
    conn = aiohttp.BaseConnector(loop=loop, limit=1)
    key = ('host', 80, False)
    conn._conns[key] = [(proto, loop.time())]
    conn._create_connection = mock.Mock()
    conn._create_connection.return_value = loop.create_future()
    conn._create_connection.return_value.set_result(proto)
    connection = await conn.connect(req, None, ClientTimeout())
    assert 1 == len(conn._acquired)
    conn.close()
    assert 0 == len(conn._acquired)
    assert conn.closed
    # The underlying protocol was closed by the connector...
    proto.close.assert_called_with()
    # ...but the Connection wrapper must be closed by its owner.
    assert not connection.closed
    connection.close()
    assert connection.closed
def test_default_force_close(loop) -> None:
    """Connections are kept alive by default: force_close starts out off."""
    conn = aiohttp.BaseConnector(loop=loop)
    assert not conn.force_close
def test_limit_property(loop) -> None:
    """A limit given to the constructor is exposed via the .limit property."""
    connector = aiohttp.BaseConnector(loop=loop, limit=15)
    assert connector.limit == 15
    connector.close()
def test_limit_per_host_property(loop) -> None:
    """limit_per_host from the constructor is exposed via its property."""
    connector = aiohttp.BaseConnector(loop=loop, limit_per_host=15)
    assert connector.limit_per_host == 15
    connector.close()
def test_limit_property_default(loop) -> None:
    """The default overall connection limit is 100."""
    connector = aiohttp.BaseConnector(loop=loop)
    assert connector.limit == 100
    connector.close()
def test_limit_per_host_property_default(loop) -> None:
    """By default there is no per-host limit (0 means unlimited)."""
    connector = aiohttp.BaseConnector(loop=loop)
    assert connector.limit_per_host == 0
    connector.close()
def test_force_close_and_explicit_keep_alive(loop) -> None:
    """force_close rejects a positive keepalive_timeout but accepts None."""
    # An explicit keepalive timeout contradicts force_close -> error.
    with pytest.raises(ValueError):
        aiohttp.BaseConnector(loop=loop, keepalive_timeout=30,
                              force_close=True)
    # keepalive_timeout=None together with force_close is allowed.
    connector = aiohttp.BaseConnector(loop=loop, force_close=True,
                                      keepalive_timeout=None)
    assert connector
    # Leaving keepalive_timeout at its default is allowed too.
    connector = aiohttp.BaseConnector(loop=loop, force_close=True)
    assert connector
async def test_error_on_connection(loop, key) -> None:
    """If the first connection attempt fails, the queued second attempt is
    retried and succeeds without exceeding the per-host limit."""
    conn = aiohttp.BaseConnector(limit=1, loop=loop)
    req = mock.Mock()
    req.connection_key = key
    proto = mock.Mock()
    i = 0
    fut = loop.create_future()
    exc = OSError()
    async def create_connection(req, traces, timeout):
        nonlocal i
        i += 1
        if i == 1:
            # First attempt: block until released, then fail.
            await fut
            raise exc
        elif i == 2:
            return proto
    conn._create_connection = create_connection
    t1 = loop.create_task(conn.connect(req, None, ClientTimeout()))
    t2 = loop.create_task(conn.connect(req, None, ClientTimeout()))
    await asyncio.sleep(0, loop=loop)
    assert not t1.done()
    assert not t2.done()
    assert len(conn._acquired_per_host[key]) == 1
    # Unblock the first attempt so it raises.
    fut.set_result(None)
    with pytest.raises(OSError):
        await t1
    ret = await t2
    assert len(conn._acquired_per_host[key]) == 1
    assert ret._key == key
    assert ret.protocol == proto
    assert proto in conn._acquired
    ret.release()
async def test_cancelled_waiter(loop) -> None:
    """Cancelling a connect that is waiting for capacity raises
    CancelledError to its awaiter."""
    conn = aiohttp.BaseConnector(limit=1, loop=loop)
    req = mock.Mock()
    req.connection_key = 'key'
    proto = mock.Mock()
    async def create_connection(req, traces=None):
        await asyncio.sleep(1)
        return proto
    conn._create_connection = create_connection
    # Fill the single slot so the connect below has to wait.
    conn._acquired.add(proto)
    conn2 = loop.create_task(conn.connect(req, None, ClientTimeout()))
    await asyncio.sleep(0, loop=loop)
    conn2.cancel()
    with pytest.raises(asyncio.CancelledError):
        await conn2
async def test_error_on_connection_with_cancelled_waiter(loop, key) -> None:
    """A failed first attempt plus a cancelled second waiter still lets a
    third queued connect acquire the slot."""
    conn = aiohttp.BaseConnector(limit=1, loop=loop)
    req = mock.Mock()
    req.connection_key = key
    proto = mock.Mock()
    i = 0
    fut1 = loop.create_future()
    fut2 = loop.create_future()
    exc = OSError()
    async def create_connection(req, traces, timeout):
        nonlocal i
        i += 1
        if i == 1:
            # First attempt: fail after being unblocked.
            await fut1
            raise exc
        if i == 2:
            # Second attempt: parked on a future that gets cancelled.
            await fut2
        elif i == 3:
            return proto
    conn._create_connection = create_connection
    t1 = loop.create_task(conn.connect(req, None, ClientTimeout()))
    t2 = loop.create_task(conn.connect(req, None, ClientTimeout()))
    t3 = loop.create_task(conn.connect(req, None, ClientTimeout()))
    await asyncio.sleep(0, loop=loop)
    assert not t1.done()
    assert not t2.done()
    assert len(conn._acquired_per_host[key]) == 1
    fut1.set_result(None)
    fut2.cancel()
    with pytest.raises(OSError):
        await t1
    with pytest.raises(asyncio.CancelledError):
        await t2
    ret = await t3
    assert len(conn._acquired_per_host[key]) == 1
    assert ret._key == key
    assert ret.protocol == proto
    assert proto in conn._acquired
    ret.release()
async def test_tcp_connector(aiohttp_client, loop) -> None:
    """Smoke test: a GET through the default TCP connector returns 200."""
    async def handler(request):
        return web.Response()
    app = web.Application()
    app.router.add_get('/', handler)
    client = await aiohttp_client(app)
    r = await client.get('/')
    assert r.status == 200
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
                    reason="requires unix socket")
def test_unix_connector_not_found(loop) -> None:
    """Connecting through a non-existent unix socket path raises
    ClientConnectorError."""
    # Random path that is guaranteed not to exist.
    connector = aiohttp.UnixConnector('/' + uuid.uuid4().hex, loop=loop)
    req = ClientRequest(
        'GET', URL('http://www.python.org'),
        loop=loop)
    with pytest.raises(aiohttp.ClientConnectorError):
        loop.run_until_complete(connector.connect(req, None, ClientTimeout()))
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
                    reason="requires unix socket")
def test_unix_connector_permission(loop) -> None:
    """A PermissionError from the event loop surfaces as
    ClientConnectorError."""
    loop.create_unix_connection = make_mocked_coro(
        raise_exception=PermissionError())
    connector = aiohttp.UnixConnector('/' + uuid.uuid4().hex, loop=loop)
    req = ClientRequest(
        'GET', URL('http://www.python.org'),
        loop=loop)
    with pytest.raises(aiohttp.ClientConnectorError):
        loop.run_until_complete(connector.connect(req, None, ClientTimeout()))
def test_default_use_dns_cache(loop) -> None:
    """DNS caching is enabled on a TCPConnector by default."""
    connector = aiohttp.TCPConnector(loop=loop)
    assert connector.use_dns_cache
async def test_resolver_not_called_with_address_is_ip(loop) -> None:
    """When the host is already an IP literal the resolver must be skipped."""
    resolver = mock.MagicMock()
    connector = aiohttp.TCPConnector(resolver=resolver)
    req = ClientRequest('GET',
                        URL('http://127.0.0.1:{}'.format(unused_port())),
                        loop=loop,
                        response_class=mock.Mock())
    # Nothing listens on the unused port, so the connect itself fails...
    with pytest.raises(OSError):
        await connector.connect(req, None, ClientTimeout())
    # ...but the resolver was never consulted.
    resolver.resolve.assert_not_called()
async def test_tcp_connector_raise_connector_ssl_error(aiohttp_server) -> None:
    """An untrusted server certificate raises the SSL-specific connector
    error (certificate variant on Python 3.7+)."""
    async def handler(request):
        return web.Response()
    app = web.Application()
    app.router.add_get('/', handler)
    here = os.path.join(os.path.dirname(__file__), '..', 'tests')
    keyfile = os.path.join(here, 'sample.key')
    certfile = os.path.join(here, 'sample.crt')
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    sslcontext.load_cert_chain(certfile, keyfile)
    srv = await aiohttp_server(app, ssl=sslcontext)
    port = unused_port()
    conn = aiohttp.TCPConnector(local_addr=('127.0.0.1', port))
    session = aiohttp.ClientSession(connector=conn)
    url = srv.make_url('/')
    # Python 3.7 reports certificate problems with a dedicated error class.
    if PY_37:
        err = aiohttp.ClientConnectorCertificateError
    else:
        err = aiohttp.ClientConnectorSSLError
    with pytest.raises(err) as ctx:
        await session.get(url)
    if PY_37:
        assert isinstance(ctx.value, aiohttp.ClientConnectorCertificateError)
        assert isinstance(ctx.value.certificate_error, ssl.SSLError)
    else:
        assert isinstance(ctx.value, aiohttp.ClientSSLError)
        assert isinstance(ctx.value.os_error, ssl.SSLError)
    await session.close()
async def test_tcp_connector_do_not_raise_connector_ssl_error(
        aiohttp_server) -> None:
    """Passing an explicit ssl context to get() makes the request succeed
    and that exact context is used on the transport."""
    async def handler(request):
        return web.Response()
    app = web.Application()
    app.router.add_get('/', handler)
    here = os.path.join(os.path.dirname(__file__), '..', 'tests')
    keyfile = os.path.join(here, 'sample.key')
    certfile = os.path.join(here, 'sample.crt')
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    sslcontext.load_cert_chain(certfile, keyfile)
    srv = await aiohttp_server(app, ssl=sslcontext)
    port = unused_port()
    conn = aiohttp.TCPConnector(local_addr=('127.0.0.1', port))
    session = aiohttp.ClientSession(connector=conn)
    url = srv.make_url('/')
    r = await session.get(url, ssl=sslcontext)
    r.release()
    first_conn = next(iter(conn._conns.values()))[0][0]
    # Transport internals differ between asyncio versions; try both shapes.
    try:
        _sslcontext = first_conn.transport._ssl_protocol._sslcontext
    except AttributeError:
        _sslcontext = first_conn.transport._sslcontext
    assert _sslcontext is sslcontext
    r.close()
    await session.close()
    conn.close()
async def test_tcp_connector_uses_provided_local_addr(aiohttp_server) -> None:
    """The local_addr passed to TCPConnector is used as the socket's
    local endpoint."""
    async def handler(request):
        return web.Response()
    app = web.Application()
    app.router.add_get('/', handler)
    srv = await aiohttp_server(app)
    port = unused_port()
    conn = aiohttp.TCPConnector(local_addr=('127.0.0.1', port))
    session = aiohttp.ClientSession(connector=conn)
    url = srv.make_url('/')
    r = await session.get(url)
    r.release()
    first_conn = next(iter(conn._conns.values()))[0][0]
    # The socket was bound locally to the requested address/port.
    assert first_conn.transport.get_extra_info(
        'sockname') == ('127.0.0.1', port)
    r.close()
    await session.close()
    conn.close()
@pytest.mark.skipif(not hasattr(socket, 'AF_UNIX'),
                    reason='requires UNIX sockets')
async def test_unix_connector(unix_server, unix_sockname) -> None:
    """A request over a unix domain socket round-trips with status 200."""
    async def handler(request):
        return web.Response()
    app = web.Application()
    app.router.add_get('/', handler)
    await unix_server(app)
    # The HTTP host is irrelevant; routing happens via the socket path.
    url = "http://127.0.0.1/"
    connector = aiohttp.UnixConnector(unix_sockname)
    assert unix_sockname == connector.path
    session = client.ClientSession(connector=connector)
    r = await session.get(url)
    assert r.status == 200
    r.close()
    await session.close()
class TestDNSCacheTable:
    """Unit tests for the connector's internal _DNSCacheTable helper."""

    @pytest.fixture
    def dns_cache_table(self):
        """A fresh cache table without any TTL configured."""
        return _DNSCacheTable()

    def test_next_addrs_basic(self, dns_cache_table) -> None:
        """Each cached host resolves to its own address list; unknown
        hosts raise KeyError."""
        dns_cache_table.add('localhost', ['127.0.0.1'])
        dns_cache_table.add('foo', ['127.0.0.2'])
        assert dns_cache_table.next_addrs('localhost') == ['127.0.0.1']
        assert dns_cache_table.next_addrs('foo') == ['127.0.0.2']
        with pytest.raises(KeyError):
            dns_cache_table.next_addrs('no-such-host')

    def test_remove(self, dns_cache_table) -> None:
        """A removed host can no longer be resolved."""
        dns_cache_table.add('localhost', ['127.0.0.1'])
        dns_cache_table.remove('localhost')
        with pytest.raises(KeyError):
            dns_cache_table.next_addrs('localhost')

    def test_clear(self, dns_cache_table) -> None:
        """clear() wipes every cached entry."""
        dns_cache_table.add('localhost', ['127.0.0.1'])
        dns_cache_table.clear()
        with pytest.raises(KeyError):
            dns_cache_table.next_addrs('localhost')

    def test_not_expired_ttl_None(self, dns_cache_table) -> None:
        """Entries never expire when no TTL was configured."""
        dns_cache_table.add('localhost', ['127.0.0.1'])
        assert not dns_cache_table.expired('localhost')

    def test_not_expired_ttl(self) -> None:
        """A fresh entry is still valid inside its TTL window."""
        table = _DNSCacheTable(ttl=0.1)
        table.add('localhost', ['127.0.0.1'])
        assert not table.expired('localhost')

    async def test_expired_ttl(self, loop) -> None:
        """An entry is reported expired once its TTL has elapsed."""
        table = _DNSCacheTable(ttl=0.01)
        table.add('localhost', ['127.0.0.1'])
        await asyncio.sleep(0.02, loop=loop)
        assert table.expired('localhost')

    def test_next_addrs(self, dns_cache_table) -> None:
        """next_addrs rotates the stored addresses round-robin per call."""
        dns_cache_table.add('foo', ['127.0.0.1', '127.0.0.2', '127.0.0.3'])
        rotations = [
            ['127.0.0.1', '127.0.0.2', '127.0.0.3'],
            ['127.0.0.2', '127.0.0.3', '127.0.0.1'],
            ['127.0.0.3', '127.0.0.1', '127.0.0.2'],
            ['127.0.0.1', '127.0.0.2', '127.0.0.3'],
        ]
        for expected in rotations:
            assert dns_cache_table.next_addrs('foo') == expected

    def test_next_addrs_single(self, dns_cache_table) -> None:
        """A single-address host always yields that one address."""
        dns_cache_table.add('foo', ['127.0.0.1'])
        for _ in range(2):
            assert dns_cache_table.next_addrs('foo') == ['127.0.0.1']
| 29.502515 | 79 | 0.6601 |
f5bd37e3c3c9b584a909ec826e92c40c88c00db9 | 119 | py | Python | certbot/__init__.py | pub-repository/letsencrypt | c4684f187a4ce4ef13425cfba607dec9d8bfa963 | [
"Apache-2.0"
] | 1 | 2019-12-29T16:34:18.000Z | 2019-12-29T16:34:18.000Z | certbot/__init__.py | pub-repository/letsencrypt | c4684f187a4ce4ef13425cfba607dec9d8bfa963 | [
"Apache-2.0"
] | null | null | null | certbot/__init__.py | pub-repository/letsencrypt | c4684f187a4ce4ef13425cfba607dec9d8bfa963 | [
"Apache-2.0"
] | 1 | 2019-12-29T16:34:20.000Z | 2019-12-29T16:34:20.000Z | """Certbot client."""
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
__version__ = '0.37.0.dev0'
| 23.8 | 67 | 0.680672 |
cc5db4ad771c81470dd2885f31a55898112e33ec | 2,507 | py | Python | scripts/dmriqc_frf.py | arnaudbore/dmriqcpy | be3efa95753324809940614b3499a5004757ec33 | [
"MIT"
] | null | null | null | scripts/dmriqc_frf.py | arnaudbore/dmriqcpy | be3efa95753324809940614b3499a5004757ec33 | [
"MIT"
] | 12 | 2021-05-09T12:29:26.000Z | 2022-03-22T13:00:52.000Z | scripts/dmriqc_frf.py | arnaudbore/dmriqcpy | be3efa95753324809940614b3499a5004757ec33 | [
"MIT"
] | 3 | 2021-04-09T18:22:41.000Z | 2021-06-03T22:22:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import shutil
import numpy as np
from dmriqcpy.analysis.stats import stats_frf
from dmriqcpy.io.report import Report
from dmriqcpy.io.utils import (add_online_arg, add_overwrite_arg,
assert_inputs_exist, assert_outputs_exist)
from dmriqcpy.viz.graph import graph_frf
from dmriqcpy.viz.utils import analyse_qa, dataframe_to_html
DESCRIPTION = """
Compute the fiber response function (frf) report in HTML format.
"""
def _build_arg_parser():
    """Build the command-line parser for the FRF QC report script."""
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('frf', nargs='+',
                        help='Fiber response function (frf) files (in txt format).')
    parser.add_argument('output_report',
                        help='Filename of QC report (in html format).')
    add_online_arg(parser)
    add_overwrite_arg(parser)
    return parser
def main():
    """Entry point: compute FRF statistics and render the HTML QC report."""
    parser = _build_arg_parser()
    args = parser.parse_args()
    assert_inputs_exist(parser, args.frf)
    assert_outputs_exist(parser, args, [args.output_report, "libs"])
    # Remove leftovers from a previous report generation.
    if os.path.exists("libs"):
        shutil.rmtree("libs")
    name = "FRF"
    metrics_names = ["Mean Eigen value 1", "Mean Eigen value 2", "Mean B0"]
    warning_dict = {}
    summary, stats = stats_frf(metrics_names, args.frf)
    # Flag subjects whose metrics deviate from the cohort statistics.
    warning_dict[name] = analyse_qa(summary, stats, metrics_names)
    warning_list = np.concatenate([filenames for filenames in warning_dict[name].values()])
    warning_dict[name]['nb_warnings'] = len(np.unique(warning_list))
    graphs = []
    graph = graph_frf("FRF", metrics_names, summary, args.online)
    graphs.append(graph)
    summary_dict = {}
    stats_html = dataframe_to_html(stats)
    summary_dict[name] = stats_html
    metrics_dict = {}
    subjects_dict = {}
    # One HTML statistics table per input FRF file.
    for subj_metric in args.frf:
        summary_html = dataframe_to_html(summary.loc[subj_metric])
        subjects_dict[subj_metric] = {}
        subjects_dict[subj_metric]['stats'] = summary_html
    metrics_dict[name] = subjects_dict
    nb_subjects = len(args.frf)
    report = Report(args.output_report)
    report.generate(title="Quality Assurance FRF",
                    nb_subjects=nb_subjects, summary_dict=summary_dict,
                    graph_array=graphs, metrics_dict=metrics_dict,
                    warning_dict=warning_dict,
                    online=args.online)
# Allow running the QC script directly from the command line.
if __name__ == '__main__':
    main()
| 29.494118 | 91 | 0.670921 |
b9b3b41c32426522c90f5063d41c0d80abb867bc | 2,138 | py | Python | sdks/python/apache_beam/io/gcp/bigquery_io_read_it_test.py | rodrigob/beam | e2ce4037f85619f946b3d6a3a90955cdf1c19b4a | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2020-08-30T14:49:58.000Z | 2020-08-30T15:02:38.000Z | sdks/python/apache_beam/io/gcp/bigquery_io_read_it_test.py | rodrigob/beam | e2ce4037f85619f946b3d6a3a90955cdf1c19b4a | [
"Apache-2.0",
"BSD-3-Clause"
] | 11 | 2018-05-22T06:08:39.000Z | 2018-10-05T15:02:21.000Z | sdks/python/apache_beam/io/gcp/bigquery_io_read_it_test.py | rodrigob/beam | e2ce4037f85619f946b3d6a3a90955cdf1c19b4a | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Dataflow job that counts the number of rows in a BQ table.
Can be configured to simulate slow reading for a given number of rows.
"""
from __future__ import absolute_import
import logging
import unittest
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.io.gcp import bigquery_io_read_pipeline
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
class BigqueryIOReadIT(unittest.TestCase):
  """Integration test that runs the BigQuery read pipeline and checks
  it processed the expected number of records."""

  # Dataset and table-name prefix used by the pipeline under test.
  DEFAULT_DATASET = "big_query_import_export"
  DEFAULT_TABLE_PREFIX = "export_"
  # Maps a table-size suffix to the expected record count.
  NUM_RECORDS = {"1K": 1000,}
  def run_bigquery_io_read_pipeline(self, input_size):
    """Run the read pipeline against the table named for *input_size*."""
    test_pipeline = TestPipeline(is_integration_test=True)
    pipeline_verifiers = [PipelineStateMatcher(),]
    extra_opts = {'input_table': self.DEFAULT_DATASET + "." +
                                 self.DEFAULT_TABLE_PREFIX + input_size,
                  'num_records': self.NUM_RECORDS[input_size],
                  'on_success_matcher': all_of(*pipeline_verifiers)}
    bigquery_io_read_pipeline.run(test_pipeline.get_full_options_as_args(
        **extra_opts))
  @attr('IT')
  def test_1K_table(self):
    self.run_bigquery_io_read_pipeline('1K')
# Run the integration test directly with verbose logging.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| 35.04918 | 74 | 0.755379 |
3ad4799a60594e7ed0e74b28e4806078b269fb31 | 1,608 | py | Python | linked_list/singly_linked_list/linked_queue.py | zhaijingrong/PythonAlgorithm | 7596194ca0f94cfdb302fc9d826b0b02329824c6 | [
"MIT"
] | null | null | null | linked_list/singly_linked_list/linked_queue.py | zhaijingrong/PythonAlgorithm | 7596194ca0f94cfdb302fc9d826b0b02329824c6 | [
"MIT"
] | null | null | null | linked_list/singly_linked_list/linked_queue.py | zhaijingrong/PythonAlgorithm | 7596194ca0f94cfdb302fc9d826b0b02329824c6 | [
"MIT"
] | null | null | null | class Empty(Exception):
pass
class LinkedQueue:
    """FIFO queue backed by a singly linked list."""

    class _Node:
        """Internal linked-list node holding one element and a next pointer."""
        __slots__ = '_element', '_next'

        def __init__(self, e, next):
            self._element = e
            self._next = next

    def __init__(self):
        """Create an empty queue."""
        self._head = None
        self._tail = None
        self._size = 0

    def __len__(self):
        """Return the number of elements currently stored."""
        return self._size

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return len(self) == 0

    def first(self):
        """Return (without removing) the front element.

        Raises Empty when the queue is empty.
        """
        if self.is_empty():
            raise Empty('Queue is empty')
        return self._head._element

    def dequeue(self):
        """Remove and return the front element.

        Raises Empty when the queue is empty.
        """
        if self.is_empty():
            raise Empty('Queue is empty')
        front = self._head
        self._head = front._next
        self._size -= 1
        if self._size == 0:
            # Queue became empty: drop the stale tail reference.
            self._tail = None
        return front._element

    def enqueue(self, e):
        """Append element *e* at the back of the queue."""
        node = self._Node(e, None)
        if self._head is None:
            self._head = node
        else:
            self._tail._next = node
        self._tail = node
        self._size += 1
0377b485df1aa9d9eaf668b5645760688ded7212 | 364 | py | Python | keyscraper/pipelines.py | YuChenHeMTL/KeyScraper | 9e40ac99499c8b149a060b7089ad16e47813cd87 | [
"MIT"
] | null | null | null | keyscraper/pipelines.py | YuChenHeMTL/KeyScraper | 9e40ac99499c8b149a060b7089ad16e47813cd87 | [
"MIT"
] | null | null | null | keyscraper/pipelines.py | YuChenHeMTL/KeyScraper | 9e40ac99499c8b149a060b7089ad16e47813cd87 | [
"MIT"
] | null | null | null | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class KeyscraperPipeline:
    """Scrapy item pipeline that forwards every item unmodified."""

    def process_item(self, item, spider):
        # No transformation: returning the item keeps it in the export feed.
        return item
| 26 | 66 | 0.769231 |
2050f2c5d8c7489cd012ce58200e36ac720bf41d | 39,813 | py | Python | src/oic/oic/message.py | alanbuxey/pyoidc | 5f2d9ac468aaad599260f70481062c9d31273da2 | [
"Apache-2.0"
] | null | null | null | src/oic/oic/message.py | alanbuxey/pyoidc | 5f2d9ac468aaad599260f70481062c9d31273da2 | [
"Apache-2.0"
] | null | null | null | src/oic/oic/message.py | alanbuxey/pyoidc | 5f2d9ac468aaad599260f70481062c9d31273da2 | [
"Apache-2.0"
] | null | null | null | import inspect
import json
import logging
import sys
import time
import warnings
from typing import Dict # noqa
from typing import List # noqa
from urllib.parse import urlencode
from urllib.parse import urlparse
from jwkest import jws
from jwkest.jwe import JWEException
from jwkest.jwe import factory as JWE_factory
from jwkest.jwt import JWT
from oic.exception import InvalidRequest
from oic.exception import IssuerMismatch
from oic.exception import MessageException
from oic.exception import NotForMe
from oic.exception import PyoidcError
from oic.oauth2 import message
from oic.oauth2.exception import VerificationError
from oic.oauth2.message import OPTIONAL_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import OPTIONAL_LIST_OF_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_SP_SEP_STRINGS
from oic.oauth2.message import REQUIRED_LIST_OF_STRINGS
from oic.oauth2.message import SINGLE_OPTIONAL_INT
from oic.oauth2.message import SINGLE_OPTIONAL_JSON
from oic.oauth2.message import SINGLE_OPTIONAL_STRING
from oic.oauth2.message import SINGLE_REQUIRED_STRING
from oic.oauth2.message import Message
from oic.oauth2.message import MessageFactory
from oic.oauth2.message import MessageTuple
from oic.oauth2.message import MissingRequiredAttribute
from oic.oauth2.message import MissingRequiredValue
from oic.oauth2.message import NotAllowedValue
from oic.oauth2.message import ParamDefinition
from oic.oauth2.message import SchemeError
from oic.utils import time_util
from oic.utils.time_util import utc_time_sans_frac
__author__ = "rohe0002"
# Module-level logger for the OIDC message classes.
logger = logging.getLogger(__name__)
# How long (seconds) nonces are kept around: 4 hours.
NONCE_STORAGE_TIME = 4 * 3600
class AtHashError(VerificationError):
    """The at_hash in an ID Token does not match the access token."""
    pass
class CHashError(VerificationError):
    """The c_hash in an ID Token does not match the authorization code."""
    pass
class EXPError(VerificationError):
    """Verification failure related to the 'exp' (expiration) claim."""
    pass
class IATError(VerificationError):
    """Verification failure related to the 'iat' (issued-at) claim."""
    pass
def json_ser(val, sformat=None, lev=0):
    """Serialize *val* as a JSON string (sformat/lev are ignored)."""
    return json.dumps(val)
def json_deser(val, sformat=None, lev=0):
    """Deserialize the JSON string *val* (sformat/lev are ignored)."""
    return json.loads(val)
def json_conv(val, sformat=None, lev=0):
    """Replace None/True/False dict values by 'none'/'true'/'false'.

    Mutates *val* in place when it is a dict and returns it; any other
    input is returned untouched. Identity checks are used, so e.g. the
    integers 0 and 1 are left alone.
    """
    if isinstance(val, dict):
        for key, entry in val.items():
            if entry is True:
                val[key] = "true"
            elif entry is False:
                val[key] = "false"
            elif entry is None:
                val[key] = "none"
    return val
def json_rest(val, sformat=None, lev=0):
    """Inverse of json_conv: map 'none'/'true'/'false' values back.

    Mutates *val* in place when it is a dict and returns it; any other
    input is returned untouched.
    """
    if isinstance(val, dict):
        for key, entry in val.items():
            if entry == "true":
                val[key] = True
            elif entry == "false":
                val[key] = False
            elif entry == "none":
                val[key] = None
    return val
# ParamDefinition fields: value type, required, serializer, deserializer,
# null value allowed.
SINGLE_OPTIONAL_BOOLEAN = ParamDefinition(bool, False, None, None, False)
# JSON-serialized dict; the *_WN variant additionally permits a null value.
SINGLE_OPTIONAL_JSON_WN = ParamDefinition(dict, False, json_ser, json_deser, True)
# Dict run through json_conv/json_rest (None/bool <-> string conversion).
SINGLE_OPTIONAL_JSON_CONV = ParamDefinition(dict, False, json_conv, json_rest, True)
SINGLE_REQUIRED_INT = ParamDefinition(int, True, None, None, False)
def idtoken_deser(val, sformat="urlencoded"):
    """Deserialize *val* into an IdToken (always treated as a JWT)."""
    # id_token are always serialized as a JWT
    return IdToken().deserialize(val, "jwt")
def address_deser(val, sformat="urlencoded"):
    """Deserialize *val* into an AddressClaim.

    Non-string 'dict'/'json' input is first dumped to a JSON string and
    then parsed with the 'json' deserializer.
    """
    if sformat in ["dict", "json"]:
        if not isinstance(val, str):
            val = json.dumps(val)
            sformat = "json"
    elif sformat == "dict":
        sformat = "json"
    return AddressClaim().deserialize(val, sformat)
def claims_deser(val, sformat="urlencoded"):
    """Deserialize *val* into a Claims instance.

    Non-string 'dict'/'json' input is first dumped to a JSON string.
    """
    if sformat in ["dict", "json"]:
        if not isinstance(val, str):
            val = json.dumps(val)
            sformat = "json"
    return Claims().deserialize(val, sformat)
def message_deser(val, sformat="urlencoded"):
    """Deserialize *val* into a generic Message.

    Non-string 'dict'/'json' input is first dumped to a JSON string.
    """
    if sformat in ["dict", "json"]:
        if not isinstance(val, str):
            val = json.dumps(val)
            sformat = "json"
    return Message().deserialize(val, sformat)
def msg_ser(inst, sformat, lev=0):
    """Serialize *inst* (a Message or plain value) in the given format.

    :param inst: Message instance, dict, or str to serialize.
    :param sformat: One of 'urlencoded', 'json' or 'dict'.
    :param lev: Nesting level, passed through to Message.serialize().
    :raises MessageException: for an unsupported instance type with 'dict'.
    :raises PyoidcError: for an unknown *sformat*.
    """
    if sformat in ["urlencoded", "json"]:
        if isinstance(inst, Message):
            res = inst.serialize(sformat, lev)
        else:
            # Already a primitive; pass it through untouched.
            res = inst
    elif sformat == "dict":
        if isinstance(inst, Message):
            res = inst.serialize(sformat, lev)
        elif isinstance(inst, dict):
            res = inst
        elif isinstance(inst, str):  # Iff ID Token
            res = inst
        else:
            raise MessageException("Wrong type: %s" % type(inst))
    else:
        raise PyoidcError("Unknown sformat", inst)
    return res
def msg_ser_json(inst, sformat="json", lev=0):
    """Serialize *inst* to JSON at the top level, 'dict' when nested.

    :param inst: Message instance, dict, or plain value.
    :param sformat: Ignored except for 'dict'; any nested call (lev > 0)
        forces 'dict' so the result can be embedded in a larger structure.
    :param lev: Nesting level.
    :raises MessageException: for an unsupported type in 'dict' mode.
    """
    # sformat = "json" always except when dict
    if lev:
        sformat = "dict"
    if sformat == "dict":
        if isinstance(inst, Message):
            res = inst.serialize(sformat, lev)
        elif isinstance(inst, dict):
            res = inst
        else:
            raise MessageException("Wrong type: %s" % type(inst))
    else:
        sformat = "json"
        if isinstance(inst, Message):
            res = inst.serialize(sformat, lev)
        else:
            res = inst
    return res
def msg_list_ser(insts, sformat, lev=0):
    """Serialize each element of *insts* with msg_ser, returning a list."""
    return [msg_ser(inst, sformat, lev) for inst in insts]
def claims_ser(val, sformat="urlencoded", lev=0):
    """Serialize a claims value (str, list, dict or Message).

    A list is represented by its first element. Message instances delegate
    to their own serialize(); plain mappings are encoded per *sformat*.

    :raises MessageException: when 'dict' is requested for a non-dict item.
    :raises PyoidcError: for an unknown *sformat*.
    """
    # everything in c_extension
    if isinstance(val, str):
        item = val
    elif isinstance(val, list):
        # Only the first list element is serialized.
        item = val[0]
    else:
        item = val
    if isinstance(item, Message):
        return item.serialize(method=sformat, lev=lev + 1)
    if sformat == "urlencoded":
        assert isinstance(  # nosec
            item, dict
        )  # We cannot urlencode anything else than Mapping
        res = urlencode(item)
    elif sformat == "json":
        if lev:
            # Nested: leave as a structure for the enclosing serializer.
            res = item
        else:
            res = json.dumps(item)
    elif sformat == "dict":
        if isinstance(item, dict):
            res = item
        else:
            raise MessageException("Wrong type: %s" % type(item))
    else:
        raise PyoidcError("Unknown sformat: %s" % sformat, val)
    return res
def registration_request_deser(val, sformat="urlencoded"):
    """Deserialize *val* into a RegistrationRequest.

    Non-string 'dict'/'json' input is first dumped to a JSON string.
    """
    if sformat in ["dict", "json"]:
        if not isinstance(val, str):
            val = json.dumps(val)
            sformat = "json"
    return RegistrationRequest().deserialize(val, sformat)
def claims_request_deser(val, sformat="json"):
    """Deserialize *val* into a ClaimsRequest (JSON only, never urlencoded)."""
    # never 'urlencoded'
    if sformat == "urlencoded":
        sformat = "json"
    if sformat in ["dict", "json"]:
        if not isinstance(val, str):
            val = json.dumps(val)
            sformat = "json"
    return ClaimsRequest().deserialize(val, sformat)
# ParamDefinition fields: value type, required, serializer, deserializer,
# null value allowed.
OPTIONAL_ADDRESS = ParamDefinition(Message, False, msg_ser, address_deser, False)
OPTIONAL_LOGICAL = ParamDefinition(bool, False, None, None, False)
OPTIONAL_MULTIPLE_Claims = ParamDefinition(
    Message, False, claims_ser, claims_deser, False
)
# ID Tokens travel as compact JWT strings.
SINGLE_OPTIONAL_IDTOKEN = ParamDefinition(str, False, msg_ser, None, False)
SINGLE_OPTIONAL_REGISTRATION_REQUEST = ParamDefinition(
    Message, False, msg_ser, registration_request_deser, False
)
# The claims request parameter is always JSON-serialized.
SINGLE_OPTIONAL_CLAIMSREQ = ParamDefinition(
    Message, False, msg_ser_json, claims_request_deser, False
)
OPTIONAL_MESSAGE = ParamDefinition(Message, False, msg_ser, message_deser, False)
REQUIRED_MESSAGE = ParamDefinition(Message, True, msg_ser, message_deser, False)
# ----------------------------------------------------------------------------
# Characters allowed in an OAuth2 scope token (RFC 6749 section 3.3):
# %x21 / %x23-5B / %x5D-7E.
SCOPE_CHARSET = []
for char in ["\x21", ("\x23", "\x5b"), ("\x5d", "\x7E")]:
    if isinstance(char, tuple):
        # Expand an inclusive (first, last) range of characters.
        c = char[0]
        while c <= char[1]:
            SCOPE_CHARSET.append(c)
            c = chr(ord(c) + 1)
    else:
        # Bug fix: this used to append the builtin `set` type instead of
        # the character itself, so '!' (0x21) was never in the charset.
        SCOPE_CHARSET.append(char)
def check_char_set(string, allowed):
    """Raise NotAllowedValue if *string* has a character not in *allowed*."""
    offenders = [ch for ch in string if ch not in allowed]
    if offenders:
        raise NotAllowedValue("'%c' not in the allowed character set" % offenders[0])
TOKEN_VERIFY_ARGS = ["key", "keyjar", "algs", "sender"]
def verify_id_token(instance, check_hash=False, **kwargs):
    """Decrypt (if necessary), parse and verify instance['id_token'].

    :param instance: Message carrying an 'id_token' entry in JWS or JWE
        compact serialization.
    :param check_hash: When True also validate at_hash/c_hash against the
        access_token/code values found in *instance*.
    :param kwargs: May contain 'key', 'keyjar', 'algs', 'sender' (forwarded
        to IdToken.from_jwt) plus anything IdToken.verify() accepts.
    :return: The verified IdToken instance.
    :raises VerificationError: on decryption/verification failure.
    :raises MissingRequiredAttribute: when 'iss' or a required hash is absent.
    :raises AtHashError / CHashError: when a hash comparison fails.
    """
    # Try to decode the JWT, checks the signature
    args = {}
    for arg in TOKEN_VERIFY_ARGS:
        try:
            args[arg] = kwargs[arg]
        except KeyError:
            pass
    _jws = str(instance["id_token"])
    # It can be encrypted, so try to decrypt first
    _jwe = JWE_factory(_jws)
    if _jwe is not None:
        try:
            _jws = _jwe.decrypt(keys=kwargs["keyjar"].get_decrypt_key())
        except JWEException as err:
            raise VerificationError("Could not decrypt id_token", err)
    _packer = JWT()
    _body = _packer.unpack(_jws).payload()
    if "keyjar" in kwargs:
        # The issuer must be known to the key jar before signature checks.
        try:
            if _body["iss"] not in kwargs["keyjar"]:
                raise ValueError("Unknown issuer")
        except KeyError:
            raise MissingRequiredAttribute("iss")
    if _jwe is not None:
        # Use the original encrypted token to set correct headers
        idt = IdToken().from_jwt(str(instance["id_token"]), **args)
    else:
        idt = IdToken().from_jwt(_jws, **args)
    if not idt.verify(**kwargs):
        raise VerificationError("Could not verify id_token", idt)
    if check_hash:
        _alg = idt.jws_header["alg"]
        # What if _alg == 'none'
        # Derive the hash function from the signing algorithm's bit length.
        hfunc = "HS" + _alg[-3:]
        if "access_token" in instance:
            if "at_hash" not in idt:
                raise MissingRequiredAttribute("Missing at_hash property", idt)
            if idt["at_hash"] != jws.left_hash(instance["access_token"], hfunc):
                raise AtHashError("Failed to verify access_token hash", idt)
        if "code" in instance:
            if "c_hash" not in idt:
                raise MissingRequiredAttribute("Missing c_hash property", idt)
            if idt["c_hash"] != jws.left_hash(instance["code"], hfunc):
                raise CHashError("Failed to verify code hash", idt)
    return idt
# -----------------------------------------------------------------------------
class RefreshAccessTokenRequest(message.RefreshAccessTokenRequest):
    """OIDC refresh token request; identical to the plain OAuth2 message."""
    pass
class TokenErrorResponse(message.TokenErrorResponse):
    """OIDC token error response; identical to the plain OAuth2 message."""
    pass
class AccessTokenResponse(message.AccessTokenResponse):
    """OAuth2 access-token response extended with an optional ``id_token``."""

    c_param = message.AccessTokenResponse.c_param.copy()
    c_param.update({"id_token": SINGLE_OPTIONAL_STRING})

    def verify(self, **kwargs):
        """Verify the response; on success any ``id_token`` JWT is replaced
        by the verified IdToken instance. Returns True."""
        super().verify(**kwargs)
        if "id_token" in self:
            # replace the JWT with the verified IdToken instance
            self["id_token"] = verify_id_token(self, **kwargs)
        return True
class UserInfoRequest(Message):
    """Request to the userinfo endpoint; token may also travel in a header."""

    c_param = {"access_token": SINGLE_OPTIONAL_STRING}
class AuthorizationResponse(message.AuthorizationResponse, message.AccessTokenResponse):
    """Authorization response that may also carry token material
    (hybrid/implicit flows), hence the double inheritance."""

    c_param = message.AuthorizationResponse.c_param.copy()
    c_param.update(message.AccessTokenResponse.c_param)
    c_param.update(
        {
            "code": SINGLE_OPTIONAL_STRING,
            "access_token": SINGLE_OPTIONAL_STRING,
            "token_type": SINGLE_OPTIONAL_STRING,
            "id_token": SINGLE_OPTIONAL_IDTOKEN,
        }
    )

    def verify(self, **kwargs):
        """Verify the response.

        Returns False if an ``aud`` claim is present but does not contain
        our ``client_id``; True otherwise. Any ``id_token`` is replaced by
        the verified IdToken (with at_hash/c_hash checking enabled).
        """
        super().verify(**kwargs)
        if "aud" in self:
            if "client_id" in kwargs:
                # check that it's for me
                if kwargs["client_id"] not in self["aud"]:
                    return False
        if "id_token" in self:
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
        if "access_token" in self:
            # An access token must always be accompanied by its type.
            if "token_type" not in self:
                raise MissingRequiredValue("Missing token_type parameter", self)
        return True
class AuthorizationErrorResponse(message.AuthorizationErrorResponse):
    """OAuth2 authorization error response extended with the OIDC-specific
    error codes (OIDC Core section 3.1.2.6)."""

    c_allowed_values = message.AuthorizationErrorResponse.c_allowed_values.copy()
    c_allowed_values["error"].extend(
        [
            "interaction_required",
            "login_required",
            "session_selection_required",
            "consent_required",
            "invalid_request_uri",
            "invalid_request_object",
            "registration_not_supported",
            "request_not_supported",
            "request_uri_not_supported",
        ]
    )
class AuthorizationRequest(message.AuthorizationRequest):
    """OIDC authentication request; extends the OAuth2 authorization request
    with the parameters of OIDC Core section 3.1.2.1."""

    c_param = message.AuthorizationRequest.c_param.copy()
    c_param.update(
        {
            "scope": REQUIRED_LIST_OF_SP_SEP_STRINGS,
            "redirect_uri": SINGLE_REQUIRED_STRING,
            "nonce": SINGLE_OPTIONAL_STRING,
            "display": SINGLE_OPTIONAL_STRING,
            "prompt": OPTIONAL_LIST_OF_STRINGS,
            "max_age": SINGLE_OPTIONAL_INT,
            "ui_locales": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "claims_locales": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "id_token_hint": SINGLE_OPTIONAL_STRING,
            "login_hint": SINGLE_OPTIONAL_STRING,
            "acr_values": OPTIONAL_LIST_OF_SP_SEP_STRINGS,
            "claims": SINGLE_OPTIONAL_CLAIMSREQ,
            "registration": SINGLE_OPTIONAL_JSON,
            "request": SINGLE_OPTIONAL_STRING,
            "request_uri": SINGLE_OPTIONAL_STRING,
            "response_mode": SINGLE_OPTIONAL_STRING,
        }
    )
    c_allowed_values = message.AuthorizationRequest.c_allowed_values.copy()
    c_allowed_values.update(
        {
            "display": ["page", "popup", "touch", "wap"],
            "prompt": ["none", "login", "consent", "select_account"],
        }
    )

    def verify(self, **kwargs):
        """
        Check that the request is valid.

        Authorization Request parameters that are OPTIONAL in the OAuth 2.0
        specification MAY be included in the OpenID Request Object without also
        passing them as OAuth 2.0 Authorization Request parameters, with one
        exception: The scope parameter MUST always be present in OAuth 2.0
        Authorization Request parameters.
        All parameter values that are present both in the OAuth 2.0
        Authorization Request and in the OpenID Request Object MUST exactly match.
        """
        super().verify(**kwargs)
        args = {}
        for arg in ["key", "keyjar", "opponent_id", "sender"]:
            try:
                args[arg] = kwargs[arg]
            except KeyError:
                pass
        if "opponent_id" not in kwargs:
            # Default: assume the request object was produced by the client.
            args["opponent_id"] = self["client_id"]
        if "request" in self:
            if isinstance(self["request"], str):
                # Try to decode the JWT, checks the signature
                oidr = OpenIDRequest().from_jwt(str(self["request"]), **args)
                # verify that nothing was changed relative to the original message
                for key, val in oidr.items():
                    if key in self and self[key] != val:
                        raise AssertionError()
                # replace the JWT with the parsed and verified instance
                self["request"] = oidr
        if "id_token_hint" in self:
            if isinstance(self["id_token_hint"], str):
                idt = IdToken().from_jwt(str(self["id_token_hint"]), **args)
                self["id_token_hint"] = idt
        if "response_type" not in self:
            raise MissingRequiredAttribute("response_type missing", self)
        _rt = self["response_type"]
        # Implicit/hybrid flows require a nonce to bind the id_token.
        if "token" in _rt or "id_token" in _rt:
            if "nonce" not in self:
                raise MissingRequiredAttribute("Nonce missing", self)
        if "openid" not in self.get("scope", []):
            raise MissingRequiredValue("openid not in scope", self)
        if "offline_access" in self.get("scope", []):
            # offline_access is only honored with prompt=consent (OIDC Core 11).
            if "prompt" not in self or "consent" not in self["prompt"]:
                raise MissingRequiredValue("consent in prompt", self)
        if "prompt" in self:
            if "none" in self["prompt"] and len(self["prompt"]) > 1:
                raise InvalidRequest("prompt none combined with other value", self)
        return True
class AccessTokenRequest(message.AccessTokenRequest):
    """OAuth2 token request extended with JWT client-assertion parameters
    (private_key_jwt / client_secret_jwt authentication)."""

    c_param = message.AccessTokenRequest.c_param.copy()
    c_param.update(
        {
            "client_assertion_type": SINGLE_OPTIONAL_STRING,
            "client_assertion": SINGLE_OPTIONAL_STRING,
        }
    )
    c_default = {"grant_type": "authorization_code"}
    c_allowed_values = {
        "client_assertion_type": [
            "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
        ]
    }
class AddressClaim(Message):
    """Postal address claim (OIDC Core section 5.1.1); all fields optional."""

    c_param = {
        "formatted": SINGLE_OPTIONAL_STRING,
        "street_address": SINGLE_OPTIONAL_STRING,
        "locality": SINGLE_OPTIONAL_STRING,
        "region": SINGLE_OPTIONAL_STRING,
        "postal_code": SINGLE_OPTIONAL_STRING,
        "country": SINGLE_OPTIONAL_STRING,
    }
class OpenIDSchema(Message):
    """Standard OpenID Connect user-info claim set (OIDC Core section 5.1)."""

    c_param = {
        "sub": SINGLE_REQUIRED_STRING,
        "name": SINGLE_OPTIONAL_STRING,
        "given_name": SINGLE_OPTIONAL_STRING,
        "family_name": SINGLE_OPTIONAL_STRING,
        "middle_name": SINGLE_OPTIONAL_STRING,
        "nickname": SINGLE_OPTIONAL_STRING,
        "preferred_username": SINGLE_OPTIONAL_STRING,
        "profile": SINGLE_OPTIONAL_STRING,
        "picture": SINGLE_OPTIONAL_STRING,
        "website": SINGLE_OPTIONAL_STRING,
        "email": SINGLE_OPTIONAL_STRING,
        "email_verified": SINGLE_OPTIONAL_BOOLEAN,
        "gender": SINGLE_OPTIONAL_STRING,
        "birthdate": SINGLE_OPTIONAL_STRING,
        "zoneinfo": SINGLE_OPTIONAL_STRING,
        "locale": SINGLE_OPTIONAL_STRING,
        "phone_number": SINGLE_OPTIONAL_STRING,
        "phone_number_verified": SINGLE_OPTIONAL_BOOLEAN,
        "address": OPTIONAL_ADDRESS,
        "updated_at": SINGLE_OPTIONAL_INT,
        "_claim_names": OPTIONAL_MESSAGE,
        "_claim_sources": OPTIONAL_MESSAGE,
    }

    def verify(self, **kwargs):
        """Verify the claim set.

        ``birthdate`` must match one of ``YYYY-MM-DD``, ``YYYY`` or
        ``0000-MM-DD`` (year withheld).

        :return: False if any claim value is None, True otherwise.
        :raises VerificationError: on a malformed ``birthdate``.
        """
        super().verify(**kwargs)
        if "birthdate" in self:
            # Either YYYY-MM-DD or just YYYY or 0000-MM-DD.
            # Try each accepted format in turn instead of nesting try/except.
            for fmt in ("%Y-%m-%d", "%Y", "0000-%m-%d"):
                try:
                    time.strptime(self["birthdate"], fmt)
                    break
                except ValueError:
                    continue
            else:
                raise VerificationError("Birthdate format error", self)
        if any(val is None for val in self.values()):
            return False
        return True
class RegistrationRequest(Message):
    """Dynamic client registration request (OIDC Registration section 2)."""

    c_param = {
        "redirect_uris": REQUIRED_LIST_OF_STRINGS,
        "response_types": OPTIONAL_LIST_OF_STRINGS,
        "grant_types": OPTIONAL_LIST_OF_STRINGS,
        "application_type": SINGLE_OPTIONAL_STRING,
        "contacts": OPTIONAL_LIST_OF_STRINGS,
        "client_name": SINGLE_OPTIONAL_STRING,
        "logo_uri": SINGLE_OPTIONAL_STRING,
        "client_uri": SINGLE_OPTIONAL_STRING,
        "policy_uri": SINGLE_OPTIONAL_STRING,
        "tos_uri": SINGLE_OPTIONAL_STRING,
        "jwks": SINGLE_OPTIONAL_STRING,
        "jwks_uri": SINGLE_OPTIONAL_STRING,
        "sector_identifier_uri": SINGLE_OPTIONAL_STRING,
        "subject_type": SINGLE_OPTIONAL_STRING,
        "id_token_signed_response_alg": SINGLE_OPTIONAL_STRING,
        "id_token_encrypted_response_alg": SINGLE_OPTIONAL_STRING,
        "id_token_encrypted_response_enc": SINGLE_OPTIONAL_STRING,
        "userinfo_signed_response_alg": SINGLE_OPTIONAL_STRING,
        "userinfo_encrypted_response_alg": SINGLE_OPTIONAL_STRING,
        "userinfo_encrypted_response_enc": SINGLE_OPTIONAL_STRING,
        "request_object_signing_alg": SINGLE_OPTIONAL_STRING,
        "request_object_encryption_alg": SINGLE_OPTIONAL_STRING,
        "request_object_encryption_enc": SINGLE_OPTIONAL_STRING,
        "token_endpoint_auth_method": SINGLE_OPTIONAL_STRING,
        "token_endpoint_auth_signing_alg": SINGLE_OPTIONAL_STRING,
        "default_max_age": SINGLE_OPTIONAL_INT,
        "require_auth_time": OPTIONAL_LOGICAL,
        "default_acr_values": OPTIONAL_LIST_OF_STRINGS,
        "initiate_login_uri": SINGLE_OPTIONAL_STRING,
        "request_uris": OPTIONAL_LIST_OF_STRINGS,
        "post_logout_redirect_uris": OPTIONAL_LIST_OF_STRINGS,
        "frontchannel_logout_uri": SINGLE_OPTIONAL_STRING,
        "frontchannel_logout_session_required": OPTIONAL_LOGICAL,
        "backchannel_logout_uri": SINGLE_OPTIONAL_STRING,
        "backchannel_logout_session_required": OPTIONAL_LOGICAL,
    }
    c_default = {"application_type": "web", "response_types": ["code"]}
    c_allowed_values = {
        "application_type": ["native", "web"],
        "subject_type": ["public", "pairwise"],
    }

    def verify(self, **kwargs):
        """Verify the request; normalizes encryption parameter pairs.

        * ``initiate_login_uri`` must be https.
        * For each ``*_alg``/``*_enc`` pair: ``enc`` without ``alg`` is an
          error, ``alg`` without ``enc`` defaults ``enc`` to A128CBC-HS256.
        * ``token_endpoint_auth_signing_alg`` may not be "none".
        Returns True on success.
        """
        super().verify(**kwargs)
        if "initiate_login_uri" in self and not self["initiate_login_uri"].startswith(
            "https:"
        ):
            raise AssertionError()
        for param in [
            "request_object_encryption",
            "id_token_encrypted_response",
            "userinfo_encrypted_response",
        ]:
            alg_param = "%s_alg" % param
            enc_param = "%s_enc" % param
            if alg_param in self:
                if enc_param not in self:
                    # Spec default when only the alg half is registered.
                    self[enc_param] = "A128CBC-HS256"
            # both or none
            if enc_param in self and alg_param not in self:
                raise AssertionError()
        if (
            "token_endpoint_auth_signing_alg" in self
            and self["token_endpoint_auth_signing_alg"] == "none"
        ):
            raise AssertionError()
        return True
class RegistrationResponse(Message):
    """Response to client_register registration requests."""

    c_param = {
        "client_id": SINGLE_REQUIRED_STRING,
        "client_secret": SINGLE_OPTIONAL_STRING,
        "registration_access_token": SINGLE_OPTIONAL_STRING,
        "registration_client_uri": SINGLE_OPTIONAL_STRING,
        "client_id_issued_at": SINGLE_OPTIONAL_INT,
        "client_secret_expires_at": SINGLE_OPTIONAL_INT,
    }
    c_param.update(RegistrationRequest.c_param)

    def verify(self, **kwargs):
        """
        Verify that the response is valid.

        Implementations MUST either return both a Client Configuration Endpoint
        and a Registration Access Token or neither of them.

        :param kwargs:
        :return: True if the message is OK otherwise False
        """
        super(RegistrationResponse, self).verify(**kwargs)
        # Either both registration management values are present, or neither.
        if ("registration_client_uri" in self) != (
            "registration_access_token" in self
        ):
            raise VerificationError(
                (
                    "Only one of registration_client_uri"
                    " and registration_access_token present"
                ),
                self,
            )
        return True
class ClientRegistrationErrorResponse(message.ErrorResponse):
    """Error response from the dynamic client registration endpoint."""

    c_allowed_values = {
        "error": [
            "invalid_redirect_uri",
            "invalid_client_metadata",
            "invalid_configuration_parameter",
        ]
    }
class IdToken(OpenIDSchema):
    """ID Token claim set (OIDC Core section 2): user-info claims plus the
    mandatory token claims (iss/sub/aud/exp/iat)."""

    c_param = OpenIDSchema.c_param.copy()
    c_param.update(
        {
            "iss": SINGLE_REQUIRED_STRING,
            "sub": SINGLE_REQUIRED_STRING,
            "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
            "exp": SINGLE_REQUIRED_INT,
            "iat": SINGLE_REQUIRED_INT,
            "auth_time": SINGLE_OPTIONAL_INT,
            "nonce": SINGLE_OPTIONAL_STRING,
            "at_hash": SINGLE_OPTIONAL_STRING,
            "c_hash": SINGLE_OPTIONAL_STRING,
            "acr": SINGLE_OPTIONAL_STRING,
            "amr": OPTIONAL_LIST_OF_STRINGS,
            "azp": SINGLE_OPTIONAL_STRING,
            "sub_jwk": SINGLE_OPTIONAL_STRING,
        }
    )

    def verify(self, **kwargs):
        """Validate the ID Token per OIDC Core section 3.1.3.7.

        Recognized kwargs include ``iss``, ``client_id``, ``skew`` (allowed
        clock skew in seconds, default 0) and ``nonce_storage_time``.
        Returns True on success.
        """
        super(IdToken, self).verify(**kwargs)
        try:
            # Issuer must match the expected one, when given.
            if kwargs["iss"] != self["iss"]:
                raise IssuerMismatch("{} != {}".format(kwargs["iss"], self["iss"]))
        except KeyError:
            pass
        if "aud" in self:
            if "client_id" in kwargs:
                # check that I'm among the recipients
                if kwargs["client_id"] not in self["aud"]:
                    raise NotForMe(
                        "{} not in aud:{}".format(kwargs["client_id"], self["aud"]),
                        self,
                    )
            # Then azp has to be present and be one of the aud values
            if len(self["aud"]) > 1:
                if "azp" not in self:
                    raise VerificationError("azp missing", self)
                if self["azp"] not in self["aud"]:
                    raise VerificationError("Mismatch between azp and aud claims", self)
        if "azp" in self:
            if "client_id" in kwargs:
                if kwargs["client_id"] != self["azp"]:
                    raise NotForMe(
                        "{} != azp:{}".format(kwargs["client_id"], self["azp"]), self
                    )
        _now = time_util.utc_time_sans_frac()
        try:
            _skew = kwargs["skew"]
        except KeyError:
            _skew = 0
        try:
            _exp = self["exp"]
        except KeyError:
            raise MissingRequiredAttribute("exp")
        else:
            # Token expired (allowing for clock skew)?
            if (_now - _skew) > _exp:
                raise EXPError("Invalid expiration time")
        try:
            _storage_time = kwargs["nonce_storage_time"]
        except KeyError:
            _storage_time = NONCE_STORAGE_TIME
        try:
            _iat = self["iat"]
        except KeyError:
            raise MissingRequiredAttribute("iat")
        else:
            # Reject tokens older than the nonce retention window.
            if (_iat + _storage_time) < (_now - _skew):
                raise IATError("Issued too long ago")
        return True
class StateFullMessage(Message):
    """Base for messages that must carry a ``state`` parameter."""

    c_param = {"state": SINGLE_REQUIRED_STRING}
class RefreshSessionRequest(StateFullMessage):
    """Session refresh request: state + id_token + redirect_url."""

    c_param = StateFullMessage.c_param.copy()
    c_param.update(
        {"id_token": SINGLE_REQUIRED_STRING, "redirect_url": SINGLE_REQUIRED_STRING}
    )

    def verify(self, **kwargs):
        """Verify the message; any ``id_token`` JWT is replaced by the
        verified IdToken instance.

        Returns True on success, matching every other ``verify`` in this
        module (it previously fell through returning None, which is falsy
        and broke truthiness checks on the result).
        """
        super(RefreshSessionRequest, self).verify(**kwargs)
        if "id_token" in self:
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
        return True
class RefreshSessionResponse(StateFullMessage):
    """Session refresh response: state + new id_token."""

    c_param = StateFullMessage.c_param.copy()
    c_param.update({"id_token": SINGLE_REQUIRED_STRING})

    def verify(self, **kwargs):
        """Verify the message; any ``id_token`` JWT is replaced by the
        verified IdToken instance.

        Returns True on success for consistency with the other ``verify``
        methods in this module (previously returned None implicitly).
        """
        super(RefreshSessionResponse, self).verify(**kwargs)
        if "id_token" in self:
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
        return True
class CheckSessionRequest(Message):
    """Session check request carrying the id_token to validate."""

    c_param = {"id_token": SINGLE_REQUIRED_STRING}

    def verify(self, **kwargs):
        """Verify the message; the ``id_token`` JWT is replaced by the
        verified IdToken instance.

        Returns True on success for consistency with the other ``verify``
        methods in this module (previously returned None implicitly).
        """
        super(CheckSessionRequest, self).verify(**kwargs)
        if "id_token" in self:
            self["id_token"] = verify_id_token(self, check_hash=True, **kwargs)
        return True
class CheckIDRequest(Message):
    """Check-ID request authenticated by an access token."""

    c_param = {"access_token": SINGLE_REQUIRED_STRING}
class EndSessionRequest(Message):
    """RP-initiated logout request (OIDC Session Management)."""

    c_param = {
        "id_token_hint": SINGLE_OPTIONAL_STRING,
        "post_logout_redirect_uri": SINGLE_OPTIONAL_STRING,
        "state": SINGLE_OPTIONAL_STRING,
    }
class EndSessionResponse(Message):
    """Response to an EndSessionRequest; echoes the request ``state``."""

    c_param = {"state": SINGLE_OPTIONAL_STRING}
class Claims(Message):
    """Free-form claims container used inside a ClaimsRequest."""

    pass
class ClaimsRequest(Message):
    """The ``claims`` request parameter (OIDC Core section 5.5): which
    claims to return via the userinfo endpoint and/or inside the id_token."""

    c_param = {
        "userinfo": OPTIONAL_MULTIPLE_Claims,
        "id_token": OPTIONAL_MULTIPLE_Claims,
    }
class OpenIDRequest(AuthorizationRequest):
    """Request Object content; same parameters as AuthorizationRequest."""

    pass
class ProviderConfigurationResponse(Message):
    """OP metadata returned from ``.well-known/openid-configuration``
    (OIDC Discovery section 3)."""

    c_param = {
        "issuer": SINGLE_REQUIRED_STRING,
        "authorization_endpoint": SINGLE_REQUIRED_STRING,
        "token_endpoint": SINGLE_OPTIONAL_STRING,
        "userinfo_endpoint": SINGLE_OPTIONAL_STRING,
        "jwks_uri": SINGLE_REQUIRED_STRING,
        "registration_endpoint": SINGLE_OPTIONAL_STRING,
        "scopes_supported": OPTIONAL_LIST_OF_STRINGS,
        "response_types_supported": REQUIRED_LIST_OF_STRINGS,
        "response_modes_supported": OPTIONAL_LIST_OF_STRINGS,
        "grant_types_supported": OPTIONAL_LIST_OF_STRINGS,
        "acr_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "subject_types_supported": REQUIRED_LIST_OF_STRINGS,
        "id_token_signing_alg_values_supported": REQUIRED_LIST_OF_STRINGS,
        "id_token_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "id_token_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "userinfo_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_encryption_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "request_object_encryption_enc_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "token_endpoint_auth_methods_supported": OPTIONAL_LIST_OF_STRINGS,
        "token_endpoint_auth_signing_alg_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "display_values_supported": OPTIONAL_LIST_OF_STRINGS,
        "claim_types_supported": OPTIONAL_LIST_OF_STRINGS,
        "claims_supported": OPTIONAL_LIST_OF_STRINGS,
        "service_documentation": SINGLE_OPTIONAL_STRING,
        "claims_locales_supported": OPTIONAL_LIST_OF_STRINGS,
        "ui_locales_supported": OPTIONAL_LIST_OF_STRINGS,
        "claims_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "request_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "request_uri_parameter_supported": SINGLE_OPTIONAL_BOOLEAN,
        "require_request_uri_registration": SINGLE_OPTIONAL_BOOLEAN,
        "op_policy_uri": SINGLE_OPTIONAL_STRING,
        "op_tos_uri": SINGLE_OPTIONAL_STRING,
        "check_session_iframe": SINGLE_OPTIONAL_STRING,
        "end_session_endpoint": SINGLE_OPTIONAL_STRING,
        "frontchannel_logout_supported": SINGLE_OPTIONAL_BOOLEAN,
        "frontchannel_logout_session_supported": SINGLE_OPTIONAL_BOOLEAN,
        "backchannel_logout_supported": SINGLE_OPTIONAL_BOOLEAN,
        "backchannel_logout_session_supported": SINGLE_OPTIONAL_BOOLEAN,
    }
    # Spec-mandated defaults for parameters the OP leaves out.
    c_default = {
        "version": "3.0",
        "token_endpoint_auth_methods_supported": ["client_secret_basic"],
        "claims_parameter_supported": False,
        "request_parameter_supported": False,
        "request_uri_parameter_supported": True,
        "require_request_uri_registration": False,
        "grant_types_supported": ["authorization_code", "implicit"],
        "frontchannel_logout_supported": False,
        "frontchannel_logout_session_supported": False,
        "backchannel_logout_supported": False,
        "backchannel_logout_session_supported": False,
    }

    def verify(self, **kwargs):
        """Verify the metadata.

        * ``scopes_supported``, if present, must include "openid" and use
          only characters allowed in scope values.
        * ``issuer`` must be an https URL without query or fragment.
        * A ``token_endpoint`` is required whenever any supported response
          type involves "code".
        Returns True on success.
        """
        super().verify(**kwargs)
        if "scopes_supported" in self:
            if "openid" not in self["scopes_supported"]:
                raise AssertionError()
            for scope in self["scopes_supported"]:
                check_char_set(scope, SCOPE_CHARSET)
        parts = urlparse(self["issuer"])
        if parts.scheme != "https":
            raise SchemeError("Not HTTPS")
        if parts.query or parts.fragment:
            raise AssertionError()
        if (
            any("code" in rt for rt in self["response_types_supported"])
            and "token_endpoint" not in self
        ):
            raise MissingRequiredAttribute("token_endpoint")
        return True
class AuthnToken(Message):
    """JWT used for client authentication (private_key_jwt /
    client_secret_jwt, RFC 7523)."""

    c_param = {
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_REQUIRED_STRING,
        "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
        "jti": SINGLE_REQUIRED_STRING,
        "exp": SINGLE_REQUIRED_INT,
        "iat": SINGLE_OPTIONAL_INT,
    }
# According to RFC 7519 all claims are optional
class JasonWebToken(Message):
    """Generic JWT claim set; per RFC 7519 all registered claims are optional."""

    c_param = {
        "iss": SINGLE_OPTIONAL_STRING,
        "sub": SINGLE_OPTIONAL_STRING,
        "aud": OPTIONAL_LIST_OF_STRINGS,  # Array of strings or string
        "exp": SINGLE_OPTIONAL_INT,
        "nbf": SINGLE_OPTIONAL_INT,
        "iat": SINGLE_OPTIONAL_INT,
        "jti": SINGLE_OPTIONAL_STRING,
    }
def jwt_deser(val, sformat="json"):
    """Deserialize *val* into a JasonWebToken message.

    ``urlencoded`` input is treated as JSON; dict input is serialized to a
    JSON string first.
    """
    if sformat == "urlencoded":
        sformat = "json"
    if sformat in ("dict", "json"):
        sformat = "json"
        if not isinstance(val, str):
            val = json.dumps(val)
    return JasonWebToken().deserialize(val, sformat)
SINGLE_OPTIONAL_JWT = ParamDefinition(Message, False, msg_ser, jwt_deser, False)
class UserInfoErrorResponse(message.ErrorResponse):
    """Error response from the userinfo endpoint."""

    c_allowed_values = {
        "error": [
            "invalid_schema",
            "invalid_request",
            "invalid_token",
            "insufficient_scope",
        ]
    }
class DiscoveryRequest(Message):
    """Issuer discovery request: a principal identifier plus a service type."""

    c_param = {"principal": SINGLE_REQUIRED_STRING, "service": SINGLE_REQUIRED_STRING}
class DiscoveryResponse(Message):
    """Issuer discovery response: locations providing the requested service."""

    c_param = {"locations": REQUIRED_LIST_OF_STRINGS}
class ResourceRequest(Message):
    """Request to a protected resource; token may also travel in a header."""

    c_param = {"access_token": SINGLE_OPTIONAL_STRING}
# Mapping from OIDC scope value to the user-info claims it unlocks
# (OIDC Core section 5.4).
SCOPE2CLAIMS = {
    "openid": ["sub"],
    "profile": [
        "name",
        "given_name",
        "family_name",
        "middle_name",
        "nickname",
        "profile",
        "picture",
        "website",
        "gender",
        "birthdate",
        "zoneinfo",
        "locale",
        "updated_at",
        "preferred_username",
    ],
    "email": ["email", "email_verified"],
    "address": ["address"],
    "phone": ["phone_number", "phone_number_verified"],
    "offline_access": [],
}  # type: Dict[str, List[str]]
# LOGOUT related messages
# JSON-valued parameter definitions used by the logout messages below.
SINGLE_OPTIONAL_JSON = ParamDefinition(dict, False, json_ser, json_deser, False)
SINGLE_REQUIRED_JSON = ParamDefinition(dict, True, json_ser, json_deser, False)
# Event URI that MUST be the single member of LogoutToken["events"].
BACK_CHANNEL_LOGOUT_EVENT = "http://schemas.openid.net/event/backchannel-logout"
class LogoutToken(Message):
    """Defined in https://openid.net/specs/openid-connect-backchannel-1_0.html#LogoutToken ."""

    c_param = {
        "iss": SINGLE_REQUIRED_STRING,
        "sub": SINGLE_OPTIONAL_STRING,
        "aud": REQUIRED_LIST_OF_STRINGS,  # Array of strings or string
        "iat": SINGLE_REQUIRED_INT,
        "jti": SINGLE_REQUIRED_STRING,
        "events": SINGLE_REQUIRED_JSON,
        "sid": SINGLE_OPTIONAL_STRING,
    }

    def verify(self, **kwargs):
        """Validate the token against the back-channel logout spec.

        Recognized kwargs: ``aud``, ``iss`` (expected values) and ``skew``
        (allowed clock skew in seconds, default 0). Returns True on success.
        """
        super().verify(**kwargs)
        # A logout token must not masquerade as an ID Token (spec 2.6).
        if "nonce" in self:
            raise MessageException(
                '"nonce" is prohibited from appearing in a LogoutToken.'
            )
        # Check the 'events' JSON: exactly one member, the back-channel
        # logout event URI, whose value is the empty JSON object.
        _keys = list(self["events"].keys())
        if len(_keys) != 1:
            raise ValueError('Must only be one member in "events"')
        if _keys[0] != BACK_CHANNEL_LOGOUT_EVENT:
            raise ValueError('Wrong member in "events"')
        if self["events"][_keys[0]] != {}:
            raise ValueError('Wrong member value in "events"')
        # There must be either a 'sub' or a 'sid', and may contain both
        if not ("sub" in self or "sid" in self):
            raise ValueError('There MUST be either a "sub" or a "sid"')
        try:
            if kwargs["aud"] not in self["aud"]:
                raise NotForMe("Not among intended audience")
        except KeyError:
            pass
        try:
            if kwargs["iss"] != self["iss"]:
                raise NotForMe("Wrong issuer")
        except KeyError:
            pass
        _now = utc_time_sans_frac()
        _skew = kwargs.get("skew", 0)
        _iat = self.get("iat", 0)
        # Reject tokens issued in the future (beyond the allowed skew).
        if _iat and _iat > (_now + _skew):
            raise ValueError("Invalid issued_at time")
        return True
# Keyword arguments understood by IdToken verification machinery.
ID_TOKEN_VERIFY_ARGS = [
    "keyjar",
    "verify",
    "encalg",
    "encenc",
    "sigalg",
    "issuer",
    "allow_missing_kid",
    "no_kid_issuer",
    "trusting",
    "skew",
    "nonce_storage_time",
    "client_id",
]
class BackChannelLogoutRequest(Message):
    """Defines the message used in https://openid.net/specs/openid-connect-backchannel-1_0.html ."""

    c_param = {"logout_token": SINGLE_REQUIRED_STRING}

    def verify(self, **kwargs):
        """Decode and verify the ``logout_token`` JWT; on success the entry
        is replaced by the verified LogoutToken instance. Returns True."""
        super().verify(**kwargs)
        # Forward only the kwargs relevant for JWT decoding.
        args = {arg: kwargs[arg] for arg in TOKEN_VERIFY_ARGS if arg in kwargs}
        logout_token = LogoutToken().from_jwt(str(self["logout_token"]), **args)
        logout_token.verify(**kwargs)
        self["logout_token"] = logout_token
        logger.info("Verified Logout Token: {}".format(logout_token.to_dict()))
        return True
class FrontChannelLogoutRequest(Message):
    """Defines the message used in https://openid.net/specs/openid-connect-frontchannel-1_0.html ."""

    c_param = {"iss": SINGLE_OPTIONAL_STRING, "sid": SINGLE_OPTIONAL_STRING}
# Name -> class map of all message types defined by this module
# (used by the deprecated ``factory`` helper below).
MSG = {
    "RefreshAccessTokenRequest": RefreshAccessTokenRequest,
    "TokenErrorResponse": TokenErrorResponse,
    "AccessTokenResponse": AccessTokenResponse,
    "UserInfoRequest": UserInfoRequest,
    "AuthorizationResponse": AuthorizationResponse,
    "AuthorizationErrorResponse": AuthorizationErrorResponse,
    "AuthorizationRequest": AuthorizationRequest,
    "AccessTokenRequest": AccessTokenRequest,
    "AddressClaim": AddressClaim,
    "OpenIDSchema": OpenIDSchema,
    "RegistrationRequest": RegistrationRequest,
    "RegistrationResponse": RegistrationResponse,
    "ClientRegistrationErrorResponse": ClientRegistrationErrorResponse,
    "IdToken": IdToken,
    "RefreshSessionRequest": RefreshSessionRequest,
    "RefreshSessionResponse": RefreshSessionResponse,
    "CheckSessionRequest": CheckSessionRequest,
    "CheckIDRequest": CheckIDRequest,
    "EndSessionRequest": EndSessionRequest,
    "EndSessionResponse": EndSessionResponse,
    "Claims": Claims,
    "OpenIDRequest": OpenIDRequest,
    "ProviderConfigurationResponse": ProviderConfigurationResponse,
    "AuthnToken": AuthnToken,
    "UserInfoErrorResponse": UserInfoErrorResponse,
    "DiscoveryRequest": DiscoveryRequest,
    "DiscoveryResponse": DiscoveryResponse,
    "ResourceRequest": ResourceRequest,
    # LOGOUT messages
    "LogoutToken": LogoutToken,
    "BackChannelLogoutRequest": BackChannelLogoutRequest,
    "FrontChannelLogoutRequest": FrontChannelLogoutRequest,
}
def factory(msgtype):
    """Resolve a message class by name (deprecated; use OIDCMessageFactory)."""
    warnings.warn(
        "`factory` is deprecated. Use `OIDCMessageFactory` instead.", DeprecationWarning
    )
    for _, candidate in inspect.getmembers(sys.modules[__name__]):
        if not inspect.isclass(candidate) or not issubclass(candidate, Message):
            continue
        if getattr(candidate, "__name__", None) == msgtype:
            return candidate
    # Fall back to basic OAuth2 messages
    return message.factory(msgtype)
class OIDCMessageFactory(MessageFactory):
    """Factory that knows OIDC message types."""

    # One (request, response) message pair per protocol endpoint.
    authorization_endpoint = MessageTuple(AuthorizationRequest, AuthorizationResponse)
    token_endpoint = MessageTuple(AccessTokenRequest, AccessTokenResponse)
    refresh_endpoint = MessageTuple(RefreshAccessTokenRequest, AccessTokenResponse)
    resource_endpoint = MessageTuple(ResourceRequest, Message)
    configuration_endpoint = MessageTuple(Message, ProviderConfigurationResponse)
    userinfo_endpoint = MessageTuple(UserInfoRequest, Message)
    registration_endpoint = MessageTuple(RegistrationRequest, RegistrationResponse)
    endsession_endpoint = MessageTuple(EndSessionRequest, EndSessionResponse)
    checkid_endpoint = MessageTuple(CheckIDRequest, IdToken)
    checksession_endpoint = MessageTuple(CheckSessionRequest, IdToken)
    refreshsession_endpoint = MessageTuple(
        RefreshSessionRequest, RefreshSessionResponse
    )
    discovery_endpoint = MessageTuple(DiscoveryRequest, DiscoveryResponse)
| 33.288462 | 101 | 0.643232 |
f2df0ae091b8fb226a01c5e5267ee34e6849f2c1 | 9,971 | py | Python | tools/visual_utils/visualize_utils.py | skyhehe123/VoxSeT | 7c8fe7eecddcb9ee073ea0ceb5be85767af03b4d | [
"MIT"
] | 39 | 2022-03-22T03:09:38.000Z | 2022-03-30T15:40:02.000Z | tools/visual_utils/visualize_utils.py | skyhehe123/VoxSeT | 7c8fe7eecddcb9ee073ea0ceb5be85767af03b4d | [
"MIT"
] | 3 | 2022-03-22T13:40:54.000Z | 2022-03-26T11:20:20.000Z | tools/visual_utils/visualize_utils.py | skyhehe123/VoxSeT | 7c8fe7eecddcb9ee073ea0ceb5be85767af03b4d | [
"MIT"
] | 2 | 2022-03-27T13:49:37.000Z | 2022-03-30T08:34:07.000Z | import mayavi.mlab as mlab
import numpy as np
import torch
# RGB colors (0-1 floats) used for drawing boxes; draw_scenes indexes this
# with label % len(box_colormap).
box_colormap = [
    [1, 1, 1],
    [0, 1, 0],
    [0, 1, 1],
    [1, 1, 0],
]
def check_numpy_to_torch(x):
    """Ensure *x* is a torch tensor.

    :param x: numpy array or torch tensor.
    :return: (tensor, was_numpy) -- numpy input is converted to a float
        tensor; anything else is returned unchanged.
    """
    if not isinstance(x, np.ndarray):
        return x, False
    return torch.from_numpy(x).float(), True
def rotate_points_along_z(points, angle):
    """
    Args:
        points: (B, N, 3 + C)
        angle: (B), angle along z-axis, angle increases x ==> y

    Returns:
        Rotated points with the same shape; numpy in -> numpy out,
        torch in -> torch out. Extra channels (C) pass through unrotated.
    """
    points, is_numpy = check_numpy_to_torch(points)
    angle, _ = check_numpy_to_torch(angle)
    cosa = torch.cos(angle)
    sina = torch.sin(angle)
    zeros = angle.new_zeros(points.shape[0])
    ones = angle.new_ones(points.shape[0])
    # Per-batch 3x3 rotation matrices about the z axis (row-major).
    rot_matrix = torch.stack((
        cosa, sina, zeros,
        -sina, cosa, zeros,
        zeros, zeros, ones
    ), dim=1).view(-1, 3, 3).float()
    # Rotate xyz only; concatenate the untouched extra channels back on.
    points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
    points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
    return points_rot.numpy() if is_numpy else points_rot
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1

    Args:
        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center

    Returns:
        (N, 8, 3) corner coordinates in the numbering shown above;
        numpy in -> numpy out, torch in -> torch out.
    """
    boxes3d, is_numpy = check_numpy_to_torch(boxes3d)

    # Unit-cube corner signs, scaled by half the box dimensions.
    template = boxes3d.new_tensor((
        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
    )) / 2

    corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
    # Rotate by heading, then translate to the box center.
    corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
    corners3d += boxes3d[:, None, 0:3]

    return corners3d.numpy() if is_numpy else corners3d
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                  show_intensity=False, size=(600, 600), draw_origin=True):
    """Render a point cloud with mayavi.

    :param pts: (N, >=3) array or torch tensor; column 3 is used as
        intensity when *show_intensity* is True.
    :param fig: existing mayavi figure to draw into; a new one otherwise.
    :param draw_origin: if True, draw a cube plus an axis triad at (0,0,0)
        (x=blue, y=green, z=red).
    :return: the mayavi figure.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()
    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)

    if show_intensity:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    else:
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
    if draw_origin:
        mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
        mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
        mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)

    return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
    """Render points as spheres, optionally with per-point colors.

    :param pts: (N, >=3) array or torch tensor of point coordinates.
    :param color: either one RGB tuple (0-1 floats) for all points, or an
        (N, 3) uint8 array of per-point colors in 0-255.
    :return: the mayavi figure.
    """
    if not isinstance(pts, np.ndarray):
        pts = pts.cpu().numpy()

    if fig is None:
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))

    if isinstance(color, np.ndarray) and color.shape[0] == 1:
        # Single-row color array: treat as one shared RGB tuple (0-1 floats).
        color = color[0]
        color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)

    if isinstance(color, np.ndarray):
        # Per-point colors: route through a custom LUT indexed by point id.
        pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
        pts_color[:, 0:3] = color
        pts_color[:, 3] = 255
        G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
                          scale_factor=scale_factor, figure=fig)
        G.glyph.color_mode = 'color_by_scalar'
        G.glyph.scale_mode = 'scale_by_vector'
        G.module_manager.scalar_lut_manager.lut.table = pts_color
    else:
        mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
                      colormap='gnuplot', scale_factor=scale_factor, figure=fig)

    # Origin marker plus axis triad (x=blue, y=green, z=red).
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
    mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
    mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)

    return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
    """Draw the four edges of the axis-aligned rectangle (x1,y1)-(x2,y2)
    on the z=0 plane and return the figure."""
    edges = (
        ([x1, x1], [y1, y2]),
        ([x2, x2], [y1, y2]),
        ([x1, x2], [y1, y1]),
        ([x1, x2], [y2, y2]),
    )
    for xs, ys in edges:
        mlab.plot3d(xs, ys, [0, 0], color=color, tube_radius=tube_radius,
                    line_width=1, figure=fig)
    return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
    """Tile the bird's-eye-view range with grid cells of *grid_size* and
    return the figure."""
    x_min, y_min, x_max, y_max = bv_range
    for x0 in range(x_min, x_max, grid_size):
        for y0 in range(y_min, y_max, grid_size):
            fig = draw_grid(x0, y0, x0 + grid_size, y0 + grid_size, fig)
    return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
    """Visualize a full scene: point cloud, ground-truth boxes (blue) and
    predicted boxes (green, or per-label colors from box_colormap).

    :param points: (N, >=3) point cloud (numpy or torch).
    :param gt_boxes: (M, 7) ground-truth boxes [x, y, z, dx, dy, dz, heading].
    :param ref_boxes: (K, 7) predicted boxes.
    :param ref_scores: (K,) scores drawn as text next to each predicted box.
    :param ref_labels: (K,) integer class labels selecting a colormap entry.
    :return: the mayavi figure.
    """
    # Normalize every input to numpy.
    if not isinstance(points, np.ndarray):
        points = points.cpu().numpy()
    if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
        ref_boxes = ref_boxes.cpu().numpy()
    if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
        gt_boxes = gt_boxes.cpu().numpy()
    if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
        ref_scores = ref_scores.cpu().numpy()
    if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
        ref_labels = ref_labels.cpu().numpy()

    fig = visualize_pts(points)
    fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
    if gt_boxes is not None:
        corners3d = boxes_to_corners_3d(gt_boxes)
        fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)

    if ref_boxes is not None and len(ref_boxes) > 0:
        ref_corners3d = boxes_to_corners_3d(ref_boxes)
        if ref_labels is None:
            fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
        else:
            # One draw call per label so each class gets its own color.
            for k in range(ref_labels.min(), ref_labels.max() + 1):
                cur_color = tuple(box_colormap[k % len(box_colormap)])
                mask = (ref_labels == k)
                fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
    mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
    return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
    """
    :param corners3d: (N, 8, 3) box corners as produced by boxes_to_corners_3d
    :param fig: mayavi figure to draw into
    :param color: RGB tuple (0-1 floats) for all edges
    :param line_width: edge line width
    :param cls: optional per-box labels/scores rendered as text at corner 6
    :param tag: unused
    :param max_num: draw at most this many boxes
    :param tube_radius: passed through to mlab.plot3d
    :return: the figure
    """
    import mayavi.mlab as mlab
    num = min(max_num, len(corners3d))
    for n in range(num):
        b = corners3d[n]  # (8, 3)

        if cls is not None:
            if isinstance(cls, np.ndarray):
                # Numeric scores: format to two decimals.
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
            else:
                mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)

        # Bottom face (0-3), top face (4-7) and the four vertical edges.
        for k in range(0, 4):
            i, j = k, (k + 1) % 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

            i, j = k + 4, (k + 1) % 4 + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

            i, j = k, k + 4
            mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                        line_width=line_width, figure=fig)

        # Diagonal cross on the 0-1-4-5 face to mark box heading.
        i, j = 0, 5
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)
        i, j = 1, 4
        mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
                    line_width=line_width, figure=fig)

    return fig
def visualize_offscreen(pts, boxes, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
                        show_intensity=False, size=(1000, 1000), draw_origin=True, save_path='test.png'):
    """Render a point cloud off-screen (via a virtual X display) and save
    the image to *save_path*.

    NOTE(review): *boxes* is accepted but never drawn — confirm intent.

    :param pts: (N, >=3) point cloud (numpy or torch); column 3 is the
        intensity when *show_intensity* is True.
    :return: the mayavi figure.
    """
    import numpy as np
    from pyvirtualdisplay import Display
    display = Display(visible=False, size=(1280, 1024))
    display.start()
    try:
        import mayavi.mlab as mlab
        mlab.options.offscreen = True
        if not isinstance(pts, np.ndarray):
            pts = pts.cpu().numpy()
        if fig is None:
            fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
        if show_intensity:
            mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
        else:
            mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point',
                          colormap='gnuplot', scale_factor=1, figure=fig)
        if draw_origin:
            # Origin marker plus axis triad (x=blue, y=green, z=red).
            mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
            mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
            mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
            mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)
        mlab.view(azimuth=45, elevation=-60, focalpoint=[0, 0, 0], distance=200.0, figure=fig)
        mlab.savefig(save_path)
    finally:
        # Always tear down the virtual display; the original leaked it
        # whenever rendering or saving raised an exception.
        display.stop()
    return fig
| 39.411067 | 121 | 0.568449 |
71443192f9c3ef1fe897c86cdc04b9f28d8f099f | 4,991 | py | Python | docs/conf.py | sschwindt/tke-calculator | 4d28ae27a7a26842dd5df306bff56daafc100164 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | sschwindt/tke-calculator | 4d28ae27a7a26842dd5df306bff56daafc100164 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | sschwindt/tke-calculator | 4d28ae27a7a26842dd5df306bff56daafc100164 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from sphinx.locale import _
import sphinx_rtd_theme
import sys
import os
import re
import datetime
# If we are building locally, or the build on Read the Docs looks like a PR
# build, prefer to use the version of the theme in this repo, not the installed
# version of the theme.
def is_development_build():
    """Return True for local builds and Read the Docs pull-request builds.

    Read the Docs exposes the build slug via READTHEDOCS_VERSION; PR builds
    use a purely numeric slug (the PR number), while regular builds use names
    such as "latest" or "stable".
    """
    if "READTHEDOCS" not in os.environ:
        # Not running on Read the Docs at all: treat as a local dev build.
        return True
    slug = os.environ.get("READTHEDOCS_VERSION", "")
    # PR builds have an integer version slug.
    return bool(re.match(r"^[\d]+$", slug))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(".."))
# the following modules will be mocked (i.e. bogus imports - required for C-dependent packages)
autodoc_mock_imports = [
"matplotlib",
"numpy",
"pandas",
]
project = u"TKE Analyst"
slug = re.sub(r"\W+", "-", project.lower())
version = "1.0.0"
release = "latest"
author = u"sschwindt"
copyright = author
language = "en"
extensions = [
"sphinx.ext.intersphinx",
# "sphinxcontrib.bibtex",
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.githubpages",
"sphinx.ext.todo",
# "sphinxcontrib.googleanalytics",
]
templates_path = ["_templates"]
source_suffix = ".rst"
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
locale_dirs = ["locale/", "docs/"]
gettext_compact = False
master_doc = "index"
suppress_warnings = ["image.nonlocal_uri"]
pygments_style = "sphinx"
intersphinx_mapping = {
"python": ("https://docs.python.org/3.8", None),
"rtd": ("https://docs.readthedocs.io/en/latest/", None),
"sphinx": ("http://www.sphinx-doc.org/en/stable/", None),
}
nitpick_ignore = [
("py:class", "docutils.nodes.document"),
("py:class", "docutils.parsers.rst.directives.body.Sidebar"),
]
numfig = True
myst_admonition_enable = True
myst_deflist_enable = True
myst_url_schemes = ("http", "https", "mailto")
panels_add_bootstrap_css = False
html_theme = "sphinx_rtd_theme" # nature
html_theme_options = {
"theme_dev_mode": True,
"launch_buttons": {
"binderhub_url": "https://mybinder.org",
"thebe": True,
"notebook_interface": "jupyterlab",
"collapse_navigation": False
},
"repository_url": "https://github.com/sschwindt/tke-calculator",
"repository_branch": "main",
"use_edit_page_button": False,
"use_repository_button": True,
}
html_context = {
"date": datetime.date.today().strftime("%Y-%m-%d"),
"display_github": True,
"github_user": "sschwindt",
"github_repo": "tke-calculator",
"github_version": "main/",
"conf_py_path": "/docs/"
}
if not ("READTHEDOCS" in os.environ):
html_static_path = ["_static/"]
html_js_files = ["debug.js"]
# Add fake versions for local QA of the menu
html_context["test_versions"] = list(map(
lambda x: str(x / 10),
range(1, 100)
))
html_favicon = os.path.abspath("..") + "/docs/img/browser-icon.ico"
html_last_updated_fmt = ""
html_logo = os.path.abspath("..") + "/docs/img/icon.svg"
html_show_sourcelink = True
html_title = "TKE Analyst " + version
htmlhelp_basename = "TKE Analyst"
html_sourcelink_suffix = ""
thebe_config = {
"repository_url": "https://github.com/binder-examples/jupyter-stacks-datascience",
"repository_branch": "master",
}
latex_documents = [
(master_doc, "{0}.tex".format(slug), project, author, "manual"),
]
man_pages = [
(master_doc, slug, project, [author], 1)
]
# allow errors
execution_allow_errors = True
# execute cells only if any of the cells is missing output
jupyter_execute_notebooks = "auto"
texinfo_documents = [
(master_doc, slug, project, author, slug, project, "Miscellaneous"),
]
# Extensions to theme docs
def setup(app):
    """Sphinx extension hook: register the ``confval`` object type.

    ``confval`` entries get an index entry ("pair: NAME; configuration value")
    and support ``:type:`` and ``:default:`` doc fields.
    """
    from sphinx.domains.python import PyField
    from sphinx.util.docfields import Field
    # Build the doc fields first so the registration call stays readable.
    type_field = PyField(
        "type",
        label=_("Type"),
        has_arg=False,
        names=("type",),
        bodyrolename="class",
    )
    default_field = Field(
        "default",
        label=_("Default"),
        has_arg=False,
        names=("default",),
    )
    app.add_object_type(
        "confval",
        "confval",
        objname="configuration value",
        indextemplate="pair: %s; configuration value",
        doc_field_types=[type_field, default_field],
    )
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
| 25.994792 | 95 | 0.660789 |
bb66f86087ad4d9095df3c299aaebedbaf4c56ae | 7,860 | py | Python | tests/test_cdutil_VariableMatcher.py | CDAT/cdutil | 314c565bc2e0d2c15d233320dc4c5496a602479a | [
"BSD-3-Clause"
] | null | null | null | tests/test_cdutil_VariableMatcher.py | CDAT/cdutil | 314c565bc2e0d2c15d233320dc4c5496a602479a | [
"BSD-3-Clause"
] | 14 | 2018-04-09T13:10:12.000Z | 2020-09-27T08:02:05.000Z | tests/test_cdutil_VariableMatcher.py | CDAT/cdutil | 314c565bc2e0d2c15d233320dc4c5496a602479a | [
"BSD-3-Clause"
] | 1 | 2021-02-08T22:23:15.000Z | 2021-02-08T22:23:15.000Z | #!/usr/bin/env python
import cdutil,os,numpy,cdat_info
import unittest
import MV2
class CDUTIL(unittest.TestCase):
    """Integration tests for cdutil.VariablesMatcher using the CDAT sample data
    (tas_dnm-95a / tas_ccsr-95a and their land-fraction masks)."""
    def testVariableMatcher1(self):
        """Match two datasets onto a common 10x10-degree grid and check shapes
        and value ranges of the three equivalent ways of getting the result."""
        ref = os.path.join(cdat_info.get_sampledata_path(),'tas_dnm-95a.xml')
        # Reference
        Ref=cdutil.VariableConditioner(ref)
        Ref.variable='tas'
        Ref.id='JONES'# optional
        # Test
        tst = os.path.join(cdat_info.get_sampledata_path(),'tas_ccsr-95a.xml')
        Tst=cdutil.VariableConditioner(tst)
        Tst.variable='tas'
        Tst.id='NCEP' #optional
        # Final Grid
        FG=cdutil.WeightedGridMaker()
        FG.longitude.n=36
        FG.longitude.first=0.
        FG.longitude.delta=10.
        FG.latitude.n=18
        FG.latitude.first=-85.
        FG.latitude.delta=10.
        # Now creates the compare object.
        c=cdutil.VariablesMatcher(Ref, Tst, weightedGridMaker=FG)
        # And get it (3 different ways).
        (ref, ref_frac), (test, test_frac) = c.get()
        ref, test = c.get(returnTuple=0)
        self.assertEqual(ref.shape, (72, 1, 18, 36))
        self.assertEqual(test.shape, (72, 1, 18, 36))
        self.assertTrue(numpy.allclose(ref.min(),194.175))
        self.assertTrue(numpy.allclose(test.min(),210.83))
        self.assertTrue(numpy.allclose(ref.max(),309.541))
        self.assertTrue(numpy.allclose(test.max(),309.483))
        # Calling the matcher directly must be equivalent to c.get(...).
        ref, test = c(returnTuple=0)
        self.assertEqual(ref.shape, (72, 1, 18, 36))
        self.assertEqual(test.shape, (72, 1, 18, 36))
        self.assertTrue(numpy.allclose(ref.min(),194.175))
        self.assertTrue(numpy.allclose(test.min(),210.83))
        self.assertTrue(numpy.allclose(ref.max(),309.541))
        self.assertTrue(numpy.allclose(test.max(),309.483))
    def testVariableMatcher2(self):
        """Same matching, but with land masks, a time sub-selection on the
        reference, and an external variable conditioner for extra masking."""
        # First let's creates the mask (it is the same for NCEP and ECMWF since they are on the same grid).
        refmsk = os.path.join(cdat_info.get_sampledata_path(),'sftlf_dnm.nc')
        M=cdutil.WeightsMaker(refmsk, var='sftlf_dnm', values=[1.])
        # Reference
        ref = os.path.join(cdat_info.get_sampledata_path(),'tas_dnm-95a.xml')
        Ref=cdutil.VariableConditioner(ref, weightsMaker=M)
        Ref.variable='tas'
        Ref.id='D1'
        Ref.cdmsKeywords={'time':('1979','1980','co')}
        # Test
        tstmsk = os.path.join(cdat_info.get_sampledata_path(),'sftlf_ccsr.nc')
        M=cdutil.WeightsMaker(tstmsk, var='sftlf_ccsr', values=[1.])
        tst = os.path.join(cdat_info.get_sampledata_path(),'tas_ccsr-95a.xml')
        Tst=cdutil.VariableConditioner(tst, weightsMaker=M)
        Tst.variable='tas'
        Tst.id='D2'
        # External Variable (for the mask)
        ext = ref
        EV=cdutil.VariableConditioner(ext)
        EV.variable='tas'
        EV.id='OUT'
        # Final Grid
        # We need a mask for the final grid
        fgmask=ext
        M2=cdutil.WeightsMaker(source=refmsk, var='sftlf_dnm', values=[["input",100.]])
        FG=cdutil.WeightedGridMaker(weightsMaker=M2)
        FG.longitude.n=36
        FG.longitude.first=0.
        FG.longitude.delta=10.
        FG.latitude.n=18
        FG.latitude.first=-85.
        FG.latitude.delta=10.
        # Now creates the compare object
        print("REF:",Ref())
        c=cdutil.VariablesMatcher(Ref, Tst, weightedGridMaker=FG, externalVariableConditioner=EV)
        # And gets it
        (ref, reffrc), (test, tfrc) = c()
        print('Shapes:', test.shape, ref.shape)
        # One year of monthly data on the 10x10 grid.
        self.assertEqual(test.shape,ref.shape)
        self.assertEqual(test.shape,(12,1,18,36))
    def testVariableMatcher3(self):
        """This is a very complicated example that shows MOST of the options and power of VariablesMatcher.
        Once again we retrieve NCEP and ECMWF (for 1981), but this time, they are both masked for land first.
        ECMWF is then regridded to a T63 grid and NCEP to a T42 grid. There they are masked where the temperatures are less than 280K or more than 300K (in two different ways).
        The JONES external variable is then applied for additional external masking (with a personal land mask).
        Finally, everything is put on the 10x10 grid and masked for land.
        Also a 'selector' for Northern Hemisphere is applied (see cdutil.region documentation)
        """
        # First let's create the mask (it is the same for NCEP and ECMWF since they are on the same grid)
        refmsk = os.path.join(cdat_info.get_sampledata_path(),'sftlf_dnm.nc')
        M=cdutil.WeightsMaker(refmsk, var='sftlf_dnm', values=[1.])
        # Reference
        ref = os.path.join(cdat_info.get_sampledata_path(),'tas_dnm-95a.xml')
        Ref=cdutil.VariableConditioner(ref, weightsMaker=M)
        Ref.variable='tas'
        Ref.id='ECMWF'
        Ref.cdmsKeywords={'time':('1979','1980','co')}
        # Ok now the final grid for this variable is a T63, masked where temperatures are not between 280K and 300K
        ECMWFGrid=cdutil.WeightedGridMaker(source=refmsk,var='sftlf_dnm')
        ECMWFinalMask=cdutil.WeightsMaker()
        ECMWFinalMask.values=[('input',280.),('input',300.)]
        ECMWFinalMask.actions=[MV2.less, MV2.greater]
        # Associate the mask with the grid
        ECMWFGrid.weightsMaker=ECMWFinalMask
        # Now associates the grid with the variable.
        Ref.weightedGridMaker=ECMWFGrid
        # Test
        tstmsk = os.path.join(cdat_info.get_sampledata_path(),'sftlf_ccsr.nc')
        M=cdutil.WeightsMaker(tstmsk, var='sftlf_ccsr', values=[1.])
        tst = os.path.join(cdat_info.get_sampledata_path(),'tas_ccsr-95a.xml')
        Tst=cdutil.VariableConditioner(tst, weightsMaker=M)
        Tst.variable='tas'
        Tst.id='NCEP'
        # Ok now the final grid for this variable is a T42, masked where temperatures are not between 280K and 300K
        NCEPGrid=cdutil.WeightedGridMaker()
        NCEPGrid.latitude.n=64
        NCEPGrid.latitude.type='gaussian'
        # Ok now let's create a function to return the mask
        def myMakeMask(array, range):
            """Returns the input array masked where the values are not between range[0] and range[1]"""
            m1=MV2.less (array, range[0]) # mask where it is less than the 1st value
            m2=MV2.greater(array, range[1]) # mask where it is more than the 2nd value
            return MV2.logical_or(m1,m2)
        # And associate the mask with the grid
        NCEPGrid.weightsMaker.values=[('input',(280.,300.))]
        NCEPGrid.weightsMaker.actions=[myMakeMask]
        # Now associates the grid with the variable.
        Tst.weightedGridMaker=NCEPGrid
        # External Variable
        ext = ref
        extmask = refmsk
        EMask=cdutil.WeightsMaker(source=extmask, var='sftlf_dnm')
        ED=cdutil.VariableConditioner(ext, weightsMaker=EMask)
        ED.variable='tas'
        ED.id='JONES'
        # Final Grid
        # We need a mask for the final grid
        fgmask=os.path.join(cdat_info.get_sampledata_path(),'sftlf_10x10.nc')
        M2=cdutil.WeightsMaker(source=fgmask, var='sftlf', values=[100.])
        FG=cdutil.WeightedGridMaker(weightsMaker=M2)
        FG.longitude.n=36
        FG.longitude.first=0.
        FG.longitude.delta=10.
        FG.latitude.n=18
        FG.latitude.first=-85.
        FG.latitude.delta=10.
        # Now creates the compare object
        c=cdutil.VariablesMatcher(Ref, Tst, weightedGridMaker=FG, externalVariableConditioner=ED)
        c.cdmsArguments=[cdutil.region.NH]
        #print c
        # And gets it
        (ref, reffrc), (test, tfrc) = c()
        print('Shapes:', test.shape, ref.shape)
        print('Shapes:', tfrc.shape, reffrc.shape)
        # Northern-hemisphere selection halves the latitude axis (9 of 18 rows).
        self.assertEqual(ref.shape,reffrc.shape)
        self.assertEqual(test.shape,tfrc.shape)
        self.assertEqual(test.shape,ref.shape)
        self.assertEqual(test.shape,(12, 1, 9, 36))
| 43.910615 | 176 | 0.643384 |
1d44b20e47585593a4fc649de84d11ad1b7424de | 926 | py | Python | .ipynb_checkpoints/app-checkpoint.py | skohtz1/web-scrapingHW | 11cf4686286a4fa51ef23a9e0afc5adca21f40c1 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/app-checkpoint.py | skohtz1/web-scrapingHW | 11cf4686286a4fa51ef23a9e0afc5adca21f40c1 | [
"MIT"
] | null | null | null | .ipynb_checkpoints/app-checkpoint.py | skohtz1/web-scrapingHW | 11cf4686286a4fa51ef23a9e0afc5adca21f40c1 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
mongo = PyMongo(app)
@app.route("/")
def index():
listings = mongo.db.listings_mars.find()
return render_template("index.html", listings=listings)
@app.route("/clear")
def clear_listings():
mongo.db.listings_mars.drop()
return redirect("http://127.0.0.1:5000/", code=302)
@app.route("/scrape")
def scrape():
listings = mongo.db.listings_mars
mars_data_dict = scrape_mars.scrape()
#for listing in mars_data_dict:
listings.update({'weather':listing['weather'], 'hemisphere':listing['hemisphere'],'feature_image':listing['feature_image'],'title_feature':listing['title_feature']}, listing, upsert=True)
#listings.insert_one(mars_data_dict)
return redirect("http://127.0.0.1:5000/", code=302)
if __name__ == "__main__":
app.run(debug=True) | 26.457143 | 191 | 0.717063 |
a62770f40bc2ce595851d29271fdcc5c57e3ea48 | 480 | py | Python | aulaszeus/extensions.py | zerossB/aulas_zeus | 002fc29578aed3aa36a9b400882e638b68c123ea | [
"BSD-3-Clause"
] | null | null | null | aulaszeus/extensions.py | zerossB/aulas_zeus | 002fc29578aed3aa36a9b400882e638b68c123ea | [
"BSD-3-Clause"
] | null | null | null | aulaszeus/extensions.py | zerossB/aulas_zeus | 002fc29578aed3aa36a9b400882e638b68c123ea | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
aulaszeus.extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~
AulasZeus extensions module
:copyright: (c) 2016 by Haynesss.
:license: BSD, see LICENSE for more details.
"""
from flask_mongoengine import MongoEngine
from flask_mail import Mail
from flask_assets import Environment
from flask_cache import Cache
from flask_bootstrap import Bootstrap
mail = Mail()
db = MongoEngine()
assets = Environment()
cache = Cache()
bootstrap = Bootstrap() | 22.857143 | 48 | 0.69375 |
2c48a8999b0bde009c672cd810a3dde1559c613d | 1,595 | py | Python | classicEvoAlg.py | kondiiq/FIR-filters-with-evolutionally-algorithms | 28605f3eb0e061e834b944b9bfbaf6cded9347d0 | [
"MIT"
] | null | null | null | classicEvoAlg.py | kondiiq/FIR-filters-with-evolutionally-algorithms | 28605f3eb0e061e834b944b9bfbaf6cded9347d0 | [
"MIT"
] | null | null | null | classicEvoAlg.py | kondiiq/FIR-filters-with-evolutionally-algorithms | 28605f3eb0e061e834b944b9bfbaf6cded9347d0 | [
"MIT"
] | null | null | null | import random
import pandas
import matplotlib
from scipy import signal
import numpy as np
def create_sample():
    """Generate a single random sample.

    (Originally documented in Polish: "Funkcja generująca pojedynczą próbkę".)

    :return: a float drawn uniformly from [0.0, 1.0)
    """
    return np.random.random_sample()
def create_starting_population(number_of_sample: int = 200):
    """Generate the starting population using create_sample().

    (Originally documented in Polish.)

    :param number_of_sample: how many samples to generate; must be positive
    :return: list of float samples
    :raises ValueError: when number_of_sample is zero or negative
    """
    if number_of_sample <= 0:
        raise ValueError('Array should have more than 0 samples and must not have negative numbers of index!!')
    # Bug fix: the original body called create_sample() twice per iteration
    # and discarded the first result, silently consuming extra RNG draws.
    samples = [create_sample() for _ in range(number_of_sample)]
    # Kept from the original implementation; prints the generated population.
    print(samples)
    return samples
def choose_best_samples(): # TODO: needs to receive the sample list as a parameter
    """Sort all samples and pick the best ones from the given array.

    Not implemented yet (stub).

    :param: intended: the sample array and the number of samples needed
    :return: intended: the best n samples of the array
    """
    pass
def cross_samples():
    """Select the best samples (per the algorithm) and add them to a new list.

    Incomplete stub: currently only calls choose_best_samples() and discards
    its (nonexistent) result.

    :return: intended: array of samples
    """
    choose_best_samples()
if __name__ == "__main__":
numbers_of_samples = int(257)
population = create_starting_population(numbers_of_samples)
| 24.166667 | 111 | 0.707837 |
68a0ac28609e8506a310943c91f07a9939bd8cae | 4,049 | py | Python | peeringdb_server/migrations/0025_E164_phonenumbers.py | egfrank/peeringdb | 5ccb44c2955e29e9d9217f1a984dcb90a661ec62 | [
"BSD-2-Clause"
] | 1 | 2021-01-29T04:08:52.000Z | 2021-01-29T04:08:52.000Z | peeringdb_server/migrations/0025_E164_phonenumbers.py | egfrank/peeringdb | 5ccb44c2955e29e9d9217f1a984dcb90a661ec62 | [
"BSD-2-Clause"
] | 7 | 2021-04-06T18:42:16.000Z | 2021-09-08T03:01:50.000Z | peeringdb_server/migrations/0025_E164_phonenumbers.py | invictus80/peeringdb | dddbb4e34af444b8403fa9ccf9b72279bd7719eb | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 1.11.23 on 2019-12-12 08:46
import csv
import phonenumbers
from django.db import migrations
from django.conf import settings
def _edit_url(tag, instance):
    """Return the public PeeringDB URL where this object can be edited."""
    if tag == "poc":
        # Network contacts are edited through their parent network's page.
        return f"{settings.BASE_URL}/net/{instance.network_id}/"
    return f"{settings.BASE_URL}/ix/{instance.id}/"
def _fix_number(tag, instance, field_name, list_fixed, list_invalid):
    # Normalize one phone-number field on `instance` to E.164 in place.
    # On success the instance is saved and a row is appended to `list_fixed`;
    # on any parse/format failure a row goes to `list_invalid` instead.
    # NOTE(review): getattr(..., None).strip() raises AttributeError when the
    # attribute is missing/None — presumably the fields are always non-null
    # strings in this schema; confirm before reuse.
    number = getattr(instance, field_name, None).strip()
    if number:
        try:
            # Use the instance's country code (if any) as the parsing hint.
            country = getattr(instance, "country", None)
            if country:
                country = country.code
            parsed_number = phonenumbers.parse(number, country)
            validated_number = phonenumbers.format_number(
                parsed_number, phonenumbers.PhoneNumberFormat.E164
            )
            # Already in E.164 form: nothing to do.
            if f"{validated_number}" == f"{number}":
                return
            setattr(instance, field_name, validated_number)
            list_fixed.append(
                [
                    tag,
                    instance.id,
                    _edit_url(tag, instance),
                    instance.status,
                    field_name,
                    number,
                    validated_number,
                    country,
                ]
            )
            print("FIXED", tag, instance.id, field_name, number, validated_number)
            instance.save()
        except Exception as exc:
            # Any failure (parse error, bad country, save error) is recorded
            # as invalid rather than aborting the migration.
            _push_invalid(tag, instance, field_name, number, list_invalid, f"{exc}")
            print("INVALID", tag, instance.id, field_name, number)
def _push_invalid(tag, instance, field_name, number, list_invalid, reason):
    """Record a phone number that could not be normalized to E.164.

    Appends one report row to *list_invalid*; *reason* is stripped of
    surrounding whitespace before being stored.
    """
    country_obj = getattr(instance, "country", None)
    country = country_obj.code if country_obj else country_obj
    row = [
        tag,
        instance.id,
        _edit_url(tag, instance),
        instance.status,
        field_name,
        number,
        country,
        reason.strip(),
    ]
    list_invalid.append(row)
def forwards_func(apps, schema_editor):
    """
    Attempt to validate existing phone numbers to E164 format.

    Originally also wrote any numbers that could not be validated to an
    invalid_phonenumbers.csv file and any that were fixed to a
    fixed_phonenumbers.csv file (that CSV output is now disabled, see the
    retained block at the end of this function).
    """
    InternetExchange = apps.get_model("peeringdb_server", "InternetExchange")
    NetworkContact = apps.get_model("peeringdb_server", "NetworkContact")
    # NOTE(review): these header lists are only consumed by the disabled CSV
    # block below; they are unused while that block stays commented out.
    headers_invalid = [
        "type",
        "id",
        "status",
        "field",
        "phonenumber",
        "country",
        "reason",
    ]
    headers_fixed = [
        "type",
        "id",
        "status",
        "field",
        "phonenumber",
        "fixed",
        "country",
    ]
    invalid = []
    fixed = []
    # Exchanges carry two phone fields; contacts carry one.
    for ix in InternetExchange.handleref.filter(status__in=["ok", "pending"]):
        _fix_number("ix", ix, "tech_phone", fixed, invalid)
        _fix_number("ix", ix, "policy_phone", fixed, invalid)
    for poc in NetworkContact.handleref.filter(status__in=["ok", "pending"]):
        _fix_number("poc", poc, "phone", fixed, invalid)
    # The string below is dead code kept deliberately as a record of the
    # one-time CSV reporting that was run in production.
    """ This was used in production as a one time process
    print(
        "Invalid numbers: {} - written to invalid_phonenumbers.csv".format(len(invalid))
    )
    with open("invalid_phonenumbers.csv", "w+") as csvfile:
        csvwriter = csv.writer(csvfile, lineterminator="\n")
        csvwriter.writerow(headers_invalid)
        for row in invalid:
            csvwriter.writerow(row)
    print("Fixed numbers: {} - written to fixed_phonenumbers.csv".format(len(fixed)))
    with open("fixed_phonenumbers.csv", "w+") as csvfile:
        csvwriter = csv.writer(csvfile, lineterminator="\n")
        csvwriter.writerow(headers_fixed)
        for row in fixed:
            csvwriter.writerow(row)
    """
class Migration(migrations.Migration):
    # Runs after the netixlan asn migration.
    dependencies = [
        ("peeringdb_server", "0024_netixlan_asn"),
    ]
    # Data-only migration: normalize phone numbers going forward; reverse is
    # a no-op (original, unnormalized numbers are not restored).
    operations = [
        migrations.RunPython(forwards_func, migrations.RunPython.noop),
    ]
| 28.514085 | 88 | 0.583601 |
fd43f52b80543b98e410bda62579f5a759874f4d | 1,453 | py | Python | config.py | Andrew342/flask_study | a6b5140782cb50e8ce5c9a486d5f4326ad880241 | [
"MIT"
] | null | null | null | config.py | Andrew342/flask_study | a6b5140782cb50e8ce5c9a486d5f4326ad880241 | [
"MIT"
] | 7 | 2021-02-08T20:19:36.000Z | 2022-03-11T23:17:07.000Z | config.py | Andrew342/flask_study | a6b5140782cb50e8ce5c9a486d5f4326ad880241 | [
"MIT"
] | null | null | null | import os
# basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    # Base Flask configuration shared by all environments; values prefer
    # environment variables with local fallbacks.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    # Mail transport (QQ SMTP over SSL by default).
    MAIL_SERVER = os.environ.get('MAIL_SERVER', 'smtp.qq.com')
    # MAIL_SERVER="smtp.qq.com"
    MAIL_PORT = int(os.environ.get('MAIL_PORT', '465'))
    # MAIL_PORT=465
    MAIL_USE_SSL = os.environ.get('MAIL_USE_SSL', 'true').lower() in \
                   ['true', 'on', '1']
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    # NOTE(review): the commented-out credentials below look like a real
    # account/app password committed to the repo — rotate and remove them.
    # MAIL_USERNAME = '635771687@qq.com'
    # MAIL_PASSWORD = 'fuysxgnvhsgmbdab'
    FLASKY_MAIL_SUBJECT_PREFIX = ''
    FLASKY_MAIL_SENDER = '635771687@qq.com'
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_COMMIT_ON_TEARDOWN=True
    FLASKY_POSTS_PER_PAGE = 20
    @staticmethod
    def init_app(app):
        # Hook for environment-specific app initialization; base does nothing.
        pass
class DevelopmentConfig(Config):
    # Local development: debug on, dedicated _dev database.
    # NOTE(review): DB credentials are hardcoded; move them to an env var.
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:password@localhost:3306/data_dev"
class TestingConfig(Config):
    # Test runs: testing mode on, dedicated _test database.
    # NOTE(review): DB credentials are hardcoded; move them to an env var.
    TESTING = True
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:password@localhost:3306/data_test"
class ProductionConfig(Config):
    # Production database.
    # NOTE(review): DB credentials are hardcoded; move them to an env var.
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:password@localhost:3306/data"
# Registry mapping a configuration name to its class; 'default' is used
# when no name is supplied.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
240755fa9c6876cbc6007319eec4903032615eaa | 2,650 | py | Python | tutorials/pp-series/HRNet-Keypoint/lib/utils/stats.py | Intsigstephon/models | b96158ab16a252e9566ca03faa9d1ef0696798db | [
"Apache-2.0"
] | 1 | 2022-01-21T02:28:25.000Z | 2022-01-21T02:28:25.000Z | tutorials/pp-series/HRNet-Keypoint/lib/utils/stats.py | Intsigstephon/models | b96158ab16a252e9566ca03faa9d1ef0696798db | [
"Apache-2.0"
] | 1 | 2022-03-08T06:59:20.000Z | 2022-03-08T06:59:42.000Z | tutorials/pp-series/HRNet-Keypoint/lib/utils/stats.py | Intsigstephon/models | b96158ab16a252e9566ca03faa9d1ef0696798db | [
"Apache-2.0"
] | 2 | 2022-03-05T05:25:47.000Z | 2022-03-06T07:22:00.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numpy as np
__all__ = ['SmoothedValue', 'TrainingStats']
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.

    A bounded deque holds the most recent ``window_size`` samples for the
    windowed statistics, while ``total``/``count`` accumulate over the whole
    series for the global average.
    """

    def __init__(self, window_size=20, fmt=None):
        # Default display: windowed median followed by windowed mean.
        self.fmt = "{median:.4f} ({avg:.4f})" if fmt is None else fmt
        self.deque = collections.deque(maxlen=window_size)
        self.total = 0.
        self.count = 0

    def update(self, value, n=1):
        """Record *value*, weighted by *n*, in both the window and the totals."""
        self.deque.append(value)
        self.total += value * n
        self.count += n

    @property
    def median(self):
        """Median over the current window."""
        return np.median(self.deque)

    @property
    def avg(self):
        """Mean over the current window."""
        return np.mean(self.deque)

    @property
    def max(self):
        """Maximum over the current window."""
        return np.max(self.deque)

    @property
    def value(self):
        """Most recently recorded value."""
        return self.deque[-1]

    @property
    def global_avg(self):
        """Weighted mean over the entire series (not just the window)."""
        return self.total / self.count

    def __str__(self):
        stats = {
            "median": self.median,
            "avg": self.avg,
            "max": self.max,
            "value": self.value,
        }
        return self.fmt.format(**stats)
class TrainingStats(object):
    """Aggregate named training metrics into windowed SmoothedValue trackers."""

    def __init__(self, window_size, delimiter=' '):
        self.meters = None  # created lazily on the first update()
        self.window_size = window_size
        self.delimiter = delimiter

    def update(self, stats):
        """Fold one step's metrics (values exposing .numpy()) into the meters."""
        if self.meters is None:
            # The first batch of stats defines the tracked metric names.
            self.meters = {
                key: SmoothedValue(self.window_size)
                for key in stats.keys()
            }
        for key, meter in self.meters.items():
            meter.update(stats[key].numpy())

    def get(self, extras=None):
        """Return an OrderedDict of extras (first) plus formatted medians."""
        summary = collections.OrderedDict()
        if extras:
            for key, val in extras.items():
                summary[key] = val
        for key, meter in self.meters.items():
            summary[key] = format(meter.median, '.6f')
        return summary

    def log(self, extras=None):
        """Render get(extras) as 'key: value' pairs joined by the delimiter."""
        parts = ["{}: {}".format(key, val) for key, val in self.get(extras).items()]
        return self.delimiter.join(parts)
| 27.894737 | 77 | 0.609057 |
9be98e0387c8d680738bb62716e7890cd5254387 | 10,610 | py | Python | silence/server/endpoint_creator.py | DEAL-US/Silence | dd8b7fb229214edeb173bc417a6aeb6083072149 | [
"MIT"
] | 2 | 2022-03-21T23:40:21.000Z | 2022-03-21T23:40:22.000Z | silence/server/endpoint_creator.py | DEAL-US/Silence | dd8b7fb229214edeb173bc417a6aeb6083072149 | [
"MIT"
] | null | null | null | silence/server/endpoint_creator.py | DEAL-US/Silence | dd8b7fb229214edeb173bc417a6aeb6083072149 | [
"MIT"
] | null | null | null | import json
from os import listdir, getcwd, path, mkdir, makedirs
from silence.sql.tables import get_tables, get_views, get_primary_key, is_auto_increment
from silence.logging.default_logger import logger
from shutil import rmtree
# SILENCE CREATEAPI OPERATIONS
# Entry point for CLI command
def create_api():
    """Entry point for the ``silence createapi`` CLI command.

    Collects the endpoints the user has already defined by hand, logs them,
    then generates CRUD endpoints for every remaining entity.
    """
    user_defined = get_user_endpoints()
    logger.info("Found the following user defined endpoints:")
    for route_method in user_defined:
        logger.info(route_method)
    create_entity_endpoints(user_defined)
###############################################################################
# Create generic .js files for consuming the created endpoints.
###############################################################################
def generate_API_file_for_endpoints(endpoints, name):
    # Write web/js/api/_<name>.js containing one auto-generated JS API object
    # (<name>API_auto) with an async axios wrapper per endpoint tuple
    # (operation, route, method, pk, description).
    curr_dir = getcwd()
    api_path = curr_dir + "/web/js/api"
    if not path.isdir(api_path):
        makedirs(api_path)
    logger.info(f"Generating JS API files for {name}")
    # JS file header; doubled braces are f-string escapes for literal { }.
    file_content = f"""/*
 * DO NOT EDIT THIS FILE, it is auto-generated. It will be updated automatically.
 * All changes done to this file will be lost upon re-running the 'silence createapi' command.
 * If you want to create new API methods, define them in a new file.
 *
 * Silence is built and maintained by the DEAL research group at the University of Seville.
 * You can find us at https://deal.us.es
 */
"use strict";
import {{ BASE_URL, requestOptions }} from './common.js';
const {name}API_auto = {{"""
    for endpoint in endpoints:
        operation = endpoint[0]
        route = endpoint[1]
        method = endpoint[2]
        id_plain = endpoint[3]
        description = endpoint[4]
        # JS template-literal placeholder for the primary key, e.g. "${pk}".
        id_var = "${" + f"{id_plain}" + "}"
        file_content += "\n\n"
        if method == "GET":
            # A "$" in the route marks the by-primary-key variant.
            if "$" in route:
                route = f"{name}/{id_var}"
                file_content += generate_api_text(operation, description, method.lower(), route, id_plain, unique_result=True)
            else:
                route = f"{name}"
                file_content += generate_api_text(operation, description, method.lower(), route)
        elif method == "POST":
            route = f"{name}"
            file_content += generate_api_text(operation, description, method.lower(), route, has_body_params=True)
        elif method == "DELETE":
            route = f"{name}/{id_var}"
            file_content += generate_api_text(operation, description, method.lower(), route, id_plain)
        elif method == "PUT":
            route = f"{name}/{id_var}"
            file_content += generate_api_text(operation, description, method.lower(), route, id_plain, True)
    # Close the object literal and export it.
    file_content += "\n};\n\nexport {" + f"{name}API_auto" +"};"
    # NOTE(review): the next line is a no-op expression (its result is
    # discarded) — it looks like a leftover and can likely be deleted.
    api_path + f"{name}.js"
    with open(f"{api_path}/_{name}.js", "w") as api_point:
        api_point.write(file_content)
def generate_api_text(operation, description, method, route, id_plain = "", has_body_params = False, unique_result = False):
    """Render one async JS API method (as text) for the generated *API_auto object.

    :param operation: JS method name (e.g. "getAll", "create")
    :param description: text placed in the JSDoc comment
    :param method: lowercase axios HTTP verb ("get", "post", "put", "delete")
    :param route: URL fragment appended to BASE_URL (may embed a JS template var)
    :param id_plain: name of the primary-key argument, "" when the call takes none
    :param has_body_params: whether the call sends a formData request body
    :param unique_result: whether the caller expects a single row (indexes [0])
    :return: the JS source snippet for this method
    """
    body_arg = "formData, " if has_body_params else ""
    result_suffix = "[0]" if unique_result else ""
    # Calls without a PK argument always take formData as their only parameter.
    signature_args = body_arg if id_plain else "formData"
    return f"""    /**
     * {description}
     */
    {operation}: async function({signature_args}{id_plain}) {{
        let response = await axios.{method}(`${{BASE_URL}}/{route}`, {body_arg}requestOptions);
        return response.data{result_suffix};
    }},"""
#############################################################################
# Get the entities from the database and the existing user endpoints and #
# create CRUD endpoint files (json) for the remaining ones. #
#############################################################################
def create_entity_endpoints(existing_routes_method_pairs):
    # Regenerate endpoints/auto/*.json and the matching JS API files for every
    # DB table (full CRUD) and view (read-only), skipping any (route, method)
    # pair the user has already defined by hand.
    # Folder handling
    curr_dir = getcwd()
    endpoints_dir = curr_dir + "/endpoints"
    auto_dir = endpoints_dir + "/auto"
    logger.debug(f"Selected endpoint directory --> {auto_dir}")
    try:
        # Wipe the previous auto-generated endpoints before regenerating.
        rmtree(auto_dir)
    except FileNotFoundError:
        logger.debug("Folder is not there, creating it.")
    logger.debug(f"re-creating directory --> {auto_dir}")
    mkdir(auto_dir)
    # Endpoint files creation
    tables = get_tables()
    for table in list(tables.items()):
        pk = get_primary_key(table[0])
        is_AI = is_auto_increment(table[0], pk)
        endpoints = {}
        # NOTE(review): this mutates the shared column list in place, removing
        # the PK so it is not listed among the body params of create/update.
        table[1].remove(pk)
        name = table[0].lower()
        ep_tuples = []
        logger.info(f"Generating endpoints for {name}")
        get_all_route = f"/{name}"
        if (get_all_route, "GET") not in existing_routes_method_pairs:
            endpoints["getAll"] = generate_get_all(get_all_route, table)
            endpoint = ("getAll", get_all_route, "GET", pk, endpoints["getAll"]["description"])
            ep_tuples.append(endpoint)
        get_by_id_route = f"/{name}/${pk}"
        if (get_by_id_route, "GET") not in existing_routes_method_pairs:
            endpoints["getById"] = generate_get_by_id(get_by_id_route,table, pk)
            endpoint = ("getById", get_by_id_route, "GET", pk, endpoints["getById"]["description"])
            ep_tuples.append(endpoint)
        create_route = f"/{name}"
        if (create_route, "POST") not in existing_routes_method_pairs:
            endpoints["create"] = generate_create(create_route, table, pk, is_AI)
            endpoint = ("create", create_route, "POST", pk, endpoints["create"]["description"])
            ep_tuples.append(endpoint)
        # NOTE(review): "udpate_route" is a typo for "update_route" (local name
        # only, behavior unaffected).
        udpate_route = f"/{name}/${pk}"
        if (udpate_route, "PUT") not in existing_routes_method_pairs:
            endpoints["update"] = generate_update(udpate_route,table, pk)
            endpoint = ("update", udpate_route, "PUT", pk, endpoints["update"]["description"])
            ep_tuples.append(endpoint)
        delete_route = f"/{name}/${pk}"
        if (delete_route,"DELETE") not in existing_routes_method_pairs:
            endpoints["delete"] = generate_delete(delete_route,table, pk)
            endpoint = ("delete", delete_route, "DELETE", pk, endpoints["delete"]["description"])
            ep_tuples.append(endpoint)
        generate_API_file_for_endpoints(ep_tuples, name)
        dicts_to_file(endpoints, name, auto_dir)
    # Views only get the read-only listing endpoint.
    views = get_views()
    for view in list(views.items()):
        endpoints = {}
        ep_tuples = []
        name = view[0].lower()
        logger.info(f"Generating endpoints for {name}")
        get_all_route = f"/{name}"
        if (get_all_route, "GET") not in existing_routes_method_pairs:
            endpoints["getAll"] = generate_get_all(get_all_route, view)
            # NOTE(review): `pk` here is whatever remained from the LAST table
            # iteration (views have no PK); it raises NameError when there are
            # no tables at all — confirm whether this is intentional.
            endpoint = ("getAll", get_all_route, "GET", pk, endpoints["getAll"]["description"])
            ep_tuples.append(endpoint)
        generate_API_file_for_endpoints(ep_tuples, name)
        dicts_to_file(endpoints, name, auto_dir)
def get_user_endpoints():
    """Collect (route, method) pairs from every user-written endpoint JSON file.

    Ensures the ./endpoints directory exists, then parses each *.json file
    inside it and returns a flat list of (route, method) tuples.
    """
    logger.debug("Looking for user endpoints")
    # User endpoint definitions live as .json files under ./endpoints.
    endpoints_dir = getcwd() + "/endpoints"
    if not path.isdir(endpoints_dir):
        mkdir(endpoints_dir)
    json_paths = [
        endpoints_dir + f"/{fname}"
        for fname in listdir(endpoints_dir)
        if fname.endswith('.json')
    ]
    route_method_pairs = []
    for json_path in json_paths:
        with open(json_path, "r") as ep:
            # Each file maps endpoint names to endpoint dicts; only the
            # route/method pair is needed for duplicate detection.
            for endpoint in json.load(ep).values():
                route_method_pairs.append((endpoint['route'], endpoint['method']))
    return route_method_pairs
def dicts_to_file(dicts, name, auto_dir):
    """Write the endpoint dicts to <auto_dir>/<name>.json; no-op when empty."""
    if not dicts:
        return
    with open(auto_dir + f"/{name}.json", "w") as endpoint_file:
        endpoint_file.write(json.dumps(dicts, indent=4))
def generate_get_all(route, table):
    """Describe a GET endpoint returning every row of *table* (name at table[0])."""
    table_name = table[0]
    return {
        "route": route,
        "method": "GET",
        "sql": f"SELECT * FROM {table_name}",
        "auth_required": False,
        "allowed_roles": ["*"],
        "description": f"Gets all {table_name}",
    }
def generate_get_by_id(route, table, pk):
    """Describe a GET endpoint fetching one row of *table* by primary key *pk*."""
    table_name = table[0]
    return {
        "route": route,
        "method": "GET",
        "sql": f"SELECT * FROM {table_name} WHERE {pk} = ${pk}",
        "auth_required": False,
        "allowed_roles": ["*"],
        "description": f"Gets an entry from '{table_name}' by its primary key",
    }
def generate_create(route, table, pk, is_auto_increment):
    """Describe a POST (insert) endpoint for *table*.

    When the primary key is not auto-incremented the client must supply it,
    so *pk* is appended to the request-body parameter list.
    """
    table_name, param_list = table[0], table[1]
    if not is_auto_increment:
        param_list.append(pk)
    columns = params_to_string(param_list, "", is_create=True)
    placeholders = params_to_string(param_list, "$", is_create=True)
    return {
        "route": route,
        "method": "POST",
        "sql": f"INSERT INTO {table_name}" + columns + " VALUES " + placeholders,
        "auth_required": True,
        "allowed_roles": ["*"],
        "description": f"Creates a new entry in '{table_name}'",
        "request_body_params": param_list,
    }
def generate_update(route, table, pk):
    """Describe a PUT (update-by-primary-key) endpoint for *table*."""
    table_name, param_list = table[0], table[1]
    assignments = params_to_string(param_list, "", is_update=True)
    return {
        "route": route,
        "method": "PUT",
        "sql": f"UPDATE {table_name} SET " + assignments + f" WHERE {pk} = ${pk}",
        "auth_required": True,
        "allowed_roles": ["*"],
        "description": f"Updates an existing entry in '{table_name}' by its primary key",
        "request_body_params": param_list,
    }
def generate_delete(route, table, pk):
    """Describe a DELETE (by primary key) endpoint for *table*."""
    table_name = table[0]
    return {
        "route": route,
        "method": "DELETE",
        "sql": f"DELETE FROM {table_name} WHERE {pk} = ${pk}",
        "auth_required": True,
        "allowed_roles": ["*"],
        "description": f"Deletes an existing entry in '{table_name}' by its primary key",
    }
def params_to_string(param_list, char_add, is_create=False, is_update=False):
    """Render *param_list* as a SQL fragment.

    Parameters
    ----------
    param_list : list of str
        Column/parameter names.
    char_add : str
        Prefix added to each name in create mode ("" for column lists,
        "$" for placeholder lists). Ignored in update mode.
    is_create : bool, optional
        Produce "(a, b)" / "($a, $b)" style lists for INSERT statements.
    is_update : bool, optional
        Produce "a = $a, b = $b" assignment lists for UPDATE statements.

    Returns
    -------
    str or None
        The rendered fragment, or None when neither mode flag is set
        (preserved legacy behavior).
    """
    if is_create:
        # join handles the empty list correctly ("()"), unlike the old
        # manual-concatenation version which produced a bare ")".
        return "(" + ", ".join(char_add + p for p in param_list) + ")"
    if is_update:
        return ", ".join(f"{p} = ${p}" for p in param_list)
    return None
| 35.13245 | 154 | 0.608483 |
f74105373cdd2d7cc2a1c4cfafa7abdd7842398a | 4,119 | py | Python | sunpy/tests/setup_command.py | Naman9639/sunpy | 24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56 | [
"BSD-2-Clause"
] | null | null | null | sunpy/tests/setup_command.py | Naman9639/sunpy | 24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56 | [
"BSD-2-Clause"
] | null | null | null | sunpy/tests/setup_command.py | Naman9639/sunpy | 24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This file is designed to be imported and ran only via setup.py, hence it's
dependency on astropy_helpers which will be available in that context.
"""
import os
import copy
from astropy_helpers.commands.test import AstropyTest
class SunPyTest(AstropyTest):
    """Custom ``setup.py test`` command for SunPy.

    Extends astropy_helpers' AstropyTest with SunPy-specific options
    (online tests, figure-comparison tests, configurable coverage report)
    and patches the coverage output so paths refer to the source checkout
    rather than the temporary test directory.
    """

    description = 'Run the tests for this package'

    # Replace astropy's remote-data option with SunPy's online/figure options.
    user_options = copy.copy(AstropyTest.user_options)
    user_options.remove(('remote-data=', 'R',
                         'Run tests that download remote data. Should be '
                         'one of none/astropy/any (defaults to none).'))
    user_options += [('online', 'R',
                      'Also run tests that do require a internet connection.'),
                     ('online-only', None,
                      'Only run test that do require a internet connection.'),
                     ('cov-report=', None,
                      'How to display the coverage report, should be either "html" or "term"'),
                     ('figure', None,
                      'Run the figure tests.'),
                     # Run only tests that check figure generation
                     ('figure-only', None,
                      'Only run tests that compare figures against stored hashes.')]

    package_name = ''

    def initialize_options(self):
        """Set defaults for the SunPy-specific command options."""
        super().initialize_options()
        self.online = False
        self.online_only = False
        self.figure = False
        self.figure_only = False
        self.cov_report = True

    def _generate_coverage_commands(self):
        """Return (pre, post) code snippets that handle coverage reporting."""
        cmd_pre = ''  # Commands to run before the test function
        # patch the .coverage file so the paths are correct to the directory
        # setup.py was run in rather than the temporary directory.
        cwd = os.path.abspath(".")
        cmd_post = ('from sunpy.tests.helpers import _patch_coverage; '
                    'import os; '
                    'test_dir = os.path.abspath("."); '
                    f'_patch_coverage(test_dir, "{cwd}"); ')
        # Make html report the default and make pytest-cov save it to the
        # source directory not the temporary directory.
        if self.cov_report and (isinstance(self.cov_report, bool) or "html" in self.cov_report):
            html_cov = os.path.join(os.path.abspath("."), "htmlcov")
            self.cov_report = f'html:{html_cov}'
        # Any other cov_report value (e.g. "term") is passed through unchanged;
        # the previous no-op `else: self.cov_report = self.cov_report` was removed.
        return cmd_pre, cmd_post

    def generate_testing_command(self):
        """
        Build a Python script to run the tests.
        """
        cmd_pre = ''  # Commands to run before the test function
        cmd_post = ''  # Commands to run after the test function

        if self.coverage:
            pre, post = self._generate_coverage_commands()
            cmd_pre += pre
            cmd_post += post

        # The format string below expands every relevant option into a single
        # `self_test(...)` call executed in a subprocess; `sys.exit` propagates
        # the test result as the process exit code.
        cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
               '{1.package_name}.self_test('
               'package={1.package!r}, '
               'test_path={1.test_path!r}, '
               'args={1.args!r}, '
               'coverage={1.coverage!r}, '
               'cov_report={1.cov_report!r}, '
               'plugins={1.plugins!r}, '
               'verbose={1.verbose_results!r}, '
               'pastebin={1.pastebin!r}, '
               'online={1.online!r}, '
               'online_only={1.online_only!r}, '
               'figure={1.figure!r}, '
               'figure_only={1.figure_only!r}, '
               'figure_dir="{figure_dir}", '
               'pep8={1.pep8!r}, '
               'pdb={1.pdb!r}, '
               'open_files={1.open_files!r}, '
               'parallel={1.parallel!r}, '
               'docs_path={1.docs_path!r}, '
               'skip_docs={1.skip_docs!r}, '
               'repeat={1.repeat!r})); '
               '{cmd_post}'
               'sys.exit(result)')
        return cmd.format('pass',
                          self,
                          figure_dir=os.path.join(os.path.abspath('.'), "figure_test_images"),
                          cmd_pre=cmd_pre,
                          cmd_post=cmd_post)
| 38.858491 | 96 | 0.53144 |
16ca8a95583e04689085a4bb821332b7662252e1 | 15,436 | py | Python | ndg_tools/language/processors/tokens.py | ndgigliotti/data-science-toolkit | a8683273d2b503cd772ad597b790965a65b4cccc | [
"MIT"
] | null | null | null | ndg_tools/language/processors/tokens.py | ndgigliotti/data-science-toolkit | a8683273d2b503cd772ad597b790965a65b4cccc | [
"MIT"
] | 1 | 2021-12-12T08:04:30.000Z | 2021-12-12T08:08:42.000Z | ndg_tools/language/processors/tokens.py | ndgigliotti/data-science-toolkit | a8683273d2b503cd772ad597b790965a65b4cccc | [
"MIT"
] | null | null | null | import re
import string
from collections import Counter, defaultdict
from functools import lru_cache, singledispatch
from types import MappingProxyType
from typing import Iterable, Set, Union
import nltk
import numpy as np
from nltk.corpus.reader import wordnet
from nltk.sentiment.util import mark_negation as nltk_mark_neg
from pandas.core.series import Series
from ndg_tools import utils
from ndg_tools._validation import _validate_tokens
from ndg_tools.language.settings import CACHE_SIZE, DEFAULT_SEP
from ndg_tools.language.utils import process_tokens
from ndg_tools.typing import (TaggedTokens, TaggedTokenTuple, TokenDocs, Tokens,
TokenTuple)
# Fetch the NLTK data packages required by the taggers/lemmatizers below.
# NOTE(review): these downloads run at import time (network side effect).
nltk.download("averaged_perceptron_tagger")
nltk.download("universal_tagset")
nltk.download("wordnet")

# Matches the '_NEG' suffix that nltk.sentiment.util.mark_negation appends.
RE_NEG = re.compile(r"_NEG$")

# Read-only tag-translation tables (MappingProxyType prevents mutation).
UNIV_TO_WORDNET = MappingProxyType(
    {
        "ADJ": wordnet.ADJ,
        "NOUN": wordnet.NOUN,
        "PRON": wordnet.NOUN,
        "ADV": wordnet.ADV,
        "VERB": wordnet.VERB,
    }
)
"""Mapping of Universal POS tags to Wordnet POS tags."""

PTB_TO_UNIV = MappingProxyType(nltk.tagset_mapping("en-ptb", "universal"))
"""Mapping of Penn Treebank POS tags to Universal POS tags."""

PTB_TO_WORDNET = MappingProxyType(
    {
        "JJ": wordnet.ADJ,
        "JJR": wordnet.ADJ,
        "JJRJR": wordnet.ADJ,
        "JJS": wordnet.ADJ,
        "JJ|RB": wordnet.ADJ,
        "JJ|VBG": wordnet.ADJ,
        "MD": wordnet.VERB,
        "NN": wordnet.NOUN,
        "NNP": wordnet.NOUN,
        "NNPS": wordnet.NOUN,
        "NNS": wordnet.NOUN,
        "NN|NNS": wordnet.NOUN,
        "NN|SYM": wordnet.NOUN,
        "NN|VBG": wordnet.NOUN,
        "NP": wordnet.NOUN,
        "PRP": wordnet.NOUN,
        "PRP$": wordnet.NOUN,
        "PRP|VBP": wordnet.NOUN,
        "RB": wordnet.ADV,
        "RBR": wordnet.ADV,
        "RBS": wordnet.ADV,
        "RB|RP": wordnet.ADV,
        "RB|VBG": wordnet.ADV,
        "VB": wordnet.VERB,
        "VBD": wordnet.VERB,
        "VBD|VBN": wordnet.VERB,
        "VBG": wordnet.VERB,
        "VBG|NN": wordnet.VERB,
        "VBN": wordnet.VERB,
        "VBP": wordnet.VERB,
        "VBP|TO": wordnet.VERB,
        "VBZ": wordnet.VERB,
        "VP": wordnet.VERB,
        "WP": wordnet.NOUN,
        "WP$": wordnet.NOUN,
        "WRB": wordnet.ADV,
    }
)
"""Mapping of Penn Treebank POS tags to Wordnet POS tags."""
@singledispatch
def mark_negation(
    tokens: Tokens,
    double_neg_flip: bool = False,
    split=False,
    sep: str = DEFAULT_SEP,
) -> Tokens:
    """Mark tokens '_NEG' which fall between a negating word and punctuation mark.

    Wrapper for nltk.sentiment.util.mark_negation. Keeps cache
    to reuse previously computed results.

    Parameters
    ----------
    tokens : sequence of str
        Sequence of tokens to mark negated words in.
    double_neg_flip : bool, optional
        Ignore double negation. False by default.
    split: bool, optional
        Break off 'NEG' tags into separate tokens. False by default.
    sep : str, optional
        Separator for 'NEG' suffix.

    Returns
    -------
    sequence of str
        Tokens with negation marked.
    """
    # Fallback dispatch to catch any seq of tokens
    _validate_tokens(tokens)
    # Make immutable (hashable) and delegate to the lru_cache-backed tuple
    # dispatch below, which does the real work.
    tokens = mark_negation(
        tuple(tokens),
        double_neg_flip=double_neg_flip,
        split=split,
        sep=sep,
    )
    # Make mutable and return
    return list(tokens)
@mark_negation.register
@lru_cache(maxsize=CACHE_SIZE, typed=False)
def _(
    tokens: tuple,
    double_neg_flip: bool = False,
    split=False,
    sep: str = DEFAULT_SEP,
) -> TokenTuple:
    """Dispatch for tuple. Keeps cache to reuse previous results."""
    _validate_tokens(tokens)
    # Make mutable
    tokens = list(tokens)
    # Apply nltk.sentiment.util.mark_negation
    tokens = nltk_mark_neg(tokens, double_neg_flip=double_neg_flip)
    if split:
        # Convert tags into independent 'NEG' tokens.
        # NOTE(review): the list is mutated (insert) while being enumerated;
        # this skips re-checking the inserted "NEG" token, which appears
        # intentional, but verify if negated tokens can be adjacent.
        for i, token in enumerate(tokens):
            if RE_NEG.search(token):
                tokens[i] = token[: token.rfind("_")]
                tokens.insert(i + 1, "NEG")
    elif sep != "_":
        # Substitute `sep` for the default underscore separator
        for i, word in enumerate(tokens):
            tokens[i] = RE_NEG.sub(f"{sep}NEG", word)
    # Make immutable (hashable, cacheable) and return
    return tuple(tokens)
@singledispatch
def pos_tag(
    tokens: Tokens,
    tagset: str = None,
    lang: str = "eng",
    fuse_tuples=False,
    split_tuples=False,
    replace=False,
    sep: str = DEFAULT_SEP,
) -> Union[TaggedTokens, Tokens]:
    """Tag `tokens` with parts of speech.

    Wrapper for `nltk.pos_tag`. Keeps cache to reuse
    previous results.

    Parameters
    ----------
    tokens : sequence of str
        Word tokens to tag with PoS.
    tagset : str, optional
        Name of NLTK tagset to use, defaults to Penn Treebank
        if not specified. Unfortunately, NLTK does not have a
        consistent approach to their tagset names.
    lang : str, optional
        Language of `tokens`, by default "eng".
    fuse_tuples : bool, optional
        Join ('token', 'tag') tuples as 'token_tag' according to `sep`.
        By default False.
    split_tuples : bool, optional
        Break up tuples so that tags mingle with the tokens. Equivalent to
        flattening the sequence. By default False.
    replace : bool, optional
        Replace word tokens with their PoS tags, by default False.
    sep : str, optional
        Separator used if `fuse_tuples` is set.

    Returns
    -------
    sequence of tuple of str, or sequence of str
        Tokens tagged with parts of speech, or related sequence of str.
    """
    # Fallback dispatch to catch any seq of tokens
    _validate_tokens(tokens)
    # Make immutable (hashable) and delegate to the cached tuple dispatch.
    tokens = pos_tag(
        tuple(tokens),
        tagset=tagset,
        lang=lang,
        fuse_tuples=fuse_tuples,
        split_tuples=split_tuples,
        replace=replace,
        sep=sep,
    )
    # Make mutable and return
    return list(tokens)
@pos_tag.register
@lru_cache(maxsize=CACHE_SIZE, typed=False)
def _(
    tokens: tuple,
    tagset: str = None,
    lang: str = "eng",
    fuse_tuples=False,
    split_tuples=False,
    replace=False,
    sep=DEFAULT_SEP,
) -> TaggedTokenTuple:
    """Dispatch for tuple. Keeps cache to reuse previous results."""
    # Validate params: the three output-shaping flags are mutually exclusive.
    _validate_tokens(tokens)
    if sum([fuse_tuples, split_tuples, replace]) > 1:
        raise ValueError(
            "Only one of `fuse_tuples`, `split_tuples`, or `replace` may be True."
        )
    # Tag PoS
    tokens = nltk.pos_tag(tokens, tagset=tagset, lang=lang)
    if fuse_tuples:
        # Fuse ('token', 'tag') tuples into 'token<sep>tag' strings
        tokens = [nltk.tuple2str(x, sep) for x in tokens]
    elif split_tuples:
        # Flatten: each tuple becomes two consecutive word tokens
        tokens = [x for tup in tokens for x in tup]
    elif replace:
        # Keep only the PoS tags, dropping the words
        tokens = [t for _, t in tokens]
    return tuple(tokens)
def filter_pos(tokens: TaggedTokens, include=None, exclude=None):
    """Filter ('token', 'tag') pairs by PoS tag.

    Exactly one of `include` or `exclude` must be given: a collection of
    tags to keep, or to drop. Returns the surviving tokens (without tags).

    NOTE(review): `Series(dict(tokens))` collapses duplicate words before
    filtering — presumably acceptable here, but confirm against callers.
    """
    if include is None and exclude is None:
        raise ValueError("Must pass either `include` or `exclude`.")
    if include is not None and exclude is not None:
        raise ValueError("Can only pass one of `include` or `exclude`.")
    # Build a Series indexed by tag (swap_index flips word->tag to tag->word).
    tokens = utils.swap_index(Series(dict(tokens)))
    if include is not None:
        # Convert the include-list into the complementary exclude set.
        exclude = tokens.index.difference(include)
    tokens.drop(exclude, inplace=True)
    return tokens.to_list()
def _tag_wordnet_pos(tokens: Tokens):
    """Tag `tokens` with Wordnet PoS codes, defaulting unknown tags to noun."""
    tagged = nltk.pos_tag(tokens, tagset="universal")
    return [(word, UNIV_TO_WORDNET.get(tag, wordnet.NOUN)) for word, tag in tagged]
def wordnet_lemmatize(
    tokens: TokenDocs, *, preserve: Iterable[str] = None, n_jobs=None
) -> TokenDocs:
    """Reduce English words to root form using Wordnet.

    Tokens are first tagged with parts of speech and then
    lemmatized accordingly.

    Parameters
    ----------
    tokens : sequence of str
        Tokens to lemmatize.
    preserve : iterable of str, optional
        Tokens to pass through unchanged (never lemmatized).
    n_jobs : int, optional
        Parallelism, forwarded to `process_tokens`.

    Returns
    -------
    Sequence of str
        Lemmatized tokens.
    """
    lemmatizer = nltk.WordNetLemmatizer()
    # Normalize `preserve` to a frozenset for O(1) membership tests.
    if preserve is None:
        preserve = frozenset()
    else:
        preserve = frozenset(preserve)

    def process_singular(tokens, lemmatizer=lemmatizer, preserve=preserve):
        # Tag with Wordnet POS codes, then lemmatize each non-preserved token.
        tokens = _tag_wordnet_pos(tokens)
        return [w if w in preserve else lemmatizer.lemmatize(w, t) for w, t in tokens]

    return process_tokens(
        tokens, process_singular, n_jobs=n_jobs, bar_desc="wordnet_lemmatize"
    )
def porter_stem(tokens: Tokens, preserve: Iterable[str] = None, n_jobs=None) -> Tokens:
    """Reduce English words to stems using Porter algorithm.

    Parameters
    ----------
    tokens : sequence of str
        Tokens to stem.
    preserve : iterable of str, optional
        Tokens to pass through unchanged (never stemmed).
    n_jobs : int, optional
        Parallelism, forwarded to `process_tokens`.

    Returns
    -------
    Sequence of str
        Stemmed tokens.
    """
    stemmer = nltk.PorterStemmer()
    # Normalize `preserve` to a frozenset for O(1) membership tests.
    if preserve is None:
        preserve = frozenset()
    else:
        preserve = frozenset(preserve)

    def process_singular(tokens, stemmer=stemmer, preserve=preserve):
        # Second arg False = do not lowercase before stemming.
        return [w if w in preserve else stemmer.stem(w, False) for w in tokens]

    return process_tokens(
        tokens, process_singular, n_jobs=n_jobs, bar_desc="porter_stem"
    )
def length_filter(tokens: TokenDocs, min_char=0, max_char=20, n_jobs=None) -> TokenDocs:
    """Remove tokens with too few or too many characters.

    Parameters
    ----------
    tokens : sequence of str
        Tokens to filter by length.
    min_char : int, optional
        Minimum length (inclusive), by default 0. None is treated as 0.
    max_char : int, optional
        Maximum length (inclusive), by default 20. None means unbounded.
    n_jobs : int, optional
        Parallelism, forwarded to `process_tokens`.

    Returns
    -------
    Sequence of str
        Filtered tokens.

    Raises
    ------
    ValueError
        If `min_char` exceeds `max_char`.
    """
    if min_char is None:
        min_char = 0
    # The original condition tested `min_char > max_char` twice
    # (`... or max_char < min_char` is the same comparison); simplified.
    if max_char is not None and min_char > max_char:
        raise ValueError("`min_char` must be less than `max_char`.")

    def process_singular(tokens, min_char=min_char, max_char=max_char):
        if max_char is None:
            # No upper bound: keep everything at least min_char long.
            return [w for w in tokens if min_char <= len(w)]
        return [w for w in tokens if min_char <= len(w) <= max_char]

    return process_tokens(
        tokens, process_singular, n_jobs=n_jobs, bar_desc="length_filter"
    )
def uniq_ratio(text: str):
    """Return the fraction of characters in `text` that are distinct."""
    distinct_chars = set(text)
    return len(distinct_chars) / len(text)
def dom_ratio(text):
    """Return the frequency share of the most common character in `text`."""
    counts = Counter(text)
    return max(counts.values()) / sum(counts.values())
def uniq_char_thresh(tokens: TokenDocs, thresh=0.375, n_jobs=None) -> TokenDocs:
    """Remove tokens with low character uniqueness ratio.

    Tokens like "aaaaah" (few distinct characters) are dropped.

    Parameters
    ----------
    tokens : sequence of str
        Tokens to filter.
    thresh : float, optional
        Minimum uniqueness ratio (exclusive), by default 0.375.
    n_jobs : int, optional
        Parallelism, forwarded to `process_tokens`.

    Returns
    -------
    Sequence of str
        Filtered tokens.
    """
    if not (0.0 < thresh < 1.0):
        raise ValueError("`thresh` must be between 0.0 and 1.0.")

    def process_singular(tokens, thresh=thresh):
        # Keep tokens whose distinct-character fraction exceeds the threshold.
        return [w for w in tokens if uniq_ratio(w) > thresh]

    return process_tokens(
        tokens, process_singular, n_jobs=n_jobs, bar_desc="uniq_char_thresh"
    )
def char_dom_thresh(tokens: TokenDocs, thresh=0.75, n_jobs=None) -> TokenDocs:
    """Remove tokens which are dominated by a single character.

    Parameters
    ----------
    tokens : sequence of str
        Tokens to filter.
    thresh : float, optional
        Maximum share (exclusive) of the most frequent character,
        by default 0.75. (The previous docstring said 0.25, which
        contradicted the actual default.)
    n_jobs : int, optional
        Parallelism, forwarded to `process_tokens`.

    Returns
    -------
    Sequence of str
        Filtered tokens.
    """
    if not (0 < thresh < 1):
        raise ValueError("`thresh` must be between 0.0 and 1.0.")

    def process_singular(tokens, thresh=thresh):
        # Keep tokens whose dominant-character share is below the threshold.
        return [w for w in tokens if dom_ratio(w) < thresh]

    return process_tokens(
        tokens, process_singular, n_jobs=n_jobs, bar_desc="char_dom_thresh"
    )
def remove_stopwords(
    tokens: TokenDocs,
    stopwords: Union[str, Set[str]] = "nltk_english",
    n_jobs: int = None,
) -> TokenDocs:
    """Remove stopwords from `tokens`.

    Parameters
    ----------
    tokens : sequence of str
        Tokens to remove stopwords from.
    stopwords : str or collection of str, optional
        Set of stopwords, name of recognized stopwords set, or query
        understood by `fetch_stopwords`. Defaults to 'nltk_english'.
    n_jobs : int, optional
        Parallelism, forwarded to `process_tokens`.

    Returns
    -------
    Sequence of str
        Tokens with stopwords removed.
    """
    # A string is treated as a stopword-set name/query; anything else is
    # assumed to already be a collection of stopwords.
    if isinstance(stopwords, str):
        stopwords = frozenset(fetch_stopwords(stopwords))
    else:
        stopwords = frozenset(stopwords)

    def process_singular(tokens, stopwords=stopwords):
        return [w for w in tokens if w not in stopwords]

    return process_tokens(
        tokens, process_singular, n_jobs=n_jobs, bar_desc="remove_stopwords"
    )
def fetch_stopwords(query: str) -> Set[str]:
    """Fetch a recognized stopwords set.

    Recognized sets include 'sklearn_english', 'nltk_english', 'nltk_spanish',
    'nltk_french'. Will recognize 'nltk_{language}' in general if provided the
    language (fileid) of an NLTK stopwords set. Supports complex queries involving
    set operators '|', '&', '-', and '^' and parentheses.

    Parameters
    ----------
    query: str
        Name of recognized stopwords set or complex query involving names.

    Returns
    -------
    set of str
        A set of stop words.

    Raises
    ------
    TypeError
        If `query` is not a string.
    ValueError
        If `query` is not a recognized name or a well-formed set expression.
    """
    # Validate string
    if not isinstance(query, str):
        raise TypeError(f"Expected `name` to be str, got {type(query)}.")
    # Process string input
    else:
        # Perform complex fetch with set ops
        if set("|&-^") & set(query):
            # Construct Python expression to fetch each set and perform set ops.
            # (Pattern is now a raw string; "\w+" was an invalid escape sequence.)
            expr = re.sub(r"\w+", lambda x: f"fetch_stopwords('{x[0]}')", query)
            # Sanitize by restricting symbols
            symbols = set(re.findall(fr"[{string.punctuation}]|\sif\s|\selse\s", expr))
            if not symbols.issubset(set("|&-^_()'")):
                raise ValueError(f"Invalid query: {query}")
            # SECURITY NOTE: eval on user-derived input; `expr` is rebuilt from
            # whitelisted operators and recursive fetch_stopwords calls only,
            # but keep the sanitization above in sync with this grammar.
            result = eval(expr)
        # Fetch sklearn stopwords
        elif query in {"sklearn_english", "sklearn"}:
            from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
            result = set(ENGLISH_STOP_WORDS)
        # Fetch NLTK stopwords
        elif query.startswith("nltk"):
            if "_" in query:
                # Split name to get language
                components = query.split("_")
                # Only allow one language at a time (for uniform syntax)
                if len(components) > 2:
                    raise ValueError(f"Invalid query: {query}")
                # NLTK stopwords fileid e.g. 'english', 'spanish'
                fileid = components[1]
                result = set(nltk.corpus.stopwords.words(fileids=fileid))
            else:
                # Defaults to 'english' if no language specified
                result = set(nltk.corpus.stopwords.words("english"))
        # Raise ValueError if unrecognized
        else:
            raise ValueError(f"Invalid query: {query}")
    return result
| 29.627639 | 88 | 0.627818 |
16937bf465aaca3f8a0c3e5895950877056d4b8f | 578 | py | Python | example.py | vlukiyanov/id3-python-example | 4db88ad1ccdd335c7d26dd6ae645e3b55eac338f | [
"MIT"
] | null | null | null | example.py | vlukiyanov/id3-python-example | 4db88ad1ccdd335c7d26dd6ae645e3b55eac338f | [
"MIT"
] | null | null | null | example.py | vlukiyanov/id3-python-example | 4db88ad1ccdd335c7d26dd6ae645e3b55eac338f | [
"MIT"
] | null | null | null | import numpy as np
from id3 import id3
# Toy "play tennis" dataset: columns 0-3 are integer-encoded categorical
# features, column 4 is the binary class label (see the slicing below).
test_playtennis = np.array(
    [
        [0, 2, 1, 0, 0],
        [0, 2, 1, 1, 0],
        [1, 2, 1, 0, 1],
        [2, 1, 1, 0, 1],
        [2, 0, 0, 0, 1],
        [2, 0, 0, 1, 0],
        [1, 0, 0, 1, 1],
        [0, 1, 1, 0, 0],
        [0, 0, 0, 0, 1],
        [2, 1, 0, 0, 1],
        [0, 1, 0, 1, 1],
        [1, 1, 1, 1, 1],
        [1, 2, 0, 0, 1],
        [2, 1, 1, 1, 0],
    ]
)
# Number of distinct values for each of the four feature columns.
test_nclasses = [3, 3, 2, 2]
data = test_playtennis[:, 0:4]  # feature matrix (all rows, first 4 columns)
target = test_playtennis[:, 4]  # class labels (last column)
# Build the decision tree and print it.
tree = id3(data, target, test_nclasses)
tree.show()
| 19.266667 | 39 | 0.382353 |
66d1d5a140416db88ae505a3744363cdac3bcfbe | 1,065 | py | Python | rename_file.py | hhaAndroid/u2net | fff1db4edb3c9675b82926980ce1d5dfa98c0a1e | [
"Apache-2.0"
] | 1 | 2021-01-09T07:40:08.000Z | 2021-01-09T07:40:08.000Z | rename_file.py | hhaAndroid/u2net | fff1db4edb3c9675b82926980ce1d5dfa98c0a1e | [
"Apache-2.0"
] | null | null | null | rename_file.py | hhaAndroid/u2net | fff1db4edb3c9675b82926980ce1d5dfa98c0a1e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ======================================================
# @Time : 21-1-9 下午7:41
# @Author : huang ha
# @Email : huang_ha@rr.com
# @File : rename_file.py
# @Comment:
# ======================================================
import os
import cv2
if __name__ == '__main__':
    # Source directory of the original images.
    image_dir = '/home/hha/dataset/circle/circle'
    files = os.listdir(image_dir)
    # Old sequential-renaming pass, kept for reference:
    # count=0
    # for name in files:
    #     _, name1 = os.path.split(name)
    #     aaa = name1.split(".")
    #     NewName = os.path.join(image_dir, str(count)+'.'+aaa[1])
    #     OldName = os.path.join(image_dir, name)
    #     os.rename(OldName, NewName)
    #     count +=1
    # Destination directory for the re-encoded copies.
    dst_dir='/home/hha/dataset/circle/circle1'
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    # Unify the file extension and re-encode (compress) every image as .jpg
    for name in files:
        _, name1 = os.path.split(name)
        # Extension of the original file (text after the last dot).
        suf_len = name1.split('.')[-1]
        # NOTE(review): cv2.imread returns None for unreadable files and
        # cv2.imwrite would then fail — presumably all entries are valid
        # images; confirm before running on new data.
        img=cv2.imread(os.path.join(image_dir, name))
        # Replace the original extension with 'jpg' (the trailing dot of the
        # base name is kept, so 'x.png' becomes 'x.jpg').
        dst_name=os.path.join(dst_dir, name1[:-len(suf_len)]+'jpg')
        cv2.imwrite(dst_name,img)
2862ea19de1a279124782988f3d92792e8c54c92 | 13,226 | py | Python | ProgsByDataset/ArxivMAG/nomulti_prepare_arxiv_hd2v_file.py | ashwath92/MastersThesis | f74755dc0c32f316da3c860dd5dbfa4c9cad97b3 | [
"MIT"
] | 5 | 2020-11-05T07:11:54.000Z | 2021-08-04T21:37:28.000Z | ProgsByDataset/ArxivMAG/nomulti_prepare_arxiv_hd2v_file.py | ashwath92/MastersThesis | f74755dc0c32f316da3c860dd5dbfa4c9cad97b3 | [
"MIT"
] | null | null | null | ProgsByDataset/ArxivMAG/nomulti_prepare_arxiv_hd2v_file.py | ashwath92/MastersThesis | f74755dc0c32f316da3c860dd5dbfa4c9cad97b3 | [
"MIT"
] | 4 | 2020-11-05T06:04:38.000Z | 2021-08-02T16:25:42.000Z | """ This prepares a file for hyperdoc2vec from the arxiv full text (only papers mapped to mag ids). """
import os
import re
import csv
import pickle
import sqlite3
import psycopg2
import psycopg2.extras
from gensim.parsing import preprocessing
from gensim.utils import to_unicode
import pandas as pd
from tqdm import tqdm
basepath = '/home/ashwath/Programs'
dbpath = os.path.join(basepath, 'ArxivCS', 'SQLITEDB', 'arxivcs_mag_mapping.sqlite3')
def db_connect(set_params=False, path=dbpath):
    """Open the sqlite3 database at *path*.

    timeout=10 prevents "database is locked" errors between inserts, and
    PARSE_DECLTYPES enables datetime/date column types. When *set_params*
    is True (used only while creating the database), insertion-speed
    pragmas are applied as well.
    """
    conn = sqlite3.connect(path, timeout=10,
                           detect_types=sqlite3.PARSE_DECLTYPES)
    if set_params:
        # Speed up bulk insertions: WAL journal, bigger cache, exclusive lock.
        for pragma in ('PRAGMA main.journal_mode=WAL;',
                       'PRAGMA main.cache_size=10000;',
                       'PRAGMA main.locking_mode=EXCLUSIVE;'):
            conn.execute(pragma)
    return conn
# GLOBALS -- NOTE(review): everything below runs at import time and opens
# database connections and an output file as side effects.
# Hyperdoc2vec markers for citations
docid_prefix='=-='
docid_suffix='-=-'
# Set of every MAG id seen so far (citing or cited).
allmagpaperids = set()
# mag arxiv mapping db connection
sconn = db_connect()
scur = sconn.cursor()
# context connection: for getting the mag id of the CITED papers
meta_db_path = '/vol2/unarXive/arxiv-txt-data/metadata.db'
cconn = db_connect(path=meta_db_path)
ccur = cconn.cursor()
# Some arxiv ids are mapped to 2 magids, keep only 1 (data problem)
# 72246 rows in the results (out of 72315): 69 duplicates
# Training set is all years until 2016 (2017 is the test set)
# Training set: 62296 papers
# Test set: 9954 papers
trainingquery = """select arxiv_id, mag_id
from arxivcs_mag
where arxiv_id not like '17%'
group by mag_id;
"""
# Write test set
testsetquery = """select arxiv_id, mag_id
from arxivcs_mag
where arxiv_id like '17%'
group by mag_id;
"""
# Dump the train/test id splits as TSV files.
testresdf = pd.read_sql_query(testsetquery, sconn)
testresdf.to_csv('AdditionalOutputs/test_ids.tsv', index=False, sep='\t')
trainresdf = pd.read_sql_query(trainingquery, sconn)
trainresdf.to_csv('AdditionalOutputs/training_ids.tsv', index=False, sep='\t')
# Get a Series of mag ids for which we have full text
mag_id_series = trainresdf['mag_id']
# IMP: There seems to be some problem with the data?? Multiple arxiv ids are mapped to the same mag id
# Doing select mag_id from arxivcs_mag, and read_sql_query, then
# df[df.isin(df[df.duplicated()])] gives 69 records.
# Get a set of mag ids (mapped from arxiv of course) which have full text
inarxiv_papers_set = set(mag_id_series.tolist())
# POSTGRES connection obj and cursor
# NOTE(review): hard-coded credentials in source -- should come from config/env.
pconn = psycopg2.connect("dbname=MAG19 user=mag password=1maG$ host=shetland.informatik.uni-freiburg.de")
pcur = pconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# POSTGRES QUERY: title + abstract joined with aggregated citation contexts
# and reference ids for one paper id (passed twice as a parameter).
magonly_query = """
SELECT titleandabstract.paperid, papertitle, abstract, contexts, referenceids
FROM
(
SELECT papers.paperid, papertitle, abstract FROM papers INNER JOIN paperabstracts
ON papers.paperid=paperabstracts.paperid
WHERE papers.paperid=%s) AS titleandabstract INNER JOIN
(
SELECT paperid, string_agg(paperreferenceid::character varying, ',') AS referenceids,
string_agg(citationcontext, ' ||--|| ') AS contexts
FROM papercitationcontexts
WHERE paperid=%s
GROUP BY paperid
) AS listofcontexts
ON titleandabstract.paperid=listofcontexts.paperid;"""
# Arxiv citing, cited list for Tarek.
arxiv_citing_cited_file = open('AdditionalOutputs/citing_cited.tsv', 'w')
fieldnames = ['citing_arxiv_id', 'citing_mag_id', 'cited_mag_id']
writer = csv.DictWriter(arxiv_citing_cited_file, delimiter="\t", fieldnames=fieldnames)
writer.writeheader()
# Matches '{{cite:<uuid>}}' markers in the arxiv full text.
# NOTE(review): '[a-zA-z]' spans punctuation between 'Z' and 'a' in ASCII;
# harmless for hex uuids but presumably meant to be '[a-zA-Z]'.
citation_pattern = re.compile(r'(\{\{cite:)([a-zA-z0-9-]+)(\}\})')
def get_mag_from_uuid(matchobject):
    """re.sub callback: replace a '{{cite:<uuid>}}' match with a MAG marker.

    Looks the cited uuid up in the contexts db (module-level `ccur`). On a
    hit, records the MAG id in `allmagpaperids` and returns it wrapped in
    the hyperdoc2vec docid markers; on a miss, returns the literal word
    'citation'.
    """
    cited_uuid = matchobject.group(2)
    # uuid comes from the regex ([a-zA-z0-9-]+), so quoting is safe here.
    uuidquery = "select mag_id from bibitemmagidmap where uuid='{}'".format(cited_uuid)
    ccur.execute(uuidquery)
    res = ccur.fetchone()
    if res is None:
        # If the uuid does not map to a mag id, replace with the word citation.
        return 'citation'
    else:
        fetched_magid = res[0]
        # Remember the cited paper so add_additional_papers can pick it up.
        allmagpaperids.add(fetched_magid)
        return '{}{}{}'.format(docid_prefix, fetched_magid, docid_suffix)
""" def read_arxiv_addmagids(arxiv_id, mag_id, ccur):
Read arxiv full text,
arxiv_filepath = '/vol2/unarXive/arxiv-txt-data/{}.txt'.format(arxiv_id)
with open(arxiv_filepath, 'r') as arxivfile:
content = arxivfile.read()
words = content.split()
# Replace all {{cite:ac7d7c84-d6e0-461d-a1fc-36f7ee323c07}}, i.e. \{\}cite:.*\}\}
# Get all the word indices which need to be replaced and put it in a dict with
# the corresponding mag id from the db.
wordindex_magid_dict = dict()
for i, word in enumerate(words):
if word.startswith('{{cite'):
# Skip first 7 characters, uuid is the part between {{cite and }}
uuid = word[7:word.find('}}')]
#print(uuid)
# Use uuid to get the mag id from the contexts db
query = "select mag_id from bibitemmagidmap where uuid='{}'".format(uuid)
ccur.execute(query)
# Res is a tuple of one element E.g.: ('2133990480',)
res = ccur.fetchone()
if res is None:
# If the uuid does not map to a mag id, replace with the word citation.
wordindex_magid_dict[i] = 'citation'
else:
fetched_magid = res[0]
# Add to the seen set
allmagpaperids.add(fetched_magid)
writer.writerow({'citing_arxiv_id': arxiv_id, 'citing_mag_id': mag_id,
'cited_mag_id': fetched_magid})
wordindex_magid_dict[i] = '{}{}{}'.format(docid_prefix, res[0], docid_suffix)
# Do the replacements in the words list
for index, replacement_citation in wordindex_magid_dict.items():
words[index] = replacement_citation
# Convert the words list back to a string
# Make sure to add the citing paper mag id as the first word in the line
content = '{} {}\n'.format(mag_id, ' '.join(words))
return content
"""
def read_arxiv_addmagids(arxiv_id, mag_id, ccur):
    """Read arxiv full text and replace citation markers with MAG ids.

    NOTE(review): the `ccur` parameter is unused -- get_mag_from_uuid reads
    the module-level cursor instead; consider dropping it.
    """
    arxiv_filepath = '/vol2/unarXive/arxiv-txt-data/{}.txt'.format(arxiv_id)
    with open(arxiv_filepath, 'r') as arxivfile:
        # Collapse newlines: one document per output line.
        content = arxivfile.read().replace('\n', ' ')
    # Replace every {{cite:<uuid>}} marker with '=-=<magid>-=-' (or the word
    # 'citation' when the uuid has no MAG mapping).
    content = citation_pattern.sub(get_mag_from_uuid, content)
    # The citing paper's mag id becomes the first word of the line
    # (hyperdoc2vec input format).
    content = '{} {}\n'.format(mag_id, content)
    return content
def clean_text(text):
    """Clean a title/abstract/context string in several steps.

    Lowercases, collapses newlines, optionally expands contractions
    ("you're" -> "you are"), and strips punctuation and repeated
    whitespace.

    ARGUMENTS: text: content/title, string
    RETURNS: cleaned text, string
    """
    # Replace newlines by space. We want only one doc vector.
    text = text.replace('\n', ' ').lower()
    # BUG FIX: `contractions` was used without ever being imported in this
    # module, so this function raised NameError at runtime. Contraction
    # expansion is now best-effort: applied when the package is installed,
    # skipped otherwise.
    try:
        import contractions
        text = contractions.fix(text)
    except ImportError:
        pass
    # Remove punctuation (all special characters) and collapse whitespace.
    text = preprocessing.strip_multiple_whitespaces(preprocessing.strip_punctuation(text))
    return text
def add_additional_papers():
    """ Add additional papers for which full text from Arxiv is not present. Care is taken that while
    adding references to THESE papers, these references should be in the set of papers stored
    in the allmagpaperids set (otherwise, there will be additional papers in the reference part
    of the concat contexts which are not in the files in the text.
    ALSO NOTE that allmagpaperids contains all papers which either cite or are cited so far
    inarxiv_papers_set contains the set of papers which are in arxiv (citing)
    A set difference (allmagpaperids - inarxiv_papers_set) gives the set of mag_ids for which we
    get additional text"""
    # Persist both id sets for later reuse/debugging.
    with open('Pickles/inarxiv_papers_set.pickle', 'wb') as picc:
        pickle.dump(inarxiv_papers_set, picc)
    with open('Pickles/allmagpapers_en_magcontexts.pickle', 'wb') as picc2:
        pickle.dump(allmagpaperids, picc2)
    additional_mag_ids = allmagpaperids - inarxiv_papers_set
    for paperid in tqdm(additional_mag_ids):
        pcur.execute(magonly_query, (paperid, paperid))
        # Get paperid, contexts, abstract, title, refids of current paper id
        for row in pcur:
            # row is a dict with keys:
            # dict_keys(['paperid', 'papertitle', 'abstract', 'contexts', 'referenceids'])
            paperid = row.get('paperid')
            # Get all contexts and reference ids (delimiters set in the pSQL query)
            # NOTE(review): if 'contexts' is None, .replace raises AttributeError
            # here, making the `contexts is not None` check below unreachable
            # for that case -- presumably the INNER JOIN guarantees non-NULL;
            # confirm.
            contexts = row.get('contexts').replace('\n', ' ')
            referenceids = row.get('referenceids')
            title = clean_text(row.get('papertitle'))
            abstract = clean_text(row.get('abstract'))
            # Debug output only.
            print(title)
            # Get a single string for all the contexts
            if contexts is not None and referenceids is not None:
                contexts = contexts.split(' ||--|| ')
                referenceids = referenceids.split(',')
                contexts_with_refs = []
                # Go through context, refid pairs, one at a time
                for context, referenceid in zip(contexts, referenceids):
                    # VERY VERY IMPORTANT: check if the referenceid is not present in the allmagpaperids set,
                    # IGNORE IT! DESIGN DECISION: the other choice is to have a LOT of passes.
                    if referenceid in allmagpaperids:
                        contextlist = clean_text(context).split()
                        # Insert the reference id as the MIDDLE word of the context
                        # NOTE, when multiple reference ids are present, only 1 is inserted. Mag issue.
                        # In the eg. nips file, it's like this: this paper uses our previous work on weight space
                        # probabilities =-=nips05_0451-=- =-=nips05_0507-=-.
                        index_to_insert = len(contextlist) // 2
                        value_to_insert = docid_prefix + referenceid + docid_suffix
                        # Add the ref id with the prefix and suffix
                        contextlist.insert(index_to_insert, value_to_insert)
                        # Insert the context with ref id into the contexts_with_refs list
                        contexts_with_refs.append(' '.join(contextlist))
                    # else: do nothing, next iteration
                # After all the contexts are iterated, join them into one string.
                contexts_concatenated = ' '.join(contexts_with_refs)
            else:
                contexts_concatenated = ''
            # Concatenate the paperid, title, abstract and the contexts together.
            content = "{} {} {} {}\n".format(paperid, title, abstract, contexts_concatenated)
            content = to_unicode(content)
            # Skip empty documents.
            if content.strip() != '':
                # NOTE(review): `fulltext_file` is not defined anywhere visible
                # in this module (main() opens its output as `file`) -- this
                # looks like a NameError at runtime; verify.
                fulltext_file.write(content)
                print("Written file for {}".format(paperid))
def main():
    """
    Build the hd2v training text file from the arXiv/MAG corpus.

    Streams (arxiv_id, mag_id) rows from the module-level ``scur`` cursor
    (via ``trainingquery``), writes the per-paper content produced by
    ``read_arxiv_addmagids`` to ``arxiv_hd2v_training.txt``, appends
    MAG-only papers via ``add_additional_papers``, and finally closes the
    module-level output file and database connections.
    """
    scur.execute(trainingquery)
    with open('arxiv_hd2v_training.txt', 'w') as file:
        for row in tqdm(scur):
            # Each row is a tuple. E.g.: ('0704.0047', '2042449614')
            # Open the file
            arxiv_id = row[0]
            mag_id = row[1]
            # Track every MAG id seen so add_additional_papers() can later
            # check reference ids against this set.
            allmagpaperids.add(mag_id)
            content = read_arxiv_addmagids(arxiv_id, mag_id, ccur)
            file.write(content)
    # Add additional content : abstact + title + concatenated contexts from MAG
    # Note that the citation marker (cited paper id) is always placed bang in the centre
    # of the context.
    add_additional_papers()
    # Close files and db connections (all opened at module level).
    arxiv_citing_cited_file.close()
    sconn.close()
    cconn.close()
    pconn.close()
if __name__ == '__main__':
    main()
f29f8c2dfbd155d19c32f977f576e25d84202b32 | 3,236 | py | Python | BPt/pipeline/tests/helpers.py | sahahn/ABCD_ML | a8b1c48c33f3fdc046c8922964f1c456273238da | [
"MIT"
] | 1 | 2019-09-25T23:23:49.000Z | 2019-09-25T23:23:49.000Z | BPt/pipeline/tests/helpers.py | sahahn/ABCD_ML | a8b1c48c33f3fdc046c8922964f1c456273238da | [
"MIT"
] | 1 | 2020-04-20T20:53:27.000Z | 2020-04-20T20:53:27.000Z | BPt/pipeline/tests/helpers.py | sahahn/ABCD_ML | a8b1c48c33f3fdc046c8922964f1c456273238da | [
"MIT"
] | 1 | 2019-06-21T14:44:40.000Z | 2019-06-21T14:44:40.000Z | import os
import tempfile
import numpy as np
from ...dataset.data_file import DataFile
from ...main.input import ProblemSpec
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_selection._base import SelectorMixin
from ...main.input import ParamSearch
from ...main.CV import BPtCV, CVStrategy
from ...dataset.dataset import Dataset
class ToFixedTransformer(BaseEstimator, TransformerMixin):
    """Transformer that replaces every input value with one fixed constant.

    ``fit`` only records the input feature count; ``transform`` returns a
    float array of the same shape as ``X`` filled with ``to``.
    """

    def __init__(self, to, n_jobs=1):
        self.to = to
        self.n_jobs = n_jobs

    def fit(self, X, y):
        # Record the feature count, following the sklearn convention.
        self.n_features_in_ = X.shape[1]
        return self

    def transform(self, X):
        # Same shape as the input, every entry equal to the constant
        # (float dtype, matching the original zeros-then-fill behaviour).
        return np.full(X.shape, self.to, dtype=float)
class IdentityListLoader(BaseEstimator, TransformerMixin):
    """Loader that flattens each element of a list of arrays.

    ``fit`` only checks that a mapping was supplied; ``transform`` turns a
    list of arrays into one array of flattened rows.
    """

    # Signals to the pipeline machinery that this step expects a mapping.
    needs_mapping = True

    def __init__(self):
        pass

    def fit(self, X, y=None, mapping=None):
        # The caller must forward a mapping.
        assert mapping is not None
        return self

    def transform(self, X):
        assert isinstance(X, list)
        # Flatten every element and stack the results.
        return np.array([item.flatten() for item in X])
class FakeSelector(SelectorMixin, BaseEstimator):
    """Feature-selector stub whose support mask is fixed at construction."""

    def __init__(self, mask):
        # Boolean mask of features to keep; returned verbatim below.
        self.mask = mask

    def fit(self, X, y):
        # Nothing to learn; the mask was supplied up front.
        return self

    def _get_support_mask(self):
        return self.mask
def get_param_search():
    """Return a hyper-parameter search configuration dict for tests.

    Builds a RandomSearch ``ParamSearch``, converts it to dict form against
    a regression ``ProblemSpec``, then swaps in a concrete 3-split,
    single-repeat ``BPtCV`` for the ``'cv'`` entry.
    """
    param_search = ParamSearch(search_type='RandomSearch',
                               cv='default',
                               n_iter=10,
                               scorer='default',
                               weight_scorer=False,
                               mp_context='loky',
                               n_jobs='default',
                               dask_ip=None,
                               memmap_X=False,
                               search_only_params=None,
                               progress_loc=None)

    ps = ProblemSpec(random_state=1,
                     n_jobs=2,
                     problem_type='regression')

    ps_dict = param_search._as_dict(ps)
    # Replace the 'default' CV placeholder with a concrete CV object.
    ps_dict['cv'] = BPtCV(splits=3, n_repeats=1,
                          cv_strategy=CVStrategy(), splits_vals=None)

    return ps_dict
def get_temp_files(n):
    """Return paths for ``n`` numbered ``test_<i>.npy`` files in the temp dir."""
    base = tempfile.gettempdir()
    paths = []
    for idx in range(n):
        paths.append(os.path.join(base, 'test_%d.npy' % idx))
    return paths
def get_fake_mapping(n):
    """Create ``n`` temp ``.npy`` files and return an index -> DataFile map."""
    mapping = {}
    for idx, path in enumerate(get_temp_files(n)):
        # Each file holds a 2x2 float array filled with its own index.
        np.save(path, np.full((2, 2), idx, dtype=float))
        mapping[idx] = DataFile(loc=path, load_func=np.load)
    return mapping
def clean_fake_mapping(n):
    """Delete the temp files created by :func:`get_fake_mapping`."""
    for path in get_temp_files(n):
        os.unlink(path)
def get_fake_data_dataset(data_keys=None,
                          cat_keys=None):
    """Build an empty Dataset with the requested data / categorical columns.

    Every key gets an empty column with the ``'data'`` role; keys in
    ``cat_keys`` additionally receive the ``'category'`` scope.
    """
    if data_keys is None:
        data_keys = []
    if cat_keys is None:
        cat_keys = []

    dataset = Dataset()

    # Categorical keys get the same base setup as data keys, plus the
    # extra 'category' scope; order (data keys first) is preserved.
    for key, is_cat in ([(k, False) for k in data_keys]
                        + [(k, True) for k in cat_keys]):
        dataset[key] = []
        dataset.set_role(key, 'data', inplace=True)
        if is_cat:
            dataset.add_scope(key, 'category', inplace=True)

    dataset._check_scopes()
    return dataset
| 23.280576 | 79 | 0.572002 |
a810f8fc77fe86b99b85db17ab68ecd3fc8b96be | 2,515 | py | Python | tests/yhoo/test_yhoo_ticker_bundle.py | hootnot/virtual-yahoofinance-REST-API | 3246d3f4c14821e4ef0f9de57dd759cf03f42681 | [
"Apache-2.0"
] | 1 | 2022-03-18T08:27:34.000Z | 2022-03-18T08:27:34.000Z | tests/yhoo/test_yhoo_ticker_bundle.py | hootnot/virtual-yahoofinance-REST-API | 3246d3f4c14821e4ef0f9de57dd759cf03f42681 | [
"Apache-2.0"
] | null | null | null | tests/yhoo/test_yhoo_ticker_bundle.py | hootnot/virtual-yahoofinance-REST-API | 3246d3f4c14821e4ef0f9de57dd759cf03f42681 | [
"Apache-2.0"
] | 1 | 2021-06-18T02:14:03.000Z | 2021-06-18T02:14:03.000Z | # -*- coding: utf-8 -*-
import pytest
# from .unittestsetup import environment as environment
from ..unittestsetup import fetchTestData, fetchRawData, fetchFullResponse
from ..unittestsetup import API_URL, client
import requests_mock
from virtual_finance_api.client import Client
from virtual_finance_api.exceptions import ( # noqa F401
ConversionHookError,
VirtualFinanceAPIError,
)
from virtual_finance_api.endpoints.yahoo.ticker_bundle import responses
import virtual_finance_api.endpoints.yahoo as yh
from virtual_finance_api.endpoints.decorators import endpoint
# Endpoint template used by the derived request class below.
TEST_ENDPOINT = "my/{ticker}/test"
@endpoint(TEST_ENDPOINT)
class MyYhoo(yh.ticker_bundle.Yhoo):
    """Minimal derived Yhoo request bound to ``TEST_ENDPOINT``."""

    def __init__(self, ticker):
        # Delegate to the base class; only the endpoint differs.
        super(MyYhoo, self).__init__(ticker)
def test__MyYhoo():
    """A derived Yhoo request renders its endpoint and keeps its ticker."""
    symbol = "IBM"
    request = MyYhoo(symbol)
    expected_path = TEST_ENDPOINT.format(ticker=symbol)
    assert str(request) == expected_path and request.ticker == symbol
@pytest.mark.parametrize(
    "cls,ticker,tid,useFullResponse,rawFile",
    [
        (yh.Holders, "IBM", "_yh_holders", True, "yahoo_holders.raw"),
        (yh.Profile, "IBM", "_yh_profile", True, "yahoo_profile.raw"),
        (yh.Financials, "IBM", "_yh_financials", True, "yahoo_financials.raw"),
        (yh.Options, "IBM", "_yh_options", True, "yahoo_options.raw"),
    ],
)
def test__requests(
    requests_mock, client, cls, ticker, tid, useFullResponse, rawFile, **kwargs
):  # noqa E501
    """Each Yahoo request parses its recorded raw payload into the expected response."""
    resp, data = fetchTestData(responses, tid)
    if useFullResponse:
        # refactor: full responses live under the '_je' test ids.
        tid = tid.replace("_yh", "_je")
        resp = fetchFullResponse(tid)
    r = cls(ticker)
    r.DOMAIN = API_URL
    # Serve the canned raw payload for this endpoint from the mock server.
    rawdata = fetchRawData(rawFile)
    requests_mock.register_uri("GET", "{}/{}".format(API_URL, r), text=rawdata)
    client.request(r)
    assert r.response == resp
@pytest.mark.parametrize(
    "cls,ticker,tid,useFullResponse,rawFile",
    [
        (yh.Holders, "IBM", "_yh_holders", True, "yahoo_holders.raw"),
        (yh.Profile, "IBM", "_yh_profile", True, "yahoo_profile.raw"),
        (yh.Financials, "IBM", "_yh_financials", True, "yahoo_financials.raw"),
    ],
)
def test__excep(
    requests_mock, client, cls, ticker, tid, useFullResponse, rawFile, **kwargs
):  # noqa E501
    """An empty raw payload makes the request fail with a 422 error."""
    r = cls(ticker)
    r.DOMAIN = API_URL
    # Intentionally empty body instead of the recorded fixture.
    rawdata = ""  # fetchRawData(rawFile)
    requests_mock.register_uri("GET", "{}/{}".format(API_URL, r), text=rawdata)
    with pytest.raises(VirtualFinanceAPIError) as cErr:
        client.request(r)
    assert 422 == cErr.value.code
| 32.24359 | 79 | 0.689463 |
588d1ec4526f81cb441ef065b1ec8e8a7a8ccaad | 177 | py | Python | tests/helpers.py | Sceptre/sceptre-core | 83818e69d4e3c35c2a048240f7bc35f66d989db5 | [
"Apache-2.0"
] | null | null | null | tests/helpers.py | Sceptre/sceptre-core | 83818e69d4e3c35c2a048240f7bc35f66d989db5 | [
"Apache-2.0"
] | 1 | 2019-10-22T08:52:23.000Z | 2019-10-22T08:52:23.000Z | tests/helpers.py | Sceptre/sceptre-core | 83818e69d4e3c35c2a048240f7bc35f66d989db5 | [
"Apache-2.0"
] | null | null | null | from sceptre.provider.stack import Stack
class StackImp(Stack):
    """Minimal concrete Stack used to satisfy the abstract interface in tests.

    All methods are intentionally no-ops.
    """

    def __eq__(self, other):
        # Bug fix: __eq__ must accept the object being compared against;
        # the previous zero-argument form raised TypeError on any ``==``.
        pass

    def __repr__(self):
        pass

    def template(self):
        pass
| 13.615385 | 40 | 0.610169 |
00a2f82e3cbfb9a734fdd5d6a1af71d8ed79ffbb | 1,167 | py | Python | examples/Imagine/utils.py | smbct/LOLH | a9b608b500c83731db2c7dcb70e08cf9a2a94fe0 | [
"MIT"
] | 2 | 2022-01-13T18:53:03.000Z | 2022-01-13T18:53:42.000Z | examples/Imagine/utils.py | smbct/LOLH | a9b608b500c83731db2c7dcb70e08cf9a2a94fe0 | [
"MIT"
] | null | null | null | examples/Imagine/utils.py | smbct/LOLH | a9b608b500c83731db2c7dcb70e08cf9a2a94fe0 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import numpy as np
import pandas as pd
#-------------------------------------------------------------------------------
def print_lines(df):
    """Print each DataFrame row as a LaTeX table line.

    Values are rendered in scientific notation with two-digit precision
    and a three-digit exponent; a zero exponent ('e+000') is stripped.
    """
    for row_label in df.index:
        cells = []
        for value in df.loc[row_label]:
            formatted = np.format_float_scientific(value, precision=2,
                                                   exp_digits=3)
            # Drop the 'e+000' suffix when the exponent is zero.
            if formatted.endswith('000'):
                formatted = formatted[:-5]
            cells.append('$' + formatted + '$')
        print(row_label + ' & ' + ' & '.join(cells) + ' \\\\ \hline')
    return
# import a csv file from DE testing and output a tex formatted text
# filename = 'NK_classification_markers.csv'
# df = pd.read_csv(filename, index_col=0)
# df = df.loc[df.index[:30]]
# filename = 'c6_markers.csv'
# # filename = 'c5_DE.csv'
#
# df = pd.read_csv(filename, index_col=0)
# df_sub = df.loc[df.index[:20]]
# print_lines(df_sub)
# print('\n\n')
# df_sub = df.loc[df.index[-20:]]
# print_lines(df_sub)
# print(df.head(), '\n\n')
| 22.018868 | 82 | 0.495287 |
7a9c541af7df3a408e57f25cdb914838d1ac7072 | 13,639 | py | Python | sockeye/lr_scheduler.py | msobrevillac/sockeye | 6ed5e2dbe003673d03272987b79f73bdee86283d | [
"Apache-2.0"
] | null | null | null | sockeye/lr_scheduler.py | msobrevillac/sockeye | 6ed5e2dbe003673d03272987b79f73bdee86283d | [
"Apache-2.0"
] | null | null | null | sockeye/lr_scheduler.py | msobrevillac/sockeye | 6ed5e2dbe003673d03272987b79f73bdee86283d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from math import sqrt
from typing import List, Optional, Tuple
import sockeye.constants as C
from sockeye.utils import check_condition
logger = logging.getLogger(__name__)
class LearningRateScheduler:
    """
    Base class for learning rate schedules with optional linear warm-up.

    :param warmup: Number of initial updates over which the rate is
                   linearly increased up to base_lr.
    """

    def __init__(self, warmup: int = 0) -> None:
        self.base_lr = None  # Note: will be overwritten by MXNet optimizer
        check_condition(warmup >= 0, "warmup needs to be >= 0.")
        self.warmup = warmup
        # Log warm-up progress roughly ten times over the warm-up phase.
        self.log_warmup_every_t = self.warmup // 10
        self.last_warmup_log = -1

    def __call__(self, num_updates):
        pass

    def _warmup(self, num_updates):
        """
        Returns linearly increasing fraction of base_lr.
        """
        assert self.base_lr is not None
        if not self.warmup:
            return self.base_lr
        warmed = (num_updates + 1) * self.base_lr / (self.warmup + 1)
        should_log = (num_updates > self.last_warmup_log
                      and num_updates % self.log_warmup_every_t == 0)
        if should_log:
            self.last_warmup_log = num_updates
            logger.info("Learning rate %.0f%% warmed up", warmed * 100)
        return warmed
class AdaptiveLearningRateScheduler(LearningRateScheduler):
    """
    Base class for schedulers that react to checkpoint evaluation results
    via :meth:`new_evaluation_result`.
    """

    def new_evaluation_result(self, has_improved: bool) -> bool:
        """
        Inform the scheduler about the latest validation outcome.

        :param has_improved: Whether the model improved on held-out validation data.
        :return: True if parameters should be reset to the ones with best validation score.
        """
        return False
class LearningRateSchedulerFixedStep(AdaptiveLearningRateScheduler):
    """
    Use a fixed schedule of learning rate steps: lr_1 for N steps, lr_2 for M steps, etc.

    :param schedule: List of learning rate step tuples in the form (rate, num_updates).
    :param updates_per_checkpoint: Number of batch updates per checkpoint; each
           step's num_updates must be a multiple of this.
    """
    def __init__(self, schedule: List[Tuple[float, int]], updates_per_checkpoint: int) -> None:
        super().__init__()
        check_condition(all(num_updates > 0 for (_, num_updates) in schedule),
                        "num_updates for each step should be > 0.")
        check_condition(all(num_updates % updates_per_checkpoint == 0 for (_, num_updates) in schedule),
                        "num_updates for each step should be divisible by updates_per_checkpoint.")
        self.schedule = schedule
        # Index into ``schedule`` of the currently active step.
        self.current_step = 0
        self.current_rate = 0.
        self.current_step_num_updates = 0
        # Update count at which the current step began / next step begins.
        self.current_step_started_at = 0
        self.next_step_at = 0
        # Highest update count seen so far (advanced in __call__).
        self.latest_t = 0
        self._update_rate(self.current_step)
    def new_evaluation_result(self, has_improved: bool) -> bool:
        """
        Returns true if the parameters should be reset to the ones with the best validation score.

        :param has_improved: Whether the model improved on held-out validation data.
        :return: True if parameters should be reset to the ones with best validation score.
        """
        logger.info("Checkpoint learning rate: %1.2e (%d/%d updates)",
                    self.current_rate,
                    self.latest_t - self.current_step_started_at,
                    self.current_step_num_updates)
        # Advance to the next scheduled step once enough updates elapsed.
        if self.latest_t >= self.next_step_at:
            self.current_step += 1
            self._update_rate(self.current_step)
        # Fixed-step scheduling never asks for a parameter reset.
        return False
    def _update_rate(self, step: int):
        # NOTE(review): the guard reads self.current_step while indexing with
        # ``step``; both hold the same value at every call site.
        if self.current_step < len(self.schedule):
            self.current_rate, self.current_step_num_updates = self.schedule[step]
            self.current_step_started_at = self.latest_t
            self.next_step_at += self.current_step_num_updates
            logger.info("Changing learning rate to %1.2e for %d updates",
                        self.current_rate,
                        self.current_step_num_updates)
    def __call__(self, t: int):
        # Track the furthest update count seen; the rate itself only changes
        # at checkpoints (new_evaluation_result).
        self.latest_t = max(t, self.latest_t)
        return self.current_rate
    @staticmethod
    def parse_schedule_str(schedule_str: str) -> List[Tuple[float, int]]:
        """
        Parse learning schedule string.

        :param schedule_str: String in form rate1:num_updates1[,rate2:num_updates2,...]
        :return: List of tuples (learning_rate, num_updates).
        """
        schedule = list()
        for step in schedule_str.split(","):
            rate, num_updates = step.split(":")
            schedule.append((float(rate), int(num_updates)))
        return schedule
class LearningRateSchedulerInvSqrtT(LearningRateScheduler):
    """
    Learning rate schedule: lr / sqrt(1 + factor * t), where the factor is
    derived from the desired half life of the learning rate.

    :param updates_per_checkpoint: Number of batches between checkpoints.
    :param half_life: Half life of the learning rate in number of checkpoints.
    :param warmup: Number of (linear) learning rate increases to warm-up.
    """

    def __init__(self, updates_per_checkpoint: int, half_life: int, warmup: int = 0) -> None:
        super().__init__(warmup)
        check_condition(updates_per_checkpoint > 0, "updates_per_checkpoint needs to be > 0.")
        check_condition(half_life > 0, "half_life needs to be > 0.")
        # Solve 0.5 * base_lr = base_lr / sqrt(1 + T * factor) for factor,
        # with T = half_life * updates_per_checkpoint, giving factor = 3 / T.
        half_life_updates = half_life * updates_per_checkpoint
        self.factor = 3. / half_life_updates
        self.t_last_log = -1
        self.log_every_t = int(half_life_updates)

    def __call__(self, num_updates: int):
        decayed = self.base_lr / sqrt(1 + num_updates * self.factor)
        warmup_cap = self._warmup(num_updates) if self.warmup > 0 else 99999
        lr = min(decayed, warmup_cap)
        # Called once per parameter for the same t; only log the first time.
        if num_updates > self.t_last_log and num_updates % self.log_every_t == 0:
            logger.info("Learning rate currently at %1.2e", lr)
            self.t_last_log = num_updates
        return lr
class LearningRateSchedulerInvT(LearningRateScheduler):
    """
    Learning rate schedule: lr / (1 + factor * t), where the factor is
    derived from the desired half life of the learning rate.

    :param updates_per_checkpoint: Number of batches between checkpoints.
    :param half_life: Half life of the learning rate in number of checkpoints.
    :param warmup: Number of (linear) learning rate increases to warm-up.
    """

    def __init__(self, updates_per_checkpoint: int, half_life: int, warmup: int = 0) -> None:
        super().__init__(warmup)
        check_condition(updates_per_checkpoint > 0, "updates_per_checkpoint needs to be > 0.")
        check_condition(half_life > 0, "half_life needs to be > 0.")
        # Solve 0.5 * base_lr = base_lr / (1 + T * factor) for factor,
        # with T = half_life * updates_per_checkpoint, giving factor = 1 / T.
        half_life_updates = half_life * updates_per_checkpoint
        self.factor = 1. / half_life_updates
        self.t_last_log = -1
        self.log_every_t = int(half_life_updates)

    def __call__(self, num_updates: int):
        decayed = self.base_lr / (1 + num_updates * self.factor)
        warmup_cap = self._warmup(num_updates) if self.warmup > 0 else 99999
        lr = min(decayed, warmup_cap)
        # Called once per parameter for the same t; only log the first time.
        if num_updates > self.t_last_log and num_updates % self.log_every_t == 0:
            logger.info("Learning rate currently at %1.2e", lr)
            self.t_last_log = num_updates
        return lr
class LearningRateSchedulerPlateauReduce(AdaptiveLearningRateScheduler):
    """
    Lower the learning rate as soon as the validation score plateaus.

    :param reduce_factor: Factor to reduce learning rate with.
    :param reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is reduced.
    :param warmup: Number of (linear) learning rate increases to warm-up.
    """
    def __init__(self, reduce_factor: float, reduce_num_not_improved: int, warmup: int = 0) -> None:
        super().__init__(warmup)
        check_condition(0.0 < reduce_factor <= 1, "reduce_factor should be in ]0,1].")
        self.reduce_factor = reduce_factor
        self.reduce_num_not_improved = reduce_num_not_improved
        # Consecutive checkpoints without improvement since the last reduction.
        self.num_not_improved = 0
        # Current learning rate; lazily initialized from base_lr on first use.
        self.lr = None  # type: float
        self.t_last_log = -1
        # Reductions only start once the warm-up phase has completed.
        self.warmed_up = not self.warmup > 0
        logger.info("Will reduce the learning rate by a factor of %.2f whenever"
                    " the validation score doesn't improve %d times.",
                    reduce_factor, reduce_num_not_improved)
    def new_evaluation_result(self, has_improved: bool) -> bool:
        """
        Returns true if the parameters should be reset to the ones with the best validation score.

        :param has_improved: Whether the model improved on held-out validation data.
        :return: True if parameters should be reset to the ones with best validation score.
        """
        if self.lr is None:
            assert self.base_lr is not None
            self.lr = self.base_lr
        if has_improved:
            self.num_not_improved = 0
        else:
            self.num_not_improved += 1
            # Reduce only after enough stagnant checkpoints, with an actual
            # reduction factor, and only once warm-up is done.
            if self.num_not_improved >= self.reduce_num_not_improved and self.reduce_factor < 1.0 and self.warmed_up:
                old_lr = self.lr
                self.lr *= self.reduce_factor
                logger.info("%d checkpoints since improvement or rate scaling, "
                            "lowering learning rate: %1.2e -> %1.2e", self.num_not_improved, old_lr, self.lr)
                self.num_not_improved = 0
                # Ask the caller to restore the best-scoring parameters.
                return True
        return False
    def __call__(self, t):
        if self.lr is None:
            assert self.base_lr is not None
            self.lr = self.base_lr
        # During warm-up return the ramped-up rate; afterwards the plateau rate.
        lr = self._warmup(t) if self.warmup > 0 and t <= self.warmup else self.lr
        if t == self.warmup:
            self.warmed_up = True
        return lr
    def __repr__(self):
        return "LearningRateSchedulerPlateauReduce(reduce_factor=%.2f, " \
               "reduce_num_not_improved=%d)" % (self.reduce_factor, self.num_not_improved)
def get_lr_scheduler(scheduler_type: str,
                     updates_per_checkpoint: int,
                     learning_rate_half_life: int,
                     learning_rate_reduce_factor: float,
                     learning_rate_reduce_num_not_improved: int,
                     learning_rate_schedule: Optional[List[Tuple[float, int]]] = None,
                     learning_rate_warmup: Optional[int] = 0) -> Optional[LearningRateScheduler]:
    """
    Returns a learning rate scheduler.

    :param scheduler_type: Scheduler type.
    :param updates_per_checkpoint: Number of batches between checkpoints.
    :param learning_rate_half_life: Half life of the learning rate in number of checkpoints.
    :param learning_rate_reduce_factor: Factor to reduce learning rate with.
    :param learning_rate_reduce_num_not_improved: Number of checkpoints with no improvement after which learning rate is
           reduced.
    :param learning_rate_schedule: Fixed (rate, num_updates) schedule; only valid with the fixed-step scheduler.
    :param learning_rate_warmup: Number of batches that the learning rate is linearly increased.
    :raises: ValueError if unknown scheduler_type
    :return: Learning rate scheduler.
    """
    check_condition(learning_rate_schedule is None or scheduler_type == C.LR_SCHEDULER_FIXED_STEP,
                    "Learning rate schedule can only be used with '%s' learning rate scheduler."
                    % C.LR_SCHEDULER_FIXED_STEP)
    # No scheduling requested: optimizer keeps a constant rate.
    if scheduler_type is None:
        return None
    if scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_SQRT_T:
        return LearningRateSchedulerInvSqrtT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup)
    elif scheduler_type == C.LR_SCHEDULER_FIXED_RATE_INV_T:
        return LearningRateSchedulerInvT(updates_per_checkpoint, learning_rate_half_life, learning_rate_warmup)
    elif scheduler_type == C.LR_SCHEDULER_FIXED_STEP:
        check_condition(learning_rate_schedule is not None,
                        "learning_rate_schedule needed for %s scheduler" % C.LR_SCHEDULER_FIXED_STEP)
        return LearningRateSchedulerFixedStep(learning_rate_schedule, updates_per_checkpoint)
    elif scheduler_type == C.LR_SCHEDULER_PLATEAU_REDUCE:
        check_condition(learning_rate_reduce_factor is not None,
                        "learning_rate_reduce_factor needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE)
        check_condition(learning_rate_reduce_num_not_improved is not None,
                        "learning_rate_reduce_num_not_improved needed for %s scheduler" % C.LR_SCHEDULER_PLATEAU_REDUCE)
        # A factor of 1.0 would never change the rate; treat as "no scheduler".
        if learning_rate_reduce_factor >= 1.0:
            logger.warning("Not using %s learning rate scheduling: learning_rate_reduce_factor == 1.0"
                           % C.LR_SCHEDULER_PLATEAU_REDUCE)
            return None
        return LearningRateSchedulerPlateauReduce(learning_rate_reduce_factor, learning_rate_reduce_num_not_improved,
                                                  learning_rate_warmup)
    else:
        raise ValueError("Unknown learning rate scheduler type %s." % scheduler_type)
| 46.077703 | 125 | 0.669844 |
db567bae1b48f5aba8649d6d11e52a7cf4d36de9 | 2,204 | py | Python | get-DynamoDB-value.py | dongwon18/AWS_IoT_SensorMonitoring | e19b76fd1ff7c6108e77c6d759390d3559481de0 | [
"MIT"
] | null | null | null | get-DynamoDB-value.py | dongwon18/AWS_IoT_SensorMonitoring | e19b76fd1ff7c6108e77c6d759390d3559481de0 | [
"MIT"
] | null | null | null | get-DynamoDB-value.py | dongwon18/AWS_IoT_SensorMonitoring | e19b76fd1ff7c6108e77c6d759390d3559481de0 | [
"MIT"
] | null | null | null | # import the json utility package since we will be working with a JSON object
import json
# import the AWS SDK (for Python the package name is boto3)
import boto3
from boto3.dynamodb.conditions import Key
# import two packages to help us with dates and date formatting
from datetime import datetime, timedelta, timezone
import time
# create a DynamoDB object using the AWS SDK
dynamodb = boto3.resource('dynamodb')
# use the DynamoDB object to select our table
table = dynamodb.Table('Sensor_data')
# (timestamp bounds for the query window are computed inside the handler below)
# define the handler function that the Lambda service will use as an entry point
def lambda_handler(event, context):
    """AWS Lambda entry point: return a recent reading for one client.

    Queries the ``Sensor_data`` table for entries stored during the last
    10 seconds for ``event['client_Id']`` and returns the first matching
    reading as JSON (status 200), or status 204 when nothing matched.
    """
    now = datetime.now() # '2021-08-24 05:26:20'
    # Millisecond epoch timestamps bounding the query window.
    now_timestamp = int(time.mktime(now.timetuple())*1000) #int(time.mktime(datetime.strptime(now, '%Y-%m-%d %H:%M:%S').timetuple()) * 1000)
    print(now_timestamp)
    start = now - timedelta(seconds = 10) #'2021-08-24 05:25:58'
    start_timestamp = int(time.mktime(start.timetuple())*1000) #int(time.mktime(datetime.strptime(start, '%Y-%m-%d %H:%M:%S').timetuple()) * 1000)
    print(start_timestamp)
    client_Id = event['client_Id']
    # Query (not write) up to 3 readings for this client inside the window.
    response = table.query(
        Limit = 3,
        KeyConditionExpression = Key('client_Id').eq(client_Id) & Key('store_time').between(start_timestamp, now_timestamp)
    )
    # return a properly formatted JSON object
    data = response['Items']
    if(len(data) == 0):
        print("no such data")
        # 204: no content stored for this client in the last 10 seconds.
        return{
            'statusCode': 204
        }
    else:
        temp = round(data[0]['sensor_data']['temperature'], 3)
        humidity = round(data[0]['sensor_data']['humidity'], 3)
        stime = data[0]['store_time']
        # Render the stored millisecond timestamp in KST (UTC+9).
        KST = timezone(timedelta(hours=9))
        date = datetime.fromtimestamp(stime/1000, KST).strftime('%Y-%m-%d %H:%M:%S')
        json_form = {"time": date, "temp": str(temp), "humidity": str(humidity)}
        return {
            'statusCode': 200,
            'body': json.dumps(json_form)
        }
| 36.733333 | 147 | 0.647913 |
bdf24a2bdd6735c76e4b683ed636e027ffd4708d | 196 | py | Python | learning_logs/admin.py | Favorsiki/LearningLog | a71b2c006ea0888c884d0e3b534726dd66ab5720 | [
"MIT"
] | null | null | null | learning_logs/admin.py | Favorsiki/LearningLog | a71b2c006ea0888c884d0e3b534726dd66ab5720 | [
"MIT"
] | null | null | null | learning_logs/admin.py | Favorsiki/LearningLog | a71b2c006ea0888c884d0e3b534726dd66ab5720 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from learning_logs.models import Topic
from learning_logs.models import Entry
admin.site.register(Topic)
admin.site.register(Entry)
| 21.777778 | 38 | 0.826531 |
3d3646316636cb207d3955ff39740e8f23089f33 | 124,146 | py | Python | twisted/test/test_ftp.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | null | null | null | twisted/test/test_ftp.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | null | null | null | twisted/test/test_ftp.py | ioggstream/twisted | 34f9b1e3f097685839000c656332c66ee85be5d8 | [
"Unlicense",
"MIT"
] | null | null | null | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
FTP tests.
"""
import os
import errno
from StringIO import StringIO
import getpass
from zope.interface import implements
from zope.interface.verify import verifyClass
from twisted.trial import unittest
from twisted.python.randbytes import insecureRandom
from twisted.cred.portal import IRealm
from twisted.protocols import basic
from twisted.internet import reactor, task, protocol, defer, error
from twisted.internet.interfaces import IConsumer
from twisted.cred.error import UnauthorizedLogin
from twisted.cred import portal, checkers, credentials
from twisted.python import failure, filepath, runtime
from twisted.test import proto_helpers
from twisted.protocols import ftp, loopback
if runtime.platform.isWindows():
nonPOSIXSkip = "Cannot run on Windows"
else:
nonPOSIXSkip = None
class Dummy(basic.LineReceiver):
    """Line receiver that records every line and raw chunk it is given."""

    logname = None

    def __init__(self):
        self.lines = []
        self.rawData = []

    def connectionMade(self):
        # Shorthand alias for interactive debugging sessions.
        self.f = self.factory

    def lineReceived(self, line):
        self.lines.append(line)

    def rawDataReceived(self, data):
        self.rawData.append(data)

    def lineLengthExceeded(self, line):
        # Overly long lines are deliberately ignored.
        pass
class _BufferingProtocol(protocol.Protocol):
    """Protocol that accumulates received bytes and fires a Deferred on close."""

    def connectionMade(self):
        self.buffer = ''
        self.d = defer.Deferred()

    def dataReceived(self, data):
        # Accumulate everything; tests inspect the final buffer.
        self.buffer = self.buffer + data

    def connectionLost(self, reason):
        self.d.callback(self)
class FTPServerTestCase(unittest.TestCase):
    """
    Simple tests for an FTP server with the default settings.

    Runs a real FTP server on an ephemeral localhost port with both
    anonymous access and one username/password account, and connects a
    real client to it.

    @ivar clientFactory: class used as ftp client.
    """
    clientFactory = ftp.FTPClientBasic
    userAnonymous = "anonymous"

    def setUp(self):
        """
        Start an FTP server backed by a fresh temp directory, hook its
        buildProtocol so the server-side protocol instance is exposed as
        self.serverProtocol, and connect self.client to it.
        """
        # Create a directory
        self.directory = self.mktemp()
        os.mkdir(self.directory)
        self.dirPath = filepath.FilePath(self.directory)

        # Start the server
        p = portal.Portal(ftp.FTPRealm(
            anonymousRoot=self.directory,
            userHome=self.directory,
        ))
        p.registerChecker(checkers.AllowAnonymousAccess(),
                          credentials.IAnonymous)

        users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        self.username = "test-user"
        self.password = "test-password"
        users_checker.addUser(self.username, self.password)
        p.registerChecker(users_checker, credentials.IUsernamePassword)

        self.factory = ftp.FTPFactory(portal=p,
                                      userAnonymous=self.userAnonymous)
        port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.addCleanup(port.stopListening)

        # Hook the server's buildProtocol to make the protocol instance
        # accessible to tests.
        buildProtocol = self.factory.buildProtocol
        d1 = defer.Deferred()
        def _rememberProtocolInstance(addr):
            # Done hooking this.
            del self.factory.buildProtocol

            protocol = buildProtocol(addr)
            self.serverProtocol = protocol.wrappedProtocol
            def cleanupServer():
                if self.serverProtocol.transport is not None:
                    self.serverProtocol.transport.loseConnection()
            self.addCleanup(cleanupServer)
            d1.callback(None)
            return protocol
        self.factory.buildProtocol = _rememberProtocolInstance

        # Connect a client to it
        portNum = port.getHost().port
        clientCreator = protocol.ClientCreator(reactor, self.clientFactory)
        d2 = clientCreator.connectTCP("127.0.0.1", portNum)
        def gotClient(client):
            self.client = client
            self.addCleanup(self.client.transport.loseConnection)
        d2.addCallback(gotClient)
        # setUp completes when both server- and client-side are ready.
        return defer.gatherResults([d1, d2])

    def assertCommandResponse(self, command, expectedResponseLines,
                              chainDeferred=None):
        """Asserts that a sending an FTP command receives the expected
        response.

        Returns a Deferred. Optionally accepts a deferred to chain its actions
        to.
        """
        if chainDeferred is None:
            chainDeferred = defer.succeed(None)

        def queueCommand(ignored):
            d = self.client.queueStringCommand(command)
            def gotResponse(responseLines):
                self.assertEqual(expectedResponseLines, responseLines)
            return d.addCallback(gotResponse)
        return chainDeferred.addCallback(queueCommand)

    def assertCommandFailed(self, command, expectedResponse=None,
                            chainDeferred=None):
        """
        Asserts that sending an FTP command fails with ftp.CommandFailed,
        optionally checking the failure's response lines.  Returns a
        Deferred; optionally chains onto an existing deferred.
        """
        if chainDeferred is None:
            chainDeferred = defer.succeed(None)

        def queueCommand(ignored):
            return self.client.queueStringCommand(command)
        chainDeferred.addCallback(queueCommand)
        self.assertFailure(chainDeferred, ftp.CommandFailed)
        def failed(exception):
            if expectedResponse is not None:
                self.assertEqual(
                    expectedResponse, exception.args[0])
        return chainDeferred.addCallback(failed)

    def _anonymousLogin(self):
        """Authenticates the FTP client as the anonymous user."""
        d = self.assertCommandResponse(
            'USER anonymous',
            ['331 Guest login ok, type your email address as password.'])
        return self.assertCommandResponse(
            'PASS test@twistedmatrix.com',
            ['230 Anonymous login ok, access restrictions apply.'],
            chainDeferred=d)

    def _userLogin(self):
        """Authenticates the FTP client using the test account."""
        d = self.assertCommandResponse(
            'USER %s' % (self.username),
            ['331 Password required for %s.' % (self.username)])
        return self.assertCommandResponse(
            'PASS %s' % (self.password),
            ['230 User logged in, proceed'],
            chainDeferred=d)
class FTPAnonymousTestCase(FTPServerTestCase):
    """
    Tests for an FTP server configured with a non-default anonymous
    username ("guest" instead of "anonymous").
    """
    userAnonymous = "guest"

    def test_anonymousLogin(self):
        """
        With ``userAnonymous`` set to ``guest``, the server treats the
        ``guest`` user as the anonymous account: it asks only for an email
        address as password and then grants restricted anonymous access.
        """
        userDeferred = self.assertCommandResponse(
            'USER guest',
            ['331 Guest login ok, type your email address as password.'])
        return self.assertCommandResponse(
            'PASS test@twistedmatrix.com',
            ['230 Anonymous login ok, access restrictions apply.'],
            chainDeferred=userDeferred)
class BasicFTPServerTestCase(FTPServerTestCase):
    def testNotLoggedInReply(self):
        """
        When not logged in, most commands other than USER and PASS should
        get NOT_LOGGED_IN errors, but some can be called before USER and PASS.
        """
        loginRequiredCommandList = ['CDUP', 'CWD', 'LIST', 'MODE', 'PASV',
                                    'PWD', 'RETR', 'STRU', 'SYST', 'TYPE']
        loginNotRequiredCommandList = ['FEAT']

        # Issue commands, check responses
        def checkFailResponse(exception, command):
            failureResponseLines = exception.args[0]
            self.failUnless(failureResponseLines[-1].startswith("530"),
                            "%s - Response didn't start with 530: %r"
                            % (command, failureResponseLines[-1],))

        def checkPassResponse(result, command):
            result = result[0]
            self.failIf(result.startswith("530"),
                        "%s - Response start with 530: %r"
                        % (command, result,))

        deferreds = []
        # Login-requiring commands must fail with a 530 reply.
        for command in loginRequiredCommandList:
            deferred = self.client.queueStringCommand(command)
            self.assertFailure(deferred, ftp.CommandFailed)
            deferred.addCallback(checkFailResponse, command)
            deferreds.append(deferred)

        # These commands must succeed (no 530) even before login.
        for command in loginNotRequiredCommandList:
            deferred = self.client.queueStringCommand(command)
            deferred.addCallback(checkPassResponse, command)
            deferreds.append(deferred)

        return defer.DeferredList(deferreds, fireOnOneErrback=True)
def testPASSBeforeUSER(self):
"""
Issuing PASS before USER should give an error.
"""
return self.assertCommandFailed(
'PASS foo',
["503 Incorrect sequence of commands: "
"USER required before PASS"])
def testNoParamsForUSER(self):
"""
Issuing USER without a username is a syntax error.
"""
return self.assertCommandFailed(
'USER',
['500 Syntax error: USER requires an argument.'])
def testNoParamsForPASS(self):
"""
Issuing PASS without a password is a syntax error.
"""
d = self.client.queueStringCommand('USER foo')
return self.assertCommandFailed(
'PASS',
['500 Syntax error: PASS requires an argument.'],
chainDeferred=d)
def testAnonymousLogin(self):
return self._anonymousLogin()
def testQuit(self):
"""
Issuing QUIT should return a 221 message.
"""
d = self._anonymousLogin()
return self.assertCommandResponse(
'QUIT',
['221 Goodbye.'],
chainDeferred=d)
def testAnonymousLoginDenied(self):
# Reconfigure the server to disallow anonymous access, and to have an
# IUsernamePassword checker that always rejects.
self.factory.allowAnonymous = False
denyAlwaysChecker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
self.factory.portal.registerChecker(denyAlwaysChecker,
credentials.IUsernamePassword)
# Same response code as allowAnonymous=True, but different text.
d = self.assertCommandResponse(
'USER anonymous',
['331 Password required for anonymous.'])
# It will be denied. No-one can login.
d = self.assertCommandFailed(
'PASS test@twistedmatrix.com',
['530 Sorry, Authentication failed.'],
chainDeferred=d)
# It's not just saying that. You aren't logged in.
d = self.assertCommandFailed(
'PWD',
['530 Please login with USER and PASS.'],
chainDeferred=d)
return d
def test_anonymousWriteDenied(self):
"""
When an anonymous user attempts to edit the server-side filesystem, they
will receive a 550 error with a descriptive message.
"""
d = self._anonymousLogin()
return self.assertCommandFailed(
'MKD newdir',
['550 Anonymous users are forbidden to change the filesystem'],
chainDeferred=d)
def testUnknownCommand(self):
d = self._anonymousLogin()
return self.assertCommandFailed(
'GIBBERISH',
["502 Command 'GIBBERISH' not implemented"],
chainDeferred=d)
def testRETRBeforePORT(self):
d = self._anonymousLogin()
return self.assertCommandFailed(
'RETR foo',
["503 Incorrect sequence of commands: "
"PORT or PASV required before RETR"],
chainDeferred=d)
def testSTORBeforePORT(self):
d = self._anonymousLogin()
return self.assertCommandFailed(
'STOR foo',
["503 Incorrect sequence of commands: "
"PORT or PASV required before STOR"],
chainDeferred=d)
def testBadCommandArgs(self):
d = self._anonymousLogin()
self.assertCommandFailed(
'MODE z',
["504 Not implemented for parameter 'z'."],
chainDeferred=d)
self.assertCommandFailed(
'STRU I',
["504 Not implemented for parameter 'I'."],
chainDeferred=d)
return d
def testDecodeHostPort(self):
self.assertEqual(ftp.decodeHostPort('25,234,129,22,100,23'),
('25.234.129.22', 25623))
nums = range(6)
for i in range(6):
badValue = list(nums)
badValue[i] = 256
s = ','.join(map(str, badValue))
self.assertRaises(ValueError, ftp.decodeHostPort, s)
def test_PASV(self):
"""
When the client sends the command C{PASV}, the server responds with a
host and port, and is listening on that port.
"""
# Login
d = self._anonymousLogin()
# Issue a PASV command
d.addCallback(lambda _: self.client.queueStringCommand('PASV'))
def cb(responseLines):
"""
Extract the host and port from the resonse, and
verify the server is listening of the port it claims to be.
"""
host, port = ftp.decodeHostPort(responseLines[-1][4:])
self.assertEqual(port, self.serverProtocol.dtpPort.getHost().port)
d.addCallback(cb)
# Semi-reasonable way to force cleanup
d.addCallback(lambda _: self.serverProtocol.transport.loseConnection())
return d
def test_SYST(self):
"""SYST command will always return UNIX Type: L8"""
d = self._anonymousLogin()
self.assertCommandResponse('SYST', ["215 UNIX Type: L8"],
chainDeferred=d)
return d
def test_RNFRandRNTO(self):
"""
Sending the RNFR command followed by RNTO, with valid filenames, will
perform a successful rename operation.
"""
# Create user home folder with a 'foo' file.
self.dirPath.child(self.username).createDirectory()
self.dirPath.child(self.username).child('foo').touch()
d = self._userLogin()
self.assertCommandResponse(
'RNFR foo',
["350 Requested file action pending further information."],
chainDeferred=d)
self.assertCommandResponse(
'RNTO bar',
["250 Requested File Action Completed OK"],
chainDeferred=d)
def check_rename(result):
self.assertTrue(
self.dirPath.child(self.username).child('bar').exists())
return result
d.addCallback(check_rename)
return d
def test_RNFRwithoutRNTO(self):
"""
Sending the RNFR command followed by any command other than RNTO
should return an error informing users that RNFR should be followed
by RNTO.
"""
d = self._anonymousLogin()
self.assertCommandResponse(
'RNFR foo',
["350 Requested file action pending further information."],
chainDeferred=d)
self.assertCommandFailed(
'OTHER don-tcare',
["503 Incorrect sequence of commands: RNTO required after RNFR"],
chainDeferred=d)
return d
def test_portRangeForwardError(self):
"""
Exceptions other than L{error.CannotListenError} which are raised by
C{listenFactory} should be raised to the caller of L{FTP.getDTPPort}.
"""
def listenFactory(portNumber, factory):
raise RuntimeError()
self.serverProtocol.listenFactory = listenFactory
self.assertRaises(RuntimeError, self.serverProtocol.getDTPPort,
protocol.Factory())
def test_portRange(self):
"""
L{FTP.passivePortRange} should determine the ports which
L{FTP.getDTPPort} attempts to bind. If no port from that iterator can
be bound, L{error.CannotListenError} should be raised, otherwise the
first successful result from L{FTP.listenFactory} should be returned.
"""
def listenFactory(portNumber, factory):
if portNumber in (22032, 22033, 22034):
raise error.CannotListenError('localhost', portNumber, 'error')
return portNumber
self.serverProtocol.listenFactory = listenFactory
port = self.serverProtocol.getDTPPort(protocol.Factory())
self.assertEqual(port, 0)
self.serverProtocol.passivePortRange = xrange(22032, 65536)
port = self.serverProtocol.getDTPPort(protocol.Factory())
self.assertEqual(port, 22035)
self.serverProtocol.passivePortRange = xrange(22032, 22035)
self.assertRaises(error.CannotListenError,
self.serverProtocol.getDTPPort,
protocol.Factory())
def test_portRangeInheritedFromFactory(self):
"""
The L{FTP} instances created by L{ftp.FTPFactory.buildProtocol} have
their C{passivePortRange} attribute set to the same object the
factory's C{passivePortRange} attribute is set to.
"""
portRange = xrange(2017, 2031)
self.factory.passivePortRange = portRange
protocol = self.factory.buildProtocol(None)
self.assertEqual(portRange, protocol.wrappedProtocol.passivePortRange)
def testFEAT(self):
"""
When the server receives 'FEAT', it should report the list of supported
features. (Additionally, ensure that the server reports various
particular features that are supported by all Twisted FTP servers.)
"""
d = self.client.queueStringCommand('FEAT')
def gotResponse(responseLines):
self.assertEqual('211-Features:', responseLines[0])
self.assertTrue(' MDTM' in responseLines)
self.assertTrue(' PASV' in responseLines)
self.assertTrue(' TYPE A;I' in responseLines)
self.assertTrue(' SIZE' in responseLines)
self.assertEqual('211 End', responseLines[-1])
return d.addCallback(gotResponse)
def testOPTS(self):
"""
When the server receives 'OPTS something', it should report
that the FTP server does not support the option called 'something'.
"""
d = self._anonymousLogin()
self.assertCommandFailed(
'OPTS something',
["502 Option 'something' not implemented."],
chainDeferred=d,
)
return d
def test_STORreturnsErrorFromOpen(self):
"""
Any FTP error raised inside STOR while opening the file is returned
to the client.
"""
# We create a folder inside user's home folder and then
# we try to write a file with the same name.
# This will trigger an FTPCmdError.
self.dirPath.child(self.username).createDirectory()
self.dirPath.child(self.username).child('folder').createDirectory()
d = self._userLogin()
def sendPASV(result):
"""
Send the PASV command required before port.
"""
return self.client.queueStringCommand('PASV')
def mockDTPInstance(result):
"""
Fake an incoming connection and create a mock DTPInstance so
that PORT command will start processing the request.
"""
self.serverProtocol.dtpFactory.deferred.callback(None)
self.serverProtocol.dtpInstance = object()
return result
d.addCallback(sendPASV)
d.addCallback(mockDTPInstance)
self.assertCommandFailed(
'STOR folder',
["550 folder: is a directory"],
chainDeferred=d,
)
return d
def test_STORunknownErrorBecomesFileNotFound(self):
"""
Any non FTP error raised inside STOR while opening the file is
converted into FileNotFound error and returned to the client together
with the path.
The unknown error is logged.
"""
d = self._userLogin()
def failingOpenForWriting(ignore):
return defer.fail(AssertionError())
def sendPASV(result):
"""
Send the PASV command required before port.
"""
return self.client.queueStringCommand('PASV')
def mockDTPInstance(result):
"""
Fake an incoming connection and create a mock DTPInstance so
that PORT command will start processing the request.
"""
self.serverProtocol.dtpFactory.deferred.callback(None)
self.serverProtocol.dtpInstance = object()
self.serverProtocol.shell.openForWriting = failingOpenForWriting
return result
def checkLogs(result):
"""
Check that unknown errors are logged.
"""
logs = self.flushLoggedErrors()
self.assertEqual(1, len(logs))
self.assertIsInstance(logs[0].value, AssertionError)
d.addCallback(sendPASV)
d.addCallback(mockDTPInstance)
self.assertCommandFailed(
'STOR something',
["550 something: No such file or directory."],
chainDeferred=d,
)
d.addCallback(checkLogs)
return d
class FTPServerTestCaseAdvancedClient(FTPServerTestCase):
    """
    Test FTP server with the L{ftp.FTPClient} class.
    """
    # Use the full-featured FTPClient rather than the basic test client.
    clientFactory = ftp.FTPClient
    def test_anonymousSTOR(self):
        """
        Try to make an STOR as anonymous, and check that we got a permission
        denied error.
        """
        def eb(res):
            res.trap(ftp.CommandFailed)
            self.assertEqual(res.value.args[0][0],
                             '550 foo: Permission denied.')
        # storeFile returns (connection deferred, completion deferred).
        d1, d2 = self.client.storeFile('foo')
        d2.addErrback(eb)
        return defer.gatherResults([d1, d2])
    def test_STORtransferErrorIsReturned(self):
        """
        Any FTP error raised by STOR while transferring the file is returned
        to the client.
        """
        # Make a failing file writer.
        class FailingFileWriter(ftp._FileWriter):
            def receive(self):
                return defer.fail(ftp.IsADirectoryError("failing_file"))
        def failingSTOR(a, b):
            return defer.succeed(FailingFileWriter(None))
        # Monkey patch the shell so it returns a file writer that will
        # fail during transfer.
        self.patch(ftp.FTPAnonymousShell, 'openForWriting', failingSTOR)
        def eb(res):
            res.trap(ftp.CommandFailed)
            # The FTP-level error is logged once and reported verbatim.
            logs = self.flushLoggedErrors()
            self.assertEqual(1, len(logs))
            self.assertIsInstance(logs[0].value, ftp.IsADirectoryError)
            self.assertEqual(
                res.value.args[0][0],
                "550 failing_file: is a directory")
        d1, d2 = self.client.storeFile('failing_file')
        d2.addErrback(eb)
        return defer.gatherResults([d1, d2])
    def test_STORunknownTransferErrorBecomesAbort(self):
        """
        Any non FTP error raised by STOR while transferring the file is
        converted into a critical error and transfer is closed.
        The unknown error is logged.
        """
        class FailingFileWriter(ftp._FileWriter):
            def receive(self):
                return defer.fail(AssertionError())
        def failingSTOR(a, b):
            return defer.succeed(FailingFileWriter(None))
        # Monkey patch the shell so it returns a file writer that will
        # fail during transfer.
        self.patch(ftp.FTPAnonymousShell, 'openForWriting', failingSTOR)
        def eb(res):
            res.trap(ftp.CommandFailed)
            # The unknown error is logged, and the client sees a 426 abort.
            logs = self.flushLoggedErrors()
            self.assertEqual(1, len(logs))
            self.assertIsInstance(logs[0].value, AssertionError)
            self.assertEqual(
                res.value.args[0][0],
                "426 Transfer aborted. Data connection closed.")
        d1, d2 = self.client.storeFile('failing_file')
        d2.addErrback(eb)
        return defer.gatherResults([d1, d2])
    def test_RETRreadError(self):
        """
        Any errors during reading a file inside a RETR should be returned to
        the client.
        """
        # Make a failing file reading.
        class FailingFileReader(ftp._FileReader):
            def send(self, consumer):
                return defer.fail(ftp.IsADirectoryError("blah"))
        def failingRETR(a, b):
            return defer.succeed(FailingFileReader(None))
        # Monkey patch the shell so it returns a file reader that will
        # fail.
        self.patch(ftp.FTPAnonymousShell, 'openForReading', failingRETR)
        def check_response(failure):
            self.flushLoggedErrors()
            failure.trap(ftp.CommandFailed)
            # The transfer starts (125) and then fails with the FTP error.
            self.assertEqual(
                failure.value.args[0][0],
                "125 Data connection already open, starting transfer")
            self.assertEqual(
                failure.value.args[0][1],
                "550 blah: is a directory")
        proto = _BufferingProtocol()
        d = self.client.retrieveFile('failing_file', proto)
        d.addErrback(check_response)
        return d
class FTPServerPasvDataConnectionTestCase(FTPServerTestCase):
    """
    Data-channel tests using passive (PASV) mode: the client opens the data
    connection to a port announced by the server.
    """
    def _makeDataConnection(self, ignored=None):
        """
        Issue PASV and connect a L{_BufferingProtocol} to the host/port the
        server announces.  The returned Deferred fires with the connected
        protocol instance.
        """
        # Establish a passive data connection (i.e. client connecting to
        # server).
        d = self.client.queueStringCommand('PASV')
        def gotPASV(responseLines):
            host, port = ftp.decodeHostPort(responseLines[-1][4:])
            cc = protocol.ClientCreator(reactor, _BufferingProtocol)
            return cc.connectTCP('127.0.0.1', port)
        return d.addCallback(gotPASV)
    def _download(self, command, chainDeferred=None):
        """
        Open a data connection, run C{command}, and fire with the raw bytes
        received once both the command completes and the data connection
        closes.
        """
        if chainDeferred is None:
            chainDeferred = defer.succeed(None)
        chainDeferred.addCallback(self._makeDataConnection)
        def queueCommand(downloader):
            # wait for the command to return, and the download connection to be
            # closed.
            d1 = self.client.queueStringCommand(command)
            d2 = downloader.d
            return defer.gatherResults([d1, d2])
        chainDeferred.addCallback(queueCommand)
        def downloadDone((ignored, downloader)):
            return downloader.buffer
        return chainDeferred.addCallback(downloadDone)
    def test_LISTEmpty(self):
        """
        When listing empty folders, LIST returns an empty response.
        """
        d = self._anonymousLogin()
        # No files, so the file listing should be empty
        self._download('LIST', chainDeferred=d)
        def checkEmpty(result):
            self.assertEqual('', result)
        return d.addCallback(checkEmpty)
    def test_LISTWithBinLsFlags(self):
        """
        LIST ignores requests for folder with names like '-al' and will list
        the content of current folder.
        """
        os.mkdir(os.path.join(self.directory, 'foo'))
        os.mkdir(os.path.join(self.directory, 'bar'))
        # Login
        d = self._anonymousLogin()
        self._download('LIST -aL', chainDeferred=d)
        def checkDownload(download):
            # The last whitespace-separated field of each line is the name.
            names = []
            for line in download.splitlines():
                names.append(line.split(' ')[-1])
            self.assertEqual(2, len(names))
            self.assertIn('foo', names)
            self.assertIn('bar', names)
        return d.addCallback(checkDownload)
    def test_LISTWithContent(self):
        """
        LIST returns all folder's members, each member listed on a separate
        line and with name and other details.
        """
        os.mkdir(os.path.join(self.directory, 'foo'))
        os.mkdir(os.path.join(self.directory, 'bar'))
        # Login
        d = self._anonymousLogin()
        # We expect 2 lines because there are two files.
        self._download('LIST', chainDeferred=d)
        def checkDownload(download):
            self.assertEqual(2, len(download[:-2].split('\r\n')))
        d.addCallback(checkDownload)
        # Download a names-only listing.
        self._download('NLST ', chainDeferred=d)
        def checkDownload(download):
            filenames = download[:-2].split('\r\n')
            filenames.sort()
            self.assertEqual(['bar', 'foo'], filenames)
        d.addCallback(checkDownload)
        # Download a listing of the 'foo' subdirectory. 'foo' has no files, so
        # the file listing should be empty.
        self._download('LIST foo', chainDeferred=d)
        def checkDownload(download):
            self.assertEqual('', download)
        d.addCallback(checkDownload)
        # Change the current working directory to 'foo'.
        def chdir(ignored):
            return self.client.queueStringCommand('CWD foo')
        d.addCallback(chdir)
        # Download a listing from within 'foo', and again it should be empty,
        # because LIST uses the working directory by default.
        self._download('LIST', chainDeferred=d)
        def checkDownload(download):
            self.assertEqual('', download)
        return d.addCallback(checkDownload)
    def _listTestHelper(self, command, listOutput, expectedOutput):
        """
        Exercise handling by the implementation of I{LIST} or I{NLST} of certain
        return values and types from an L{IFTPShell.list} implementation.
        This will issue C{command} and assert that if the L{IFTPShell.list}
        implementation includes C{listOutput} as one of the file entries then
        the result given to the client matches C{expectedOutput}.
        @param command: Either C{b"LIST"} or C{b"NLST"}
        @type command: L{bytes}
        @param listOutput: A value suitable to be used as an element of the list
            returned by L{IFTPShell.list}. Vary the values and types of the
            contents to exercise different code paths in the server's handling
            of this result.
        @param expectedOutput: A line of output to expect as a result of
            C{listOutput} being transformed into a response to the command
            issued.
        @type expectedOutput: L{bytes}
        @return: A L{Deferred} which fires when the test is done, either with an
            L{Failure} if the test failed or with a function object if it
            succeeds. The function object is the function which implements
            L{IFTPShell.list} (and is useful to make assertions about what
            warnings might have been emitted).
        @rtype: L{Deferred}
        """
        # Login
        d = self._anonymousLogin()
        def patchedList(segments, keys=()):
            return defer.succeed([listOutput])
        def loggedIn(result):
            self.serverProtocol.shell.list = patchedList
            return result
        d.addCallback(loggedIn)
        self._download('%s something' % (command,), chainDeferred=d)
        def checkDownload(download):
            self.assertEqual(expectedOutput, download)
            return patchedList
        return d.addCallback(checkDownload)
    def test_LISTUnicode(self):
        """
        Unicode filenames returned from L{IFTPShell.list} are encoded using
        UTF-8 before being sent with the response.
        """
        return self._listTestHelper(
            "LIST",
            (u'my resum\xe9', (
                0, 1, filepath.Permissions(0777), 0, 0, 'user', 'group')),
            'drwxrwxrwx 0 user group '
            '0 Jan 01 1970 my resum\xc3\xa9\r\n')
    def test_LISTNonASCIIBytes(self):
        """
        When LIST receive a filename as byte string from L{IFTPShell.list}
        it will just pass the data to lower level without any change.
        """
        return self._listTestHelper(
            "LIST",
            ('my resum\xc3\xa9', (
                0, 1, filepath.Permissions(0777), 0, 0, 'user', 'group')),
            'drwxrwxrwx 0 user group '
            '0 Jan 01 1970 my resum\xc3\xa9\r\n')
    def testManyLargeDownloads(self):
        """
        RETR delivers files of assorted sizes completely: each downloaded
        payload has exactly the size that was written.
        """
        # Login
        d = self._anonymousLogin()
        # Download a range of different size files
        for size in range(100000, 110000, 500):
            fObj = file(os.path.join(self.directory, '%d.txt' % (size,)), 'wb')
            fObj.write('x' * size)
            fObj.close()
            self._download('RETR %d.txt' % (size,), chainDeferred=d)
            # size=size binds the loop value as a default, avoiding the
            # late-binding-closure pitfall.
            def checkDownload(download, size=size):
                self.assertEqual(size, len(download))
            d.addCallback(checkDownload)
        return d
    def test_downloadFolder(self):
        """
        When RETR is called for a folder, it will fail complaining that
        the path is a folder.
        """
        # Make a directory in the current working directory
        self.dirPath.child('foo').createDirectory()
        # Login
        d = self._anonymousLogin()
        d.addCallback(self._makeDataConnection)
        def retrFolder(downloader):
            downloader.transport.loseConnection()
            deferred = self.client.queueStringCommand('RETR foo')
            return deferred
        d.addCallback(retrFolder)
        def failOnSuccess(result):
            raise AssertionError('Downloading a folder should not succeed.')
        d.addCallback(failOnSuccess)
        def checkError(failure):
            failure.trap(ftp.CommandFailed)
            self.assertEqual(
                ['550 foo: is a directory'], failure.value.message)
            current_errors = self.flushLoggedErrors()
            self.assertEqual(
                0, len(current_errors),
                'No errors should be logged while downloading a folder.')
        d.addErrback(checkError)
        return d
    def test_NLSTEmpty(self):
        """
        NLST with no argument returns the directory listing for the current
        working directory.
        """
        # Login
        d = self._anonymousLogin()
        # Touch a file in the current working directory
        self.dirPath.child('test.txt').touch()
        # Make a directory in the current working directory
        self.dirPath.child('foo').createDirectory()
        self._download('NLST ', chainDeferred=d)
        def checkDownload(download):
            filenames = download[:-2].split('\r\n')
            filenames.sort()
            self.assertEqual(['foo', 'test.txt'], filenames)
        return d.addCallback(checkDownload)
    def test_NLSTNonexistent(self):
        """
        NLST on a non-existent file/directory returns nothing.
        """
        # Login
        d = self._anonymousLogin()
        self._download('NLST nonexistent.txt', chainDeferred=d)
        def checkDownload(download):
            self.assertEqual('', download)
        return d.addCallback(checkDownload)
    def test_NLSTUnicode(self):
        """
        NLST will receive Unicode filenames for IFTPShell.list, and will
        encode them using UTF-8.
        """
        return self._listTestHelper(
            "NLST",
            (u'my resum\xe9', (
                0, 1, filepath.Permissions(0777), 0, 0, 'user', 'group')),
            'my resum\xc3\xa9\r\n')
    def test_NLSTNonASCIIBytes(self):
        """
        NLST will just pass the non-Unicode data to lower level.
        """
        return self._listTestHelper(
            "NLST",
            ('my resum\xc3\xa9', (
                0, 1, filepath.Permissions(0777), 0, 0, 'user', 'group')),
            'my resum\xc3\xa9\r\n')
    def test_NLSTOnPathToFile(self):
        """
        NLST on an existent file returns only the path to that file.
        """
        # Login
        d = self._anonymousLogin()
        # Touch a file in the current working directory
        self.dirPath.child('test.txt').touch()
        self._download('NLST test.txt', chainDeferred=d)
        def checkDownload(download):
            filenames = download[:-2].split('\r\n')
            self.assertEqual(['test.txt'], filenames)
        return d.addCallback(checkDownload)
class FTPServerPortDataConnectionTestCase(FTPServerPasvDataConnectionTestCase):
    """
    Re-run the data-channel tests in active (PORT) mode: the server connects
    back to a listener the test sets up.
    """
    def setUp(self):
        # Track every data listener so tearDown can close them all.
        self.dataPorts = []
        return FTPServerPasvDataConnectionTestCase.setUp(self)
    def _makeDataConnection(self, ignored=None):
        """
        Listen locally and send PORT so the server connects to us.  Fires
        with the L{_BufferingProtocol} built for the incoming connection.
        """
        # Establish an active data connection (i.e. server connecting to
        # client).
        deferred = defer.Deferred()
        class DataFactory(protocol.ServerFactory):
            protocol = _BufferingProtocol
            def buildProtocol(self, addr):
                # Inside this method, 'protocol' resolves to the module, not
                # the class attribute above (class scope is not visible here).
                p = protocol.ServerFactory.buildProtocol(self, addr)
                reactor.callLater(0, deferred.callback, p)
                return p
        dataPort = reactor.listenTCP(0, DataFactory(), interface='127.0.0.1')
        self.dataPorts.append(dataPort)
        cmd = 'PORT ' + ftp.encodeHostPort('127.0.0.1', dataPort.getHost().port)
        self.client.queueStringCommand(cmd)
        return deferred
    def tearDown(self):
        # Stop all data listeners and the superclass fixtures; fail the
        # teardown if any individual cleanup fails.
        l = [defer.maybeDeferred(port.stopListening) for port in self.dataPorts]
        d = defer.maybeDeferred(
            FTPServerPasvDataConnectionTestCase.tearDown, self)
        l.append(d)
        return defer.DeferredList(l, fireOnOneErrback=True)
    def testPORTCannotConnect(self):
        """
        PORT pointing at a closed port fails with a 425 response.
        """
        # Login
        d = self._anonymousLogin()
        # Listen on a port, and immediately stop listening as a way to find a
        # port number that is definitely closed.
        def loggedIn(ignored):
            port = reactor.listenTCP(0, protocol.Factory(),
                                     interface='127.0.0.1')
            portNum = port.getHost().port
            d = port.stopListening()
            d.addCallback(lambda _: portNum)
            return d
        d.addCallback(loggedIn)
        # Tell the server to connect to that port with a PORT command, and
        # verify that it fails with the right error.
        def gotPortNum(portNum):
            return self.assertCommandFailed(
                'PORT ' + ftp.encodeHostPort('127.0.0.1', portNum),
                ["425 Can't open data connection."])
        return d.addCallback(gotPortNum)
    def test_nlstGlobbing(self):
        """
        When Unix shell globbing is used with NLST only files matching the
        pattern will be returned.
        """
        self.dirPath.child('test.txt').touch()
        self.dirPath.child('ceva.txt').touch()
        self.dirPath.child('no.match').touch()
        d = self._anonymousLogin()
        self._download('NLST *.txt', chainDeferred=d)
        def checkDownload(download):
            filenames = download[:-2].split('\r\n')
            filenames.sort()
            self.assertEqual(['ceva.txt', 'test.txt'], filenames)
        return d.addCallback(checkDownload)
class DTPFactoryTests(unittest.TestCase):
    """
    Tests for L{ftp.DTPFactory}.
    """
    def setUp(self):
        """
        Create a fake protocol interpreter and a L{ftp.DTPFactory} instance to
        test.
        """
        # A task.Clock lets the tests advance time deterministically.
        self.reactor = task.Clock()
        class ProtocolInterpreter(object):
            dtpInstance = None
        self.protocolInterpreter = ProtocolInterpreter()
        self.factory = ftp.DTPFactory(
            self.protocolInterpreter, None, self.reactor)
    def test_setTimeout(self):
        """
        L{ftp.DTPFactory.setTimeout} uses the reactor passed to its initializer
        to set up a timed event to time out the DTP setup after the specified
        number of seconds.
        """
        # Make sure the factory's deferred fails with the right exception, and
        # make it so we can tell exactly when it fires.
        finished = []
        d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
        d.addCallback(finished.append)
        self.factory.setTimeout(6)
        # Advance the clock almost to the timeout
        self.reactor.advance(5)
        # Nothing should have happened yet.
        self.assertFalse(finished)
        # Advance it to the configured timeout.
        self.reactor.advance(1)
        # Now the Deferred should have failed with TimeoutError.
        self.assertTrue(finished)
        # There should also be no calls left in the reactor.
        self.assertFalse(self.reactor.calls)
    def test_buildProtocolOnce(self):
        """
        A L{ftp.DTPFactory} instance's C{buildProtocol} method can be used once
        to create a L{ftp.DTP} instance.
        """
        protocol = self.factory.buildProtocol(None)
        self.assertIsInstance(protocol, ftp.DTP)
        # A subsequent call returns None.
        self.assertIdentical(self.factory.buildProtocol(None), None)
    def test_timeoutAfterConnection(self):
        """
        If a timeout has been set up using L{ftp.DTPFactory.setTimeout}, it is
        cancelled by L{ftp.DTPFactory.buildProtocol}.
        """
        self.factory.setTimeout(10)
        self.factory.buildProtocol(None)
        # Make sure the call is no longer active.
        self.assertFalse(self.reactor.calls)
    def test_connectionAfterTimeout(self):
        """
        If L{ftp.DTPFactory.buildProtocol} is called after the timeout
        specified by L{ftp.DTPFactory.setTimeout} has elapsed, C{None} is
        returned.
        """
        # Handle the error so it doesn't get logged.
        d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
        # Set up the timeout and then cause it to elapse so the Deferred does
        # fail.
        self.factory.setTimeout(10)
        self.reactor.advance(10)
        # Try to get a protocol - we should not be able to.
        self.assertIdentical(self.factory.buildProtocol(None), None)
        # Make sure the Deferred is doing the right thing.
        return d
    def test_timeoutAfterConnectionFailed(self):
        """
        L{ftp.DTPFactory.deferred} fails with L{PortConnectionError} when
        L{ftp.DTPFactory.clientConnectionFailed} is called. If the timeout
        specified with L{ftp.DTPFactory.setTimeout} expires after that, nothing
        additional happens.
        """
        finished = []
        d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
        d.addCallback(finished.append)
        self.factory.setTimeout(10)
        self.assertFalse(finished)
        self.factory.clientConnectionFailed(None, None)
        self.assertTrue(finished)
        # Letting the timeout fire afterwards must not cause a second error.
        self.reactor.advance(10)
        return d
    def test_connectionFailedAfterTimeout(self):
        """
        If L{ftp.DTPFactory.clientConnectionFailed} is called after the timeout
        specified by L{ftp.DTPFactory.setTimeout} has elapsed, nothing beyond
        the normal timeout before happens.
        """
        # Handle the error so it doesn't get logged.
        d = self.assertFailure(self.factory.deferred, ftp.PortConnectionError)
        # Set up the timeout and then cause it to elapse so the Deferred does
        # fail.
        self.factory.setTimeout(10)
        self.reactor.advance(10)
        # Now fail the connection attempt. This should do nothing. In
        # particular, it should not raise an exception.
        self.factory.clientConnectionFailed(None, defer.TimeoutError("foo"))
        # Give the Deferred to trial so it can make sure it did what we
        # expected.
        return d
class DTPTests(unittest.TestCase):
    """
    Tests for L{ftp.DTP}.
    The DTP instances in these tests are generated using
    DTPFactory.buildProtocol()
    """
    def setUp(self):
        """
        Create a fake protocol interpreter, a L{ftp.DTPFactory} instance,
        and dummy transport to help with tests.
        """
        # Deterministic clock standing in for the real reactor.
        self.reactor = task.Clock()
        # Minimal stand-in exposing the single attribute the factory reads.
        class ProtocolInterpreter(object):
            dtpInstance = None
        self.protocolInterpreter = ProtocolInterpreter()
        self.factory = ftp.DTPFactory(
            self.protocolInterpreter, None, self.reactor)
        self.transport = proto_helpers.StringTransportWithDisconnection()
    def test_sendLineNewline(self):
        """
        L{ftp.DTP.sendLine} writes the line passed to it plus a line delimiter
        to its transport.
        """
        line = 'line content'
        proto = self.factory.buildProtocol(None)
        proto.makeConnection(self.transport)
        proto.sendLine(line)
        # The transport must have received exactly the line plus CRLF.
        self.assertEqual(line + '\r\n', self.transport.value())
# -- Client Tests -----------------------------------------------------------
class PrintLines(protocol.Protocol):
    """Helper class used by FTPFileListingTests."""
    def __init__(self, lines):
        # Lines to emit, in order, when the connection is established.
        self._lines = lines
    def connectionMade(self):
        # Write each configured line CRLF-terminated, then hang up.
        for text in self._lines:
            self.transport.write("%s\r\n" % (text,))
        self.transport.loseConnection()
class MyFTPFileListProtocol(ftp.FTPFileListProtocol):
    """
    L{ftp.FTPFileListProtocol} variant that records lines it cannot parse
    in C{self.other} instead of discarding them.
    """
    def __init__(self):
        # Unparsable lines accumulate here for inspection by the tests.
        self.other = []
        ftp.FTPFileListProtocol.__init__(self)
    def unknownLine(self, line):
        self.other.append(line)
class FTPFileListingTests(unittest.TestCase):
    """
    Tests for parsing Unix-style directory listings with
    L{ftp.FTPFileListProtocol}.
    """
    def getFilesForLines(self, lines):
        """
        Feed C{lines} through a loopback connection to a
        L{MyFTPFileListProtocol} and fire with C{(files, other)}: the parsed
        file dicts and the unparsable lines.
        """
        fileList = MyFTPFileListProtocol()
        d = loopback.loopbackAsync(PrintLines(lines), fileList)
        d.addCallback(lambda _: (fileList.files, fileList.other))
        return d
    def testOneLine(self):
        """
        A single well-formed listing line parses into a file dict with all
        fields populated.
        """
        # This example line taken from the docstring for FTPFileListProtocol
        line = '-rw-r--r-- 1 root other 531 Jan 29 03:26 README'
        def check(((file,), other)):
            self.failIf(other, 'unexpect unparsable lines: %s' % repr(other))
            self.failUnless(file['filetype'] == '-', 'misparsed fileitem')
            self.failUnless(file['perms'] == 'rw-r--r--', 'misparsed perms')
            self.failUnless(file['owner'] == 'root', 'misparsed fileitem')
            self.failUnless(file['group'] == 'other', 'misparsed fileitem')
            self.failUnless(file['size'] == 531, 'misparsed fileitem')
            self.failUnless(file['date'] == 'Jan 29 03:26', 'misparsed fileitem')
            self.failUnless(file['filename'] == 'README', 'misparsed fileitem')
            self.failUnless(file['nlinks'] == 1, 'misparsed nlinks')
            self.failIf(file['linktarget'], 'misparsed linktarget')
        return self.getFilesForLines([line]).addCallback(check)
    def testVariantLines(self):
        """
        Directories, symlinks (with C{->} targets) and junk lines are each
        handled: the first two parse, the junk lands in C{other}.
        """
        line1 = 'drw-r--r-- 2 root other 531 Jan 9 2003 A'
        line2 = 'lrw-r--r-- 1 root other 1 Jan 29 03:26 B -> A'
        line3 = 'woohoo! '
        def check(((file1, file2), (other,))):
            # Loopback delivers '\r' from the CRLF along with the junk line.
            self.failUnless(other == 'woohoo! \r', 'incorrect other line')
            # file 1
            self.failUnless(file1['filetype'] == 'd', 'misparsed fileitem')
            self.failUnless(file1['perms'] == 'rw-r--r--', 'misparsed perms')
            self.failUnless(file1['owner'] == 'root', 'misparsed owner')
            self.failUnless(file1['group'] == 'other', 'misparsed group')
            self.failUnless(file1['size'] == 531, 'misparsed size')
            self.failUnless(file1['date'] == 'Jan 9 2003', 'misparsed date')
            self.failUnless(file1['filename'] == 'A', 'misparsed filename')
            self.failUnless(file1['nlinks'] == 2, 'misparsed nlinks')
            self.failIf(file1['linktarget'], 'misparsed linktarget')
            # file 2
            self.failUnless(file2['filetype'] == 'l', 'misparsed fileitem')
            self.failUnless(file2['perms'] == 'rw-r--r--', 'misparsed perms')
            self.failUnless(file2['owner'] == 'root', 'misparsed owner')
            self.failUnless(file2['group'] == 'other', 'misparsed group')
            self.failUnless(file2['size'] == 1, 'misparsed size')
            self.failUnless(file2['date'] == 'Jan 29 03:26', 'misparsed date')
            self.failUnless(file2['filename'] == 'B', 'misparsed filename')
            self.failUnless(file2['nlinks'] == 1, 'misparsed nlinks')
            self.failUnless(file2['linktarget'] == 'A', 'misparsed linktarget')
        return self.getFilesForLines([line1, line2, line3]).addCallback(check)
    def testUnknownLine(self):
        """
        Lines that do not match the listing format produce no file entries
        and are collected verbatim (with trailing '\r') as unknown lines.
        """
        def check((files, others)):
            self.failIf(files, 'unexpected file entries')
            self.failUnless(others == ['ABC\r', 'not a file\r'],
                            'incorrect unparsable lines: %s' % repr(others))
        return self.getFilesForLines(['ABC', 'not a file']).addCallback(check)
    def test_filenameWithUnescapedSpace(self):
        '''
        Will parse filenames and linktargets containing unescaped
        space characters.
        '''
        line1 = 'drw-r--r-- 2 root other 531 Jan 9 2003 A B'
        line2 = (
            'lrw-r--r-- 1 root other 1 Jan 29 03:26 '
            'B A -> D C/A B'
            )
        def check((files, others)):
            self.assertEqual([], others, 'unexpected others entries')
            self.assertEqual(
                'A B', files[0]['filename'], 'misparsed filename')
            self.assertEqual(
                'B A', files[1]['filename'], 'misparsed filename')
            self.assertEqual(
                'D C/A B', files[1]['linktarget'], 'misparsed linktarget')
        return self.getFilesForLines([line1, line2]).addCallback(check)
    def test_filenameWithEscapedSpace(self):
        '''
        Will parse filenames and linktargets containing escaped
        space characters.
        '''
        line1 = 'drw-r--r-- 2 root other 531 Jan 9 2003 A\ B'
        line2 = (
            'lrw-r--r-- 1 root other 1 Jan 29 03:26 '
            'B A -> D\ C/A B'
            )
        def check((files, others)):
            self.assertEqual([], others, 'unexpected others entries')
            self.assertEqual(
                'A B', files[0]['filename'], 'misparsed filename')
            self.assertEqual(
                'B A', files[1]['filename'], 'misparsed filename')
            self.assertEqual(
                'D C/A B', files[1]['linktarget'], 'misparsed linktarget')
        return self.getFilesForLines([line1, line2]).addCallback(check)
    def testYear(self):
        """
        Listing lines carrying a year (rather than a time) in the date field
        are parsed correctly.
        """
        # This example derived from bug description in issue 514.
        fileList = ftp.FTPFileListProtocol()
        exampleLine = (
            '-rw-r--r-- 1 root other 531 Jan 29 2003 README\n')
        class PrintLine(protocol.Protocol):
            def connectionMade(self):
                self.transport.write(exampleLine)
                self.transport.loseConnection()
        def check(ignored):
            file = fileList.files[0]
            self.failUnless(file['size'] == 531, 'misparsed fileitem')
            self.failUnless(file['date'] == 'Jan 29 2003', 'misparsed fileitem')
            self.failUnless(file['filename'] == 'README', 'misparsed fileitem')
        d = loopback.loopbackAsync(PrintLine(), fileList)
        return d.addCallback(check)
class FTPClientTests(unittest.TestCase):
    """
    Tests which exercise L{ftp.FTPClient} against a real (loopback) TCP
    connection driven by canned server responses.
    """
    def testFailedRETR(self):
        """
        A RETR refused by the server (550) causes the Deferred returned by
        L{ftp.FTPClient.retrieveFile} to fail with L{ftp.CommandFailed}.
        """
        f = protocol.Factory()
        f.noisy = 0
        port = reactor.listenTCP(0, f, interface="127.0.0.1")
        self.addCleanup(port.stopListening)
        portNum = port.getHost().port
        # This test data derived from a bug report by ranty on #twisted
        responses = ['220 ready, dude (vsFTPd 1.0.0: beat me, break me)',
                     # USER anonymous
                     '331 Please specify the password.',
                     # PASS twisted@twistedmatrix.com
                     '230 Login successful. Have fun.',
                     # TYPE I
                     '200 Binary it is, then.',
                     # PASV
                     '227 Entering Passive Mode (127,0,0,1,%d,%d)' %
                     (portNum >> 8, portNum & 0xff),
                     # RETR /file/that/doesnt/exist
                     '550 Failed to open file.']
        f.buildProtocol = lambda addr: PrintLines(responses)
        # NOTE: a previously-created, unused `ftp.FTPClient(passive=1)`
        # instance was removed here; the real client comes from ClientCreator.
        cc = protocol.ClientCreator(reactor, ftp.FTPClient, passive=1)
        d = cc.connectTCP('127.0.0.1', portNum)
        def gotClient(client):
            p = protocol.Protocol()
            return client.retrieveFile('/file/that/doesnt/exist', p)
        d.addCallback(gotClient)
        return self.assertFailure(d, ftp.CommandFailed)
    def test_errbacksUponDisconnect(self):
        """
        Test the ftp command errbacks when a connection lost happens during
        the operation.
        """
        ftpClient = ftp.FTPClient()
        tr = proto_helpers.StringTransportWithDisconnection()
        ftpClient.makeConnection(tr)
        tr.protocol = ftpClient
        d = ftpClient.list('some path', Dummy())
        m = []
        def _eb(failure):
            # Capture the failure so the test can assert it happened, and
            # swallow it so the Deferred does not report an unhandled error.
            m.append(failure)
            return None
        d.addErrback(_eb)
        from twisted.internet.main import CONNECTION_LOST
        ftpClient.connectionLost(failure.Failure(CONNECTION_LOST))
        self.failUnless(m, m)
        return d
class FTPClientTestCase(unittest.TestCase):
    """
    Test advanced FTP client commands.
    """
    def setUp(self):
        """
        Create a FTP client and connect it to fake transport.
        """
        # A string transport lets each test inspect exactly what the client
        # wrote and feed server replies by hand via lineReceived.
        self.client = ftp.FTPClient()
        self.transport = proto_helpers.StringTransportWithDisconnection()
        self.client.makeConnection(self.transport)
        self.transport.protocol = self.client
    def tearDown(self):
        """
        Deliver disconnection notification to the client so that it can
        perform any cleanup which may be required.
        """
        self.client.connectionLost(error.ConnectionLost())
    def _testLogin(self):
        """
        Test the login part.
        """
        # Nothing should be sent until the server greets us.
        self.assertEqual(self.transport.value(), '')
        self.client.lineReceived(
            '331 Guest login ok, type your email address as password.')
        self.assertEqual(self.transport.value(), 'USER anonymous\r\n')
        self.transport.clear()
        self.client.lineReceived(
            '230 Anonymous login ok, access restrictions apply.')
        self.assertEqual(self.transport.value(), 'TYPE I\r\n')
        self.transport.clear()
        self.client.lineReceived('200 Type set to I.')
    def test_CDUP(self):
        """
        Test the CDUP command.
        L{ftp.FTPClient.cdup} should return a Deferred which fires with a
        sequence of one element which is the string the server sent
        indicating that the command was executed successfully.
        (XXX - This is a bad API)
        """
        def cbCdup(res):
            self.assertEqual(res[0], '250 Requested File Action Completed OK')
        self._testLogin()
        # Issue the command, check the bytes written, then feed the reply.
        d = self.client.cdup().addCallback(cbCdup)
        self.assertEqual(self.transport.value(), 'CDUP\r\n')
        self.transport.clear()
        self.client.lineReceived('250 Requested File Action Completed OK')
        return d
    def test_failedCDUP(self):
        """
        Test L{ftp.FTPClient.cdup}'s handling of a failed CDUP command.
        When the CDUP command fails, the returned Deferred should errback
        with L{ftp.CommandFailed}.
        """
        self._testLogin()
        d = self.client.cdup()
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'CDUP\r\n')
        self.transport.clear()
        # A 550 error response triggers the errback.
        self.client.lineReceived('550 ..: No such file or directory')
        return d
    def test_PWD(self):
        """
        Test the PWD command.
        L{ftp.FTPClient.pwd} should return a Deferred which fires with a
        sequence of one element which is a string representing the current
        working directory on the server.
        (XXX - This is a bad API)
        """
        def cbPwd(res):
            # The quoted path in the 257 reply is extracted by
            # parsePWDResponse.
            self.assertEqual(ftp.parsePWDResponse(res[0]), "/bar/baz")
        self._testLogin()
        d = self.client.pwd().addCallback(cbPwd)
        self.assertEqual(self.transport.value(), 'PWD\r\n')
        self.client.lineReceived('257 "/bar/baz"')
        return d
    def test_failedPWD(self):
        """
        Test a failure in PWD command.
        When the PWD command fails, the returned Deferred should errback
        with L{ftp.CommandFailed}.
        """
        self._testLogin()
        d = self.client.pwd()
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PWD\r\n')
        self.client.lineReceived('550 /bar/baz: No such file or directory')
        return d
    def test_CWD(self):
        """
        Test the CWD command.
        L{ftp.FTPClient.cwd} should return a Deferred which fires with a
        sequence of one element which is the string the server sent
        indicating that the command was executed successfully.
        (XXX - This is a bad API)
        """
        def cbCwd(res):
            self.assertEqual(res[0], '250 Requested File Action Completed OK')
        self._testLogin()
        d = self.client.cwd("bar/foo").addCallback(cbCwd)
        self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
        self.client.lineReceived('250 Requested File Action Completed OK')
        return d
    def test_failedCWD(self):
        """
        Test a failure in CWD command.
        When the PWD command fails, the returned Deferred should errback
        with L{ftp.CommandFailed}.
        """
        self._testLogin()
        d = self.client.cwd("bar/foo")
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'CWD bar/foo\r\n')
        self.client.lineReceived('550 bar/foo: No such file or directory')
        return d
    def test_passiveRETR(self):
        """
        Test the RETR command in passive mode: get a file and verify its
        content.
        L{ftp.FTPClient.retrieveFile} should return a Deferred which fires
        with the protocol instance passed to it after the download has
        completed.
        (XXX - This API should be based on producers and consumers)
        """
        def cbRetr(res, proto):
            self.assertEqual(proto.buffer, 'x' * 1000)
        def cbConnect(host, port, factory):
            # Stands in for the reactor connectTCP call the client would
            # make to open the passive data connection.
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            # Simulate the server pushing the file content, then closing.
            proto.dataReceived("x" * 1000)
            proto.connectionLost(failure.Failure(error.ConnectionDone("")))
        self.client.connectFactory = cbConnect
        self._testLogin()
        proto = _BufferingProtocol()
        d = self.client.retrieveFile("spam", proto)
        d.addCallback(cbRetr, proto)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'RETR spam\r\n')
        self.transport.clear()
        self.client.lineReceived('226 Transfer Complete.')
        return d
    def test_RETR(self):
        """
        Test the RETR command in non-passive mode.
        Like L{test_passiveRETR} but in the configuration where the server
        establishes the data connection to the client, rather than the other
        way around.
        """
        self.client.passive = False
        def generatePort(portCmd):
            # Replaces the client's PORT-command generator; delivers the
            # data in-line as though the server had connected back.
            portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
            portCmd.protocol.makeConnection(proto_helpers.StringTransport())
            portCmd.protocol.dataReceived("x" * 1000)
            portCmd.protocol.connectionLost(
                failure.Failure(error.ConnectionDone("")))
        def cbRetr(res, proto):
            self.assertEqual(proto.buffer, 'x' * 1000)
        self.client.generatePortCommand = generatePort
        self._testLogin()
        proto = _BufferingProtocol()
        d = self.client.retrieveFile("spam", proto)
        d.addCallback(cbRetr, proto)
        self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
            (ftp.encodeHostPort('127.0.0.1', 9876),))
        self.transport.clear()
        self.client.lineReceived('200 PORT OK')
        self.assertEqual(self.transport.value(), 'RETR spam\r\n')
        self.transport.clear()
        self.client.lineReceived('226 Transfer Complete.')
        return d
    def test_failedRETR(self):
        """
        Try to RETR an unexisting file.
        L{ftp.FTPClient.retrieveFile} should return a Deferred which
        errbacks with L{ftp.CommandFailed} if the server indicates the file
        cannot be transferred for some reason.
        """
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            # Data connection closes without delivering any bytes.
            proto.connectionLost(failure.Failure(error.ConnectionDone("")))
        self.client.connectFactory = cbConnect
        self._testLogin()
        proto = _BufferingProtocol()
        d = self.client.retrieveFile("spam", proto)
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'RETR spam\r\n')
        self.transport.clear()
        self.client.lineReceived('550 spam: No such file or directory')
        return d
def test_lostRETR(self):
"""
Try a RETR, but disconnect during the transfer.
L{ftp.FTPClient.retrieveFile} should return a Deferred which
errbacks with L{ftp.ConnectionLost)
"""
self.client.passive = False
l = []
def generatePort(portCmd):
portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
tr = proto_helpers.StringTransportWithDisconnection()
portCmd.protocol.makeConnection(tr)
tr.protocol = portCmd.protocol
portCmd.protocol.dataReceived("x" * 500)
l.append(tr)
self.client.generatePortCommand = generatePort
self._testLogin()
proto = _BufferingProtocol()
d = self.client.retrieveFile("spam", proto)
self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
(ftp.encodeHostPort('127.0.0.1', 9876),))
self.transport.clear()
self.client.lineReceived('200 PORT OK')
self.assertEqual(self.transport.value(), 'RETR spam\r\n')
self.assert_(l)
l[0].loseConnection()
self.transport.loseConnection()
self.assertFailure(d, ftp.ConnectionLost)
return d
    def test_passiveSTOR(self):
        """
        Test the STOR command: send a file and verify its content.
        L{ftp.FTPClient.storeFile} should return a two-tuple of Deferreds.
        The first of which should fire with a protocol instance when the
        data connection has been established and is responsible for sending
        the contents of the file. The second of which should fire when the
        upload has completed, the data connection has been closed, and the
        server has acknowledged receipt of the file.
        (XXX - storeFile should take a producer as an argument, instead, and
        only return a Deferred which fires when the upload has succeeded or
        failed).
        """
        # Collects the bytes "uploaded" over the fake data connection.
        tr = proto_helpers.StringTransport()
        def cbStore(sender):
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            sender.transport.write("x" * 1000)
            sender.finish()
            sender.connectionLost(failure.Failure(error.ConnectionDone("")))
        def cbFinish(ign):
            self.assertEqual(tr.value(), "x" * 1000)
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(tr)
        self.client.connectFactory = cbConnect
        self._testLogin()
        d1, d2 = self.client.storeFile("spam")
        d1.addCallback(cbStore)
        d2.addCallback(cbFinish)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'STOR spam\r\n')
        self.transport.clear()
        self.client.lineReceived('226 Transfer Complete.')
        return defer.gatherResults([d1, d2])
    def test_failedSTOR(self):
        """
        Test a failure in the STOR command.
        If the server does not acknowledge successful receipt of the
        uploaded file, the second Deferred returned by
        L{ftp.FTPClient.storeFile} should errback with L{ftp.CommandFailed}.
        """
        tr = proto_helpers.StringTransport()
        def cbStore(sender):
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            sender.transport.write("x" * 1000)
            sender.finish()
            sender.connectionLost(failure.Failure(error.ConnectionDone("")))
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(tr)
        self.client.connectFactory = cbConnect
        self._testLogin()
        d1, d2 = self.client.storeFile("spam")
        d1.addCallback(cbStore)
        self.assertFailure(d2, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'STOR spam\r\n')
        self.transport.clear()
        # The 426 response indicates the upload was not received.
        self.client.lineReceived(
            '426 Transfer aborted.  Data connection closed.')
        return defer.gatherResults([d1, d2])
    def test_STOR(self):
        """
        Test the STOR command in non-passive mode.
        Like L{test_passiveSTOR} but in the configuration where the server
        establishes the data connection to the client, rather than the other
        way around.
        """
        tr = proto_helpers.StringTransport()
        self.client.passive = False
        def generatePort(portCmd):
            portCmd.text = 'PORT %s' % ftp.encodeHostPort('127.0.0.1', 9876)
            portCmd.protocol.makeConnection(tr)
        def cbStore(sender):
            # Drive the whole exchange from inside the callback, since the
            # data channel only exists once the sender is available.
            self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
                (ftp.encodeHostPort('127.0.0.1', 9876),))
            self.transport.clear()
            self.client.lineReceived('200 PORT OK')
            self.assertEqual(self.transport.value(), 'STOR spam\r\n')
            self.transport.clear()
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            sender.transport.write("x" * 1000)
            sender.finish()
            sender.connectionLost(failure.Failure(error.ConnectionDone("")))
            self.client.lineReceived('226 Transfer Complete.')
        def cbFinish(ign):
            self.assertEqual(tr.value(), "x" * 1000)
        self.client.generatePortCommand = generatePort
        self._testLogin()
        d1, d2 = self.client.storeFile("spam")
        d1.addCallback(cbStore)
        d2.addCallback(cbFinish)
        return defer.gatherResults([d1, d2])
    def test_passiveLIST(self):
        """
        Test the LIST command.
        L{ftp.FTPClient.list} should return a Deferred which fires with a
        protocol instance which was passed to list after the command has
        succeeded.
        (XXX - This is a very unfortunate API; if my understanding is
        correct, the results are always at least line-oriented, so allowing
        a per-line parser function to be specified would make this simpler,
        but a default implementation should really be provided which knows
        how to deal with all the formats used in real servers, so
        application developers never have to care about this insanity.  It
        would also be nice to either get back a Deferred of a list of
        filenames or to be able to consume the files as they are received
        (which the current API does allow, but in a somewhat inconvenient
        fashion) -exarkun)
        """
        def cbList(res, fileList):
            # Order of receipt is not part of the contract; compare sorted.
            fls = [f["filename"] for f in fileList.files]
            expected = ["foo", "bar", "baz"]
            expected.sort()
            fls.sort()
            self.assertEqual(fls, expected)
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            sending = [
                '-rw-r--r-- 0 spam egg 100 Oct 10 2006 foo\r\n',
                '-rw-r--r-- 3 spam egg 100 Oct 10 2006 bar\r\n',
                '-rw-r--r-- 4 spam egg 100 Oct 10 2006 baz\r\n',
            ]
            for i in sending:
                proto.dataReceived(i)
            proto.connectionLost(failure.Failure(error.ConnectionDone("")))
        self.client.connectFactory = cbConnect
        self._testLogin()
        fileList = ftp.FTPFileListProtocol()
        d = self.client.list('foo/bar', fileList).addCallback(cbList, fileList)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'LIST foo/bar\r\n')
        self.client.lineReceived('226 Transfer Complete.')
        return d
    def test_LIST(self):
        """
        Test the LIST command in non-passive mode.
        Like L{test_passiveLIST} but in the configuration where the server
        establishes the data connection to the client, rather than the other
        way around.
        """
        self.client.passive = False
        def generatePort(portCmd):
            portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
            portCmd.protocol.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            sending = [
                '-rw-r--r-- 0 spam egg 100 Oct 10 2006 foo\r\n',
                '-rw-r--r-- 3 spam egg 100 Oct 10 2006 bar\r\n',
                '-rw-r--r-- 4 spam egg 100 Oct 10 2006 baz\r\n',
            ]
            for i in sending:
                portCmd.protocol.dataReceived(i)
            portCmd.protocol.connectionLost(
                failure.Failure(error.ConnectionDone("")))
        def cbList(res, fileList):
            fls = [f["filename"] for f in fileList.files]
            expected = ["foo", "bar", "baz"]
            expected.sort()
            fls.sort()
            self.assertEqual(fls, expected)
        self.client.generatePortCommand = generatePort
        self._testLogin()
        fileList = ftp.FTPFileListProtocol()
        d = self.client.list('foo/bar', fileList).addCallback(cbList, fileList)
        self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
            (ftp.encodeHostPort('127.0.0.1', 9876),))
        self.transport.clear()
        self.client.lineReceived('200 PORT OK')
        self.assertEqual(self.transport.value(), 'LIST foo/bar\r\n')
        self.transport.clear()
        self.client.lineReceived('226 Transfer Complete.')
        return d
    def test_failedLIST(self):
        """
        Test a failure in LIST command.
        L{ftp.FTPClient.list} should return a Deferred which fails with
        L{ftp.CommandFailed} if the server indicates the indicated path is
        invalid for some reason.
        """
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            proto.connectionLost(failure.Failure(error.ConnectionDone("")))
        self.client.connectFactory = cbConnect
        self._testLogin()
        fileList = ftp.FTPFileListProtocol()
        d = self.client.list('foo/bar', fileList)
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'LIST foo/bar\r\n')
        self.client.lineReceived('550 foo/bar: No such file or directory')
        return d
    def test_NLST(self):
        """
        Test the NLST command in non-passive mode.
        L{ftp.FTPClient.nlst} should return a Deferred which fires with a
        list of filenames when the list command has completed.
        """
        self.client.passive = False
        def generatePort(portCmd):
            portCmd.text = 'PORT %s' % (ftp.encodeHostPort('127.0.0.1', 9876),)
            portCmd.protocol.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            # NLST output is one bare filename per line.
            portCmd.protocol.dataReceived('foo\r\n')
            portCmd.protocol.dataReceived('bar\r\n')
            portCmd.protocol.dataReceived('baz\r\n')
            portCmd.protocol.connectionLost(
                failure.Failure(error.ConnectionDone("")))
        def cbList(res, proto):
            # Order of receipt is not part of the contract; compare sorted.
            fls = proto.buffer.splitlines()
            expected = ["foo", "bar", "baz"]
            expected.sort()
            fls.sort()
            self.assertEqual(fls, expected)
        self.client.generatePortCommand = generatePort
        self._testLogin()
        lstproto = _BufferingProtocol()
        d = self.client.nlst('foo/bar', lstproto).addCallback(cbList, lstproto)
        self.assertEqual(self.transport.value(), 'PORT %s\r\n' %
            (ftp.encodeHostPort('127.0.0.1', 9876),))
        self.transport.clear()
        self.client.lineReceived('200 PORT OK')
        self.assertEqual(self.transport.value(), 'NLST foo/bar\r\n')
        self.client.lineReceived('226 Transfer Complete.')
        return d
    def test_passiveNLST(self):
        """
        Test the NLST command.
        Like L{test_NLST} but in the configuration where the server
        establishes the data connection to the client, rather than the other
        way around.
        """
        def cbList(res, proto):
            fls = proto.buffer.splitlines()
            expected = ["foo", "bar", "baz"]
            expected.sort()
            fls.sort()
            self.assertEqual(fls, expected)
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(proto_helpers.StringTransport())
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            proto.dataReceived('foo\r\n')
            proto.dataReceived('bar\r\n')
            proto.dataReceived('baz\r\n')
            proto.connectionLost(failure.Failure(error.ConnectionDone("")))
        self.client.connectFactory = cbConnect
        self._testLogin()
        lstproto = _BufferingProtocol()
        d = self.client.nlst('foo/bar', lstproto).addCallback(cbList, lstproto)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'NLST foo/bar\r\n')
        self.client.lineReceived('226 Transfer Complete.')
        return d
    def test_failedNLST(self):
        """
        Test a failure in NLST command.
        L{ftp.FTPClient.nlst} should return a Deferred which fails with
        L{ftp.CommandFailed} if the server indicates the indicated path is
        invalid for some reason.
        """
        tr = proto_helpers.StringTransport()
        def cbConnect(host, port, factory):
            self.assertEqual(host, '127.0.0.1')
            self.assertEqual(port, 12345)
            proto = factory.buildProtocol((host, port))
            proto.makeConnection(tr)
            self.client.lineReceived(
                '150 File status okay; about to open data connection.')
            proto.connectionLost(failure.Failure(error.ConnectionDone("")))
        self.client.connectFactory = cbConnect
        self._testLogin()
        lstproto = _BufferingProtocol()
        d = self.client.nlst('foo/bar', lstproto)
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PASV\r\n')
        self.transport.clear()
        self.client.lineReceived('227 Entering Passive Mode (%s).' %
            (ftp.encodeHostPort('127.0.0.1', 12345),))
        self.assertEqual(self.transport.value(), 'NLST foo/bar\r\n')
        self.client.lineReceived('550 foo/bar: No such file or directory')
        return d
    def test_renameFromTo(self):
        """
        L{ftp.FTPClient.rename} issues I{RNTO} and I{RNFR} commands and returns
        a L{Deferred} which fires when a file has successfully been renamed.
        """
        self._testLogin()
        d = self.client.rename("/spam", "/ham")
        # RNFR must be sent first; RNTO only after the 350 response.
        self.assertEqual(self.transport.value(), 'RNFR /spam\r\n')
        self.transport.clear()
        fromResponse = (
            '350 Requested file action pending further information.\r\n')
        self.client.lineReceived(fromResponse)
        self.assertEqual(self.transport.value(), 'RNTO /ham\r\n')
        toResponse = (
            '250 Requested File Action Completed OK')
        self.client.lineReceived(toResponse)
        # The result pairs the RNFR and RNTO response lines.
        d.addCallback(self.assertEqual, ([fromResponse], [toResponse]))
        return d
    def test_renameFromToEscapesPaths(self):
        """
        L{ftp.FTPClient.rename} issues I{RNTO} and I{RNFR} commands with paths
        escaped according to U{http://cr.yp.to/ftp/filesystem.html}.
        """
        self._testLogin()
        fromFile = "/foo/ba\nr/baz"
        toFile = "/qu\nux"
        self.client.rename(fromFile, toFile)
        self.client.lineReceived("350 ")
        self.client.lineReceived("250 ")
        # Embedded newlines must be sent as NUL bytes on the wire.
        self.assertEqual(
            self.transport.value(),
            "RNFR /foo/ba\x00r/baz\r\n"
            "RNTO /qu\x00ux\r\n")
    def test_renameFromToFailingOnFirstError(self):
        """
        The L{Deferred} returned by L{ftp.FTPClient.rename} is errbacked with
        L{CommandFailed} if the I{RNFR} command receives an error response code
        (for example, because the file does not exist).
        """
        self._testLogin()
        d = self.client.rename("/spam", "/ham")
        self.assertEqual(self.transport.value(), 'RNFR /spam\r\n')
        self.transport.clear()
        self.client.lineReceived('550 Requested file unavailable.\r\n')
        # The RNTO should not execute since the RNFR failed.
        self.assertEqual(self.transport.value(), '')
        return self.assertFailure(d, ftp.CommandFailed)
    def test_renameFromToFailingOnRenameTo(self):
        """
        The L{Deferred} returned by L{ftp.FTPClient.rename} is errbacked with
        L{CommandFailed} if the I{RNTO} command receives an error response code
        (for example, because the destination directory does not exist).
        """
        self._testLogin()
        d = self.client.rename("/spam", "/ham")
        self.assertEqual(self.transport.value(), 'RNFR /spam\r\n')
        self.transport.clear()
        self.client.lineReceived('350 Requested file action pending further information.\r\n')
        self.assertEqual(self.transport.value(), 'RNTO /ham\r\n')
        self.client.lineReceived('550 Requested file unavailable.\r\n')
        return self.assertFailure(d, ftp.CommandFailed)
    def test_makeDirectory(self):
        """
        L{ftp.FTPClient.makeDirectory} issues a I{MKD} command and returns a
        L{Deferred} which is called back with the server's response if the
        directory is created.
        """
        self._testLogin()
        d = self.client.makeDirectory("/spam")
        self.assertEqual(self.transport.value(), 'MKD /spam\r\n')
        self.client.lineReceived('257 "/spam" created.')
        return d.addCallback(self.assertEqual, ['257 "/spam" created.'])
    def test_makeDirectoryPathEscape(self):
        """
        L{ftp.FTPClient.makeDirectory} escapes the path name it sends according
        to U{http://cr.yp.to/ftp/filesystem.html}.
        """
        self._testLogin()
        d = self.client.makeDirectory("/sp\nam")
        # The embedded newline is transmitted as a NUL byte.
        self.assertEqual(self.transport.value(), 'MKD /sp\x00am\r\n')
        # This is necessary to make the Deferred fire.  The Deferred needs
        # to fire so that tearDown doesn't cause it to errback and fail this
        # or (more likely) a later test.
        self.client.lineReceived('257 win')
        return d
    def test_failedMakeDirectory(self):
        """
        L{ftp.FTPClient.makeDirectory} returns a L{Deferred} which is errbacked
        with L{CommandFailed} if the server returns an error response code.
        """
        self._testLogin()
        d = self.client.makeDirectory("/spam")
        self.assertEqual(self.transport.value(), 'MKD /spam\r\n')
        self.client.lineReceived('550 PERMISSION DENIED')
        return self.assertFailure(d, ftp.CommandFailed)
    def test_getDirectory(self):
        """
        Test the getDirectory method.
        L{ftp.FTPClient.getDirectory} should return a Deferred which fires with
        the current directory on the server. It wraps PWD command.
        """
        def cbGet(res):
            # Unlike pwd(), getDirectory() delivers the parsed path itself.
            self.assertEqual(res, "/bar/baz")
        self._testLogin()
        d = self.client.getDirectory().addCallback(cbGet)
        self.assertEqual(self.transport.value(), 'PWD\r\n')
        self.client.lineReceived('257 "/bar/baz"')
        return d
    def test_failedGetDirectory(self):
        """
        Test a failure in getDirectory method.
        The behaviour should be the same as PWD.
        """
        self._testLogin()
        d = self.client.getDirectory()
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PWD\r\n')
        self.client.lineReceived('550 /bar/baz: No such file or directory')
        return d
    def test_anotherFailedGetDirectory(self):
        """
        Test a different failure in getDirectory method.
        The response should be quoted to be parsed, so it returns an error
        otherwise.
        """
        self._testLogin()
        d = self.client.getDirectory()
        self.assertFailure(d, ftp.CommandFailed)
        self.assertEqual(self.transport.value(), 'PWD\r\n')
        # Missing quotes around the path make the 257 reply unparsable.
        self.client.lineReceived('257 /bar/baz')
        return d
    def test_removeFile(self):
        """
        L{ftp.FTPClient.removeFile} sends a I{DELE} command to the server for
        the indicated file and returns a Deferred which fires after the server
        sends a 250 response code.
        """
        self._testLogin()
        d = self.client.removeFile("/tmp/test")
        self.assertEqual(self.transport.value(), 'DELE /tmp/test\r\n')
        response = '250 Requested file action okay, completed.'
        self.client.lineReceived(response)
        return d.addCallback(self.assertEqual, [response])
    def test_failedRemoveFile(self):
        """
        If the server returns a response code other than 250 in response to a
        I{DELE} sent by L{ftp.FTPClient.removeFile}, the L{Deferred} returned
        by C{removeFile} is errbacked with a L{Failure} wrapping a
        L{CommandFailed}.
        """
        self._testLogin()
        d = self.client.removeFile("/tmp/test")
        self.assertEqual(self.transport.value(), 'DELE /tmp/test\r\n')
        response = '501 Syntax error in parameters or arguments.'
        self.client.lineReceived(response)
        d = self.assertFailure(d, ftp.CommandFailed)
        # The failed response line(s) are carried in the exception args.
        d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
        return d
    def test_unparsableRemoveFileResponse(self):
        """
        If the server returns a response line which cannot be parsed, the
        L{Deferred} returned by L{ftp.FTPClient.removeFile} is errbacked with a
        L{BadResponse} containing the response.
        """
        self._testLogin()
        d = self.client.removeFile("/tmp/test")
        # 765 is not a recognized FTP response code.
        response = '765 blah blah blah'
        self.client.lineReceived(response)
        d = self.assertFailure(d, ftp.BadResponse)
        d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
        return d
def test_multilineRemoveFileResponse(self):
"""
If the server returns multiple response lines, the L{Deferred} returned
by L{ftp.FTPClient.removeFile} is still fired with a true value if the
ultimate response code is 250.
"""
self._testLogin()
d = self.client.removeFile("/tmp/test")
response = ['250-perhaps a progress report',
'250 okay']
map(self.client.lineReceived, response)
return d.addCallback(self.assertTrue)
    def test_removeDirectory(self):
        """
        L{ftp.FTPClient.removeDirectory} sends a I{RMD} command to the server
        for the indicated directory and returns a Deferred which fires after
        the server sends a 250 response code.
        """
        self._testLogin()
        d = self.client.removeDirectory('/tmp/test')
        self.assertEqual(self.transport.value(), 'RMD /tmp/test\r\n')
        response = '250 Requested file action okay, completed.'
        self.client.lineReceived(response)
        return d.addCallback(self.assertEqual, [response])
    def test_failedRemoveDirectory(self):
        """
        If the server returns a response code other than 250 in response to a
        I{RMD} sent by L{ftp.FTPClient.removeDirectory}, the L{Deferred}
        returned by C{removeDirectory} is errbacked with a L{Failure} wrapping
        a L{CommandFailed}.
        """
        self._testLogin()
        d = self.client.removeDirectory("/tmp/test")
        self.assertEqual(self.transport.value(), 'RMD /tmp/test\r\n')
        response = '501 Syntax error in parameters or arguments.'
        self.client.lineReceived(response)
        d = self.assertFailure(d, ftp.CommandFailed)
        # The failed response line(s) are carried in the exception args.
        d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
        return d
    def test_unparsableRemoveDirectoryResponse(self):
        """
        If the server returns a response line which cannot be parsed, the
        L{Deferred} returned by L{ftp.FTPClient.removeDirectory} is errbacked
        with a L{BadResponse} containing the response.
        """
        self._testLogin()
        d = self.client.removeDirectory("/tmp/test")
        # 765 is not a recognized FTP response code.
        response = '765 blah blah blah'
        self.client.lineReceived(response)
        d = self.assertFailure(d, ftp.BadResponse)
        d.addCallback(lambda exc: self.assertEqual(exc.args, ([response],)))
        return d
def test_multilineRemoveDirectoryResponse(self):
"""
If the server returns multiple response lines, the L{Deferred} returned
by L{ftp.FTPClient.removeDirectory} is still fired with a true value
if the ultimate response code is 250.
"""
self._testLogin()
d = self.client.removeDirectory("/tmp/test")
response = ['250-perhaps a progress report',
'250 okay']
map(self.client.lineReceived, response)
return d.addCallback(self.assertTrue)
class FTPClientBasicTests(unittest.TestCase):
    def testGreeting(self):
        # The first response is captured as a greeting.
        ftpClient = ftp.FTPClientBasic()
        ftpClient.lineReceived('220 Imaginary FTP.')
        self.assertEqual(['220 Imaginary FTP.'], ftpClient.greeting)
    def testResponseWithNoMessage(self):
        # Responses with no message are still valid, i.e. three digits followed
        # by a space is complete response.
        ftpClient = ftp.FTPClientBasic()
        ftpClient.lineReceived('220 ')
        self.assertEqual(['220 '], ftpClient.greeting)
def testMultilineResponse(self):
ftpClient = ftp.FTPClientBasic()
ftpClient.transport = proto_helpers.StringTransport()
ftpClient.lineReceived('220 Imaginary FTP.')
# Queue (and send) a dummy command, and set up a callback to capture the
# result
deferred = ftpClient.queueStringCommand('BLAH')
result = []
deferred.addCallback(result.append)
deferred.addErrback(self.fail)
# Send the first line of a multiline response.
ftpClient.lineReceived('210-First line.')
self.assertEqual([], result)
# Send a second line, again prefixed with "nnn-".
ftpClient.lineReceived('123-Second line.')
self.assertEqual([], result)
# Send a plain line of text, no prefix.
ftpClient.lineReceived('Just some text.')
self.assertEqual([], result)
# Now send a short (less than 4 chars) line.
ftpClient.lineReceived('Hi')
self.assertEqual([], result)
# Now send an empty line.
ftpClient.lineReceived('')
self.assertEqual([], result)
# And a line with 3 digits in it, and nothing else.
ftpClient.lineReceived('321')
self.assertEqual([], result)
# Now finish it.
ftpClient.lineReceived('210 Done.')
self.assertEqual(
['210-First line.',
'123-Second line.',
'Just some text.',
'Hi',
'',
'321',
'210 Done.'], result[0])
def test_noPasswordGiven(self):
"""
Passing None as the password avoids sending the PASS command.
"""
# Create a client, and give it a greeting.
ftpClient = ftp.FTPClientBasic()
ftpClient.transport = proto_helpers.StringTransport()
ftpClient.lineReceived('220 Welcome to Imaginary FTP.')
# Queue a login with no password
ftpClient.queueLogin('bob', None)
self.assertEqual('USER bob\r\n', ftpClient.transport.value())
# Clear the test buffer, acknowledge the USER command.
ftpClient.transport.clear()
ftpClient.lineReceived('200 Hello bob.')
# The client shouldn't have sent anything more (i.e. it shouldn't have
# sent a PASS command).
self.assertEqual('', ftpClient.transport.value())
def test_noPasswordNeeded(self):
"""
Receiving a 230 response to USER prevents PASS from being sent.
"""
# Create a client, and give it a greeting.
ftpClient = ftp.FTPClientBasic()
ftpClient.transport = proto_helpers.StringTransport()
ftpClient.lineReceived('220 Welcome to Imaginary FTP.')
# Queue a login with no password
ftpClient.queueLogin('bob', 'secret')
self.assertEqual('USER bob\r\n', ftpClient.transport.value())
# Clear the test buffer, acknowledge the USER command with a 230
# response code.
ftpClient.transport.clear()
ftpClient.lineReceived('230 Hello bob. No password needed.')
# The client shouldn't have sent anything more (i.e. it shouldn't have
# sent a PASS command).
self.assertEqual('', ftpClient.transport.value())
class PathHandling(unittest.TestCase):
    """
    Tests for L{ftp.toSegments}, which resolves a path string against a
    current-working-directory segment list and normalizes it.
    """
    def testNormalizer(self):
        # Basic absolute/relative resolution against an empty cwd.
        for inp, outp in [('a', ['a']),
                          ('/a', ['a']),
                          ('/', []),
                          ('a/b/c', ['a', 'b', 'c']),
                          ('/a/b/c', ['a', 'b', 'c']),
                          ('/a/', ['a']),
                          ('a/', ['a'])]:
            self.assertEqual(ftp.toSegments([], inp), outp)

        # Relative paths are appended to the cwd; absolute paths ignore it.
        for inp, outp in [('b', ['a', 'b']),
                          ('b/', ['a', 'b']),
                          ('/b', ['b']),
                          ('/b/', ['b']),
                          ('b/c', ['a', 'b', 'c']),
                          ('b/c/', ['a', 'b', 'c']),
                          ('/b/c', ['b', 'c']),
                          ('/b/c/', ['b', 'c'])]:
            self.assertEqual(ftp.toSegments(['a'], inp), outp)

        # Repeated slashes collapse to a single separator.
        for inp, outp in [('//', []),
                          ('//a', ['a']),
                          ('a//', ['a']),
                          ('a//b', ['a', 'b'])]:
            self.assertEqual(ftp.toSegments([], inp), outp)

        for inp, outp in [('//', []),
                          ('//b', ['b']),
                          ('b//c', ['a', 'b', 'c'])]:
            self.assertEqual(ftp.toSegments(['a'], inp), outp)

        # '..' removes the preceding segment.
        for inp, outp in [('..', []),
                          ('../', []),
                          ('a/..', ['x']),
                          ('/a/..', []),
                          ('/a/b/..', ['a']),
                          ('/a/b/../', ['a']),
                          ('/a/b/../c', ['a', 'c']),
                          ('/a/b/../c/', ['a', 'c']),
                          ('/a/b/../../c', ['c']),
                          ('/a/b/../../c/', ['c']),
                          ('/a/b/../../c/..', []),
                          ('/a/b/../../c/../', [])]:
            self.assertEqual(ftp.toSegments(['x'], inp), outp)

        # Attempts to escape above the root raise InvalidPath.
        for inp in ['..', '../', 'a/../..', 'a/../../',
                    '/..', '/../', '/a/../..', '/a/../../',
                    '/a/b/../../..']:
            self.assertRaises(ftp.InvalidPath, ftp.toSegments, [], inp)

        for inp in ['../..', '../../', '../a/../..']:
            self.assertRaises(ftp.InvalidPath, ftp.toSegments, ['x'], inp)
class IsGlobbingExpressionTests(unittest.TestCase):
    """
    Tests for the C{ftp._isGlobbingExpression} utility function.
    """
    def test_isGlobbingExpressionEmptySegments(self):
        """
        _isGlobbingExpression returns False when called with no argument,
        an empty segment list, or None.
        """
        self.assertFalse(ftp._isGlobbingExpression())
        for segments in ([], None):
            self.assertFalse(ftp._isGlobbingExpression(segments))

    def test_isGlobbingExpressionNoGlob(self):
        """
        _isGlobbingExpression returns False for plain segments.  Only the
        last segment part (the filename) is inspected; glob characters in
        earlier path segments are ignored.
        """
        for segments in (['ignore', 'expr'], ['*.txt', 'expr']):
            self.assertFalse(ftp._isGlobbingExpression(segments))

    def test_isGlobbingExpressionGlob(self):
        """
        _isGlobbingExpression returns True when the last segment part (the
        filename) contains globbing characters.
        """
        for filename in ('*.txt', '[a-b].txt', 'fil?.txt'):
            self.assertTrue(ftp._isGlobbingExpression(['ignore', filename]))
class BaseFTPRealmTests(unittest.TestCase):
    """
    Tests for L{ftp.BaseFTPRealm}, a base class to help define L{IFTPShell}
    realms with different user home directory policies.
    """
    def test_interface(self):
        """
        L{ftp.BaseFTPRealm} implements L{IRealm}.
        """
        self.assertTrue(verifyClass(IRealm, ftp.BaseFTPRealm))

    def test_getHomeDirectory(self):
        """
        L{ftp.BaseFTPRealm} calls its C{getHomeDirectory} method with the
        avatarId being requested to determine the home directory for that
        avatar.
        """
        result = filepath.FilePath(self.mktemp())
        avatars = []
        # Subclass overriding the policy hook so we can observe the avatarId
        # it is called with and control the directory it returns.
        class TestRealm(ftp.BaseFTPRealm):
            def getHomeDirectory(self, avatarId):
                avatars.append(avatarId)
                return result

        realm = TestRealm(self.mktemp())
        iface, avatar, logout = realm.requestAvatar(
            "alice@example.com", None, ftp.IFTPShell)
        self.assertIsInstance(avatar, ftp.FTPShell)
        self.assertEqual(avatar.filesystemRoot, result)

    def test_anonymous(self):
        """
        L{ftp.BaseFTPRealm} returns an L{ftp.FTPAnonymousShell} instance for
        anonymous avatar requests.
        """
        anonymous = self.mktemp()
        realm = ftp.BaseFTPRealm(anonymous)
        iface, avatar, logout = realm.requestAvatar(
            checkers.ANONYMOUS, None, ftp.IFTPShell)
        self.assertIsInstance(avatar, ftp.FTPAnonymousShell)
        self.assertEqual(avatar.filesystemRoot, filepath.FilePath(anonymous))

    def test_notImplemented(self):
        """
        L{ftp.BaseFTPRealm.getHomeDirectory} should be overridden by a subclass
        and raises L{NotImplementedError} if it is not.
        """
        realm = ftp.BaseFTPRealm(self.mktemp())
        self.assertRaises(NotImplementedError, realm.getHomeDirectory, object())
class FTPRealmTestCase(unittest.TestCase):
    """
    Tests for L{ftp.FTPRealm}.
    """
    def test_getHomeDirectory(self):
        """
        L{ftp.FTPRealm} accepts an extra directory to its initializer and treats
        the avatarId passed to L{ftp.FTPRealm.getHomeDirectory} as a single path
        segment to construct a child of that directory.
        """
        base = '/path/to/home'
        realm = ftp.FTPRealm(self.mktemp(), base)
        home = realm.getHomeDirectory('alice@example.com')
        self.assertEqual(
            filepath.FilePath(base).child('alice@example.com'), home)

    def test_defaultHomeDirectory(self):
        """
        If no extra directory is passed to L{ftp.FTPRealm}, it uses C{"/home"}
        as the base directory containing all user home directories.
        """
        realm = ftp.FTPRealm(self.mktemp())
        home = realm.getHomeDirectory('alice@example.com')
        self.assertEqual(filepath.FilePath('/home/alice@example.com'), home)
class SystemFTPRealmTests(unittest.TestCase):
    """
    Tests for L{ftp.SystemFTPRealm}.
    """
    # These tests consult the system account database (pwd), so they only
    # run on POSIX platforms.
    skip = nonPOSIXSkip

    def test_getHomeDirectory(self):
        """
        L{ftp.SystemFTPRealm.getHomeDirectory} treats the avatarId passed to it
        as a username in the underlying platform and returns that account's home
        directory.
        """
        # Try to pick a username that will have a home directory.
        user = getpass.getuser()
        # Try to find their home directory in a different way than used by the
        # implementation.  Maybe this is silly and can only introduce spurious
        # failures due to system-specific configurations.
        import pwd
        expected = pwd.getpwnam(user).pw_dir
        realm = ftp.SystemFTPRealm(self.mktemp())
        home = realm.getHomeDirectory(user)
        self.assertEqual(home, filepath.FilePath(expected))

    def test_noSuchUser(self):
        """
        L{ftp.SystemFTPRealm.getHomeDirectory} raises L{UnauthorizedLogin} when
        passed a username which has no corresponding home directory in the
        system's accounts database.
        """
        # A random hex username is overwhelmingly unlikely to exist.
        user = insecureRandom(4).encode('hex')
        realm = ftp.SystemFTPRealm(self.mktemp())
        self.assertRaises(UnauthorizedLogin, realm.getHomeDirectory, user)
class ErrnoToFailureTestCase(unittest.TestCase):
    """
    Tests for L{ftp.errnoToFailure} errno checking.
    """
    def _assertTranslation(self, errorNumber, exceptionType):
        """
        Assert that L{ftp.errnoToFailure} maps C{errorNumber} to a Deferred
        which fails with C{exceptionType}.
        """
        d = ftp.errnoToFailure(errorNumber, "foo")
        return self.assertFailure(d, exceptionType)

    def test_notFound(self):
        """
        C{errno.ENOENT} should be translated to L{ftp.FileNotFoundError}.
        """
        return self._assertTranslation(errno.ENOENT, ftp.FileNotFoundError)

    def test_permissionDenied(self):
        """
        C{errno.EPERM} should be translated to L{ftp.PermissionDeniedError}.
        """
        return self._assertTranslation(errno.EPERM, ftp.PermissionDeniedError)

    def test_accessDenied(self):
        """
        C{errno.EACCES} should be translated to L{ftp.PermissionDeniedError}.
        """
        return self._assertTranslation(errno.EACCES, ftp.PermissionDeniedError)

    def test_notDirectory(self):
        """
        C{errno.ENOTDIR} should be translated to L{ftp.IsNotADirectoryError}.
        """
        return self._assertTranslation(errno.ENOTDIR, ftp.IsNotADirectoryError)

    def test_fileExists(self):
        """
        C{errno.EEXIST} should be translated to L{ftp.FileExistsError}.
        """
        return self._assertTranslation(errno.EEXIST, ftp.FileExistsError)

    def test_isDirectory(self):
        """
        C{errno.EISDIR} should be translated to L{ftp.IsADirectoryError}.
        """
        return self._assertTranslation(errno.EISDIR, ftp.IsADirectoryError)

    def test_passThrough(self):
        """
        If an unknown errno is passed to L{ftp.errnoToFailure}, it should let
        the originating exception pass through.
        """
        try:
            raise RuntimeError("bar")
        except:
            # errnoToFailure is called while the RuntimeError is the active
            # exception, so it is what the returned Deferred fails with.
            d = ftp.errnoToFailure(-1, "foo")
            return self.assertFailure(d, RuntimeError)
class AnonymousFTPShellTestCase(unittest.TestCase):
    """
    Test anonymous shell properties.
    """
    def test_anonymousWrite(self):
        """
        Check that L{ftp.FTPAnonymousShell} returns an error when trying to
        open it in write mode.
        """
        shell = ftp.FTPAnonymousShell('')
        d = shell.openForWriting(('foo',))
        self.assertFailure(d, ftp.PermissionDeniedError)
        return d
class IFTPShellTestsMixin:
    """
    Generic tests for the C{IFTPShell} interface.

    Subclasses must provide C{self.shell} (an C{IFTPShell} provider) and
    implement the four filesystem helpers below against the backend the
    shell operates on.
    """
    def directoryExists(self, path):
        """
        Test if the directory exists at C{path}.

        @param path: the relative path to check.
        @type path: C{str}.

        @return: C{True} if C{path} exists and is a directory, C{False} if
            it's not the case
        @rtype: C{bool}
        """
        raise NotImplementedError()

    def createDirectory(self, path):
        """
        Create a directory in C{path}.

        @param path: the relative path of the directory to create, with one
            segment.
        @type path: C{str}
        """
        raise NotImplementedError()

    def fileExists(self, path):
        """
        Test if the file exists at C{path}.

        @param path: the relative path to check.
        @type path: C{str}.

        @return: C{True} if C{path} exists and is a file, C{False} if it's not
            the case.
        @rtype: C{bool}
        """
        raise NotImplementedError()

    def createFile(self, path, fileContent=''):
        """
        Create a file named C{path} with some content.

        @param path: the relative path of the file to create, without
            directory.
        @type path: C{str}

        @param fileContent: the content of the file.
        @type fileContent: C{str}
        """
        raise NotImplementedError()

    def test_createDirectory(self):
        """
        C{directoryExists} should report correctly about directory existence,
        and C{createDirectory} should create a directory detectable by
        C{directoryExists}.
        """
        self.assertFalse(self.directoryExists('bar'))
        self.createDirectory('bar')
        self.assertTrue(self.directoryExists('bar'))

    def test_createFile(self):
        """
        C{fileExists} should report correctly about file existence, and
        C{createFile} should create a file detectable by C{fileExists}.
        """
        self.assertFalse(self.fileExists('file.txt'))
        self.createFile('file.txt')
        self.assertTrue(self.fileExists('file.txt'))

    def test_makeDirectory(self):
        """
        Create a directory and check it ends in the filesystem.
        """
        d = self.shell.makeDirectory(('foo',))
        def cb(result):
            self.assertTrue(self.directoryExists('foo'))
        return d.addCallback(cb)

    def test_makeDirectoryError(self):
        """
        Creating a directory that already exists should fail with a
        C{ftp.FileExistsError}.
        """
        self.createDirectory('foo')
        d = self.shell.makeDirectory(('foo',))
        return self.assertFailure(d, ftp.FileExistsError)

    def test_removeDirectory(self):
        """
        Try to remove a directory and check it's removed from the filesystem.
        """
        self.createDirectory('bar')
        d = self.shell.removeDirectory(('bar',))
        def cb(result):
            self.assertFalse(self.directoryExists('bar'))
        return d.addCallback(cb)

    def test_removeDirectoryOnFile(self):
        """
        removeDirectory should not work in file and fail with a
        C{ftp.IsNotADirectoryError}.
        """
        self.createFile('file.txt')
        d = self.shell.removeDirectory(('file.txt',))
        return self.assertFailure(d, ftp.IsNotADirectoryError)

    def test_removeNotExistingDirectory(self):
        """
        Removing directory that doesn't exist should fail with a
        C{ftp.FileNotFoundError}.
        """
        d = self.shell.removeDirectory(('bar',))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_removeFile(self):
        """
        Try to remove a file and check it's removed from the filesystem.
        """
        self.createFile('file.txt')
        d = self.shell.removeFile(('file.txt',))
        def cb(res):
            self.assertFalse(self.fileExists('file.txt'))
        d.addCallback(cb)
        return d

    def test_removeFileOnDirectory(self):
        """
        removeFile should not work on directory.
        """
        self.createDirectory('ned')
        d = self.shell.removeFile(('ned',))
        return self.assertFailure(d, ftp.IsADirectoryError)

    def test_removeNotExistingFile(self):
        """
        Try to remove a non existent file, and check it raises a
        L{ftp.FileNotFoundError}.
        """
        d = self.shell.removeFile(('foo',))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_list(self):
        """
        Check the output of the list method.
        """
        self.createDirectory('ned')
        self.createFile('file.txt')
        d = self.shell.list(('.',))
        def cb(l):
            # Sort for a deterministic comparison; listing order is not
            # guaranteed by the interface.
            l.sort()
            self.assertEqual(l,
                [('file.txt', []), ('ned', [])])
        return d.addCallback(cb)

    def test_listWithStat(self):
        """
        Check the output of list with asked stats.
        """
        self.createDirectory('ned')
        self.createFile('file.txt')
        d = self.shell.list(('.',), ('size', 'permissions',))
        def cb(l):
            l.sort()
            self.assertEqual(len(l), 2)
            self.assertEqual(l[0][0], 'file.txt')
            self.assertEqual(l[1][0], 'ned')
            # Size and permissions are reported differently between platforms
            # so just check they are present
            self.assertEqual(len(l[0][1]), 2)
            self.assertEqual(len(l[1][1]), 2)
        return d.addCallback(cb)

    def test_listWithInvalidStat(self):
        """
        Querying an invalid stat should result in a C{AttributeError}.
        """
        self.createDirectory('ned')
        d = self.shell.list(('.',), ('size', 'whateverstat',))
        return self.assertFailure(d, AttributeError)

    def test_listFile(self):
        """
        Check the output of the list method on a file.
        """
        self.createFile('file.txt')
        d = self.shell.list(('file.txt',))
        def cb(l):
            l.sort()
            self.assertEqual(l,
                [('file.txt', [])])
        return d.addCallback(cb)

    def test_listNotExistingDirectory(self):
        """
        list on a directory that doesn't exist should fail with a
        L{ftp.FileNotFoundError}.
        """
        d = self.shell.list(('foo',))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_access(self):
        """
        Try to access a resource.
        """
        self.createDirectory('ned')
        d = self.shell.access(('ned',))
        return d

    def test_accessNotFound(self):
        """
        access should fail on a resource that doesn't exist.
        """
        d = self.shell.access(('foo',))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_openForReading(self):
        """
        Check that openForReading returns an object providing C{ftp.IReadFile}.
        """
        self.createFile('file.txt')
        d = self.shell.openForReading(('file.txt',))
        def cb(res):
            self.assertTrue(ftp.IReadFile.providedBy(res))
        d.addCallback(cb)
        return d

    def test_openForReadingNotFound(self):
        """
        openForReading should fail with a C{ftp.FileNotFoundError} on a file
        that doesn't exist.
        """
        d = self.shell.openForReading(('ned',))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_openForReadingOnDirectory(self):
        """
        openForReading should not work on directory.
        """
        self.createDirectory('ned')
        d = self.shell.openForReading(('ned',))
        return self.assertFailure(d, ftp.IsADirectoryError)

    def test_openForWriting(self):
        """
        Check that openForWriting returns an object providing C{ftp.IWriteFile}.
        """
        d = self.shell.openForWriting(('foo',))
        def cb1(res):
            self.assertTrue(ftp.IWriteFile.providedBy(res))
            # cb2 is defined below; by the time cb1 runs it is bound.
            return res.receive().addCallback(cb2)
        def cb2(res):
            self.assertTrue(IConsumer.providedBy(res))
        d.addCallback(cb1)
        return d

    def test_openForWritingExistingDirectory(self):
        """
        openForWriting should not be able to open a directory that already
        exists.
        """
        self.createDirectory('ned')
        d = self.shell.openForWriting(('ned',))
        return self.assertFailure(d, ftp.IsADirectoryError)

    def test_openForWritingInNotExistingDirectory(self):
        """
        openForWriting should fail with a L{ftp.FileNotFoundError} if you
        specify a file in a directory that doesn't exist.
        """
        self.createDirectory('ned')
        d = self.shell.openForWriting(('ned', 'idonotexist', 'foo'))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_statFile(self):
        """
        Check the output of the stat method on a file.
        """
        fileContent = 'wobble\n'
        self.createFile('file.txt', fileContent)
        d = self.shell.stat(('file.txt',), ('size', 'directory'))
        def cb(res):
            self.assertEqual(res[0], len(fileContent))
            self.assertFalse(res[1])
        d.addCallback(cb)
        return d

    def test_statDirectory(self):
        """
        Check the output of the stat method on a directory.
        """
        self.createDirectory('ned')
        d = self.shell.stat(('ned',), ('size', 'directory'))
        def cb(res):
            self.assertTrue(res[1])
        d.addCallback(cb)
        return d

    def test_statOwnerGroup(self):
        """
        Check the owner and groups stats.
        """
        self.createDirectory('ned')
        d = self.shell.stat(('ned',), ('owner', 'group'))
        def cb(res):
            self.assertEqual(len(res), 2)
        d.addCallback(cb)
        return d

    def test_statHardlinksNotImplemented(self):
        """
        If L{twisted.python.filepath.FilePath.getNumberOfHardLinks} is not
        implemented, the number returned is 0
        """
        pathFunc = self.shell._path
        def raiseNotImplemented():
            raise NotImplementedError
        # Wrap the shell's path factory so the FilePath it produces reports
        # hard-link counting as unsupported.
        def notImplementedFilePath(path):
            f = pathFunc(path)
            f.getNumberOfHardLinks = raiseNotImplemented
            return f
        self.shell._path = notImplementedFilePath

        self.createDirectory('ned')
        d = self.shell.stat(('ned',), ('hardlinks',))
        self.assertEqual(self.successResultOf(d), [0])

    def test_statOwnerGroupNotImplemented(self):
        """
        If L{twisted.python.filepath.FilePath.getUserID} or
        L{twisted.python.filepath.FilePath.getGroupID} are not implemented,
        the owner returned is "0" and the group is returned as "0"
        """
        pathFunc = self.shell._path
        def raiseNotImplemented():
            raise NotImplementedError
        # Wrap the shell's path factory so ownership lookups are unsupported.
        def notImplementedFilePath(path):
            f = pathFunc(path)
            f.getUserID = raiseNotImplemented
            f.getGroupID = raiseNotImplemented
            return f
        self.shell._path = notImplementedFilePath

        self.createDirectory('ned')
        d = self.shell.stat(('ned',), ('owner', 'group'))
        self.assertEqual(self.successResultOf(d), ["0", '0'])

    def test_statNotExisting(self):
        """
        stat should fail with L{ftp.FileNotFoundError} on a file that doesn't
        exist.
        """
        d = self.shell.stat(('foo',), ('size', 'directory'))
        return self.assertFailure(d, ftp.FileNotFoundError)

    def test_invalidStat(self):
        """
        Querying an invalid stat should result in a C{AttributeError}.
        """
        self.createDirectory('ned')
        d = self.shell.stat(('ned',), ('size', 'whateverstat'))
        return self.assertFailure(d, AttributeError)

    def test_rename(self):
        """
        Try to rename a directory.
        """
        self.createDirectory('ned')
        d = self.shell.rename(('ned',), ('foo',))
        def cb(res):
            self.assertTrue(self.directoryExists('foo'))
            self.assertFalse(self.directoryExists('ned'))
        return d.addCallback(cb)

    def test_renameNotExisting(self):
        """
        Renaming a directory that doesn't exist should fail with
        L{ftp.FileNotFoundError}.
        """
        d = self.shell.rename(('foo',), ('bar',))
        return self.assertFailure(d, ftp.FileNotFoundError)
class FTPShellTestCase(unittest.TestCase, IFTPShellTestsMixin):
    """
    Tests for the C{ftp.FTPShell} object.

    Runs the generic C{IFTPShellTestsMixin} suite against a real on-disk
    directory, implementing the mixin's filesystem helpers with FilePath.
    """
    def setUp(self):
        """
        Create a root directory and instantiate a shell.
        """
        self.root = filepath.FilePath(self.mktemp())
        self.root.createDirectory()
        self.shell = ftp.FTPShell(self.root)

    def directoryExists(self, path):
        """
        Test if the directory exists at C{path}.
        """
        return self.root.child(path).isdir()

    def createDirectory(self, path):
        """
        Create a directory in C{path}.
        """
        return self.root.child(path).createDirectory()

    def fileExists(self, path):
        """
        Test if the file exists at C{path}.
        """
        return self.root.child(path).isfile()

    def createFile(self, path, fileContent=''):
        """
        Create a file named C{path} with some content.
        """
        return self.root.child(path).setContent(fileContent)
class TestConsumer(object):
    """
    A simple consumer for tests. It only works with non-streaming producers.

    @ivar producer: an object providing
        L{twisted.internet.interfaces.IPullProducer}.
    """

    implements(IConsumer)
    # Set by registerProducer; None while no producer is attached.
    producer = None

    def registerProducer(self, producer, streaming):
        """
        Simple register of producer, checks that no register has happened
        before.
        """
        assert self.producer is None
        self.buffer = []
        self.producer = producer
        # Pull-style: immediately ask the producer for data.
        self.producer.resumeProducing()

    def unregisterProducer(self):
        """
        Unregister the producer, it should be done after a register.
        """
        assert self.producer is not None
        self.producer = None

    def write(self, data):
        """
        Save the data received.
        """
        self.buffer.append(data)
        self.producer.resumeProducing()
class TestProducer(object):
    """
    A dumb producer.

    Writes its whole payload to the consumer in a single call; no
    streaming, pausing or resuming is supported.
    """

    def __init__(self, toProduce, consumer):
        """
        @param toProduce: data to write
        @type toProduce: C{str}

        @param consumer: the consumer of data.
        @type consumer: C{IConsumer}
        """
        self.toProduce = toProduce
        self.consumer = consumer

    def start(self):
        """
        Send the data to consume.
        """
        self.consumer.write(self.toProduce)
class IReadWriteTestsMixin:
    """
    Generic tests for the C{IReadFile} and C{IWriteFile} interfaces.

    Subclasses must implement the three factory/accessor methods below.
    """
    def getFileReader(self, content):
        """
        Return an object providing C{IReadFile}, ready to send data C{content}.
        """
        raise NotImplementedError()

    def getFileWriter(self):
        """
        Return an object providing C{IWriteFile}, ready to receive data.
        """
        raise NotImplementedError()

    def getFileContent(self):
        """
        Return the content of the file used.
        """
        raise NotImplementedError()

    def test_read(self):
        """
        Test L{ftp.IReadFile}: the implementation should have a send method
        returning a C{Deferred} which fires when all the data has been sent
        to the consumer, and the data should be correctly sent to the consumer.
        """
        content = 'wobble\n'
        consumer = TestConsumer()
        def cbGet(reader):
            return reader.send(consumer).addCallback(cbSend)
        def cbSend(res):
            self.assertEqual("".join(consumer.buffer), content)
        return self.getFileReader(content).addCallback(cbGet)

    def test_write(self):
        """
        Test L{ftp.IWriteFile}: the implementation should have a receive
        method returning a C{Deferred} which fires with a consumer ready to
        receive data to be written. It should also have a close() method that
        returns a Deferred.
        """
        content = 'elbbow\n'
        def cbGet(writer):
            return writer.receive().addCallback(cbReceive, writer)
        def cbReceive(consumer, writer):
            producer = TestProducer(content, consumer)
            consumer.registerProducer(None, True)
            producer.start()
            consumer.unregisterProducer()
            return writer.close().addCallback(cbClose)
        def cbClose(ignored):
            self.assertEqual(self.getFileContent(), content)
        return self.getFileWriter().addCallback(cbGet)
class FTPReadWriteTestCase(unittest.TestCase, IReadWriteTestsMixin):
    """
    Tests for C{ftp._FileReader} and C{ftp._FileWriter}, the objects returned
    by the shell in C{openForReading}/C{openForWriting}.
    """

    def setUp(self):
        """
        Create a temporary file used later.
        """
        self.root = filepath.FilePath(self.mktemp())
        self.root.createDirectory()
        self.shell = ftp.FTPShell(self.root)
        self.filename = "file.txt"

    def getFileReader(self, content):
        """
        Return a C{ftp._FileReader} instance with a file opened for reading.
        """
        self.root.child(self.filename).setContent(content)
        return self.shell.openForReading((self.filename,))

    def getFileWriter(self):
        """
        Return a C{ftp._FileWriter} instance with a file opened for writing.
        """
        return self.shell.openForWriting((self.filename,))

    def getFileContent(self):
        """
        Return the content of the temporary file.
        """
        return self.root.child(self.filename).getContent()
class CloseTestWriter:
    """
    IWriteFile stub whose close() returns an externally-controlled Deferred
    (self.d, assigned by the test) so the test can decide when the upload
    is allowed to finish.
    """
    implements(ftp.IWriteFile)
    # Flipped to True once close() has been invoked.
    closeStarted = False
    def receive(self):
        self.s = StringIO()
        fc = ftp.FileConsumer(self.s)
        return defer.succeed(fc)
    def close(self):
        self.closeStarted = True
        # self.d is set by the test before the upload begins.
        return self.d
class CloseTestShell:
    """
    Minimal shell stub: always hands out the writer assigned to self.writer.
    """
    def openForWriting(self, segs):
        return defer.succeed(self.writer)
class FTPCloseTest(unittest.TestCase):
    """Tests that the server invokes IWriteFile.close"""

    def test_write(self):
        """Confirm that FTP uploads (i.e. ftp_STOR) correctly call and wait
        upon the IWriteFile object's close() method"""
        f = ftp.FTP()
        f.workingDirectory = ["root"]
        f.shell = CloseTestShell()
        f.shell.writer = CloseTestWriter()
        # Deferred controlling when close() completes; fired manually below.
        f.shell.writer.d = defer.Deferred()
        f.factory = ftp.FTPFactory()
        f.factory.timeOut = None
        f.makeConnection(StringIO())

        # Wire up a DTP (data channel) instance by hand.
        di = ftp.DTP()
        di.factory = ftp.DTPFactory(f)
        f.dtpInstance = di
        di.makeConnection(None)

        stor_done = []
        d = f.ftp_STOR("path")
        d.addCallback(stor_done.append)
        # the writer is still receiving data
        self.assertFalse(f.shell.writer.closeStarted, "close() called early")
        di.dataReceived("some data here")
        self.assertFalse(f.shell.writer.closeStarted, "close() called early")
        di.connectionLost("reason is ignored")
        # now we should be waiting in close()
        self.assertTrue(f.shell.writer.closeStarted, "close() not called")
        self.assertFalse(stor_done)
        f.shell.writer.d.callback("allow close() to finish")
        self.assertTrue(stor_done)

        return d # just in case an errback occurred
class FTPResponseCodeTests(unittest.TestCase):
    """
    Tests relating directly to response codes.
    """
    def test_unique(self):
        """
        All of the response code globals (for example C{RESTART_MARKER_REPLY} or
        C{USR_NAME_OK_NEED_PASS}) have unique values and are present in the
        C{RESPONSE} dictionary.
        """
        allValues = set(ftp.RESPONSE)
        seenValues = set()

        # Heuristic: every upper-case module-level string in the ftp module
        # is taken to be a response-code constant.
        for key, value in vars(ftp).items():
            if isinstance(value, str) and key.isupper():
                self.assertIn(
                    value, allValues,
                    "Code %r with value %r missing from RESPONSE dict" % (
                        key, value))
                self.assertNotIn(
                    value, seenValues,
                    "Duplicate code %r with value %r" % (key, value))
                seenValues.add(value)
| 35.099237 | 94 | 0.60097 |
8fb8ab840a4186e2ba4157a1882c005d8182839b | 530 | py | Python | doc/src/tools/lib/sql_role.py | mrmilosz/psycopg2 | 34d3ac428c81b2dce93d3202af978e977de4b099 | [
"OpenSSL"
] | 6 | 2015-09-11T05:24:55.000Z | 2021-12-18T04:24:57.000Z | doc/src/tools/lib/sql_role.py | mrmilosz/psycopg2 | 34d3ac428c81b2dce93d3202af978e977de4b099 | [
"OpenSSL"
] | 1 | 2015-01-22T22:50:42.000Z | 2015-01-22T22:50:42.000Z | doc/src/tools/lib/sql_role.py | beyang/psycopg2 | 62dfd8e9841616b4b7eea12fc923b3ee57fc5cc4 | [
"OpenSSL"
] | 4 | 2016-02-15T19:11:47.000Z | 2022-03-16T18:41:25.000Z | # -*- coding: utf-8 -*-
"""
sql role
~~~~~~~~
An interpreted text role to style SQL syntax in Psycopg documentation.
:copyright: Copyright 2010 by Daniele Varrazzo.
"""
from docutils import nodes, utils
from docutils.parsers.rst import roles
def sql_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """
    Interpreted text role rendering its content as a literal node tagged
    with the ``sql`` CSS class.

    Signature and return value follow the docutils role convention:
    a tuple of (list of nodes, list of system messages).
    """
    text = utils.unescape(text)
    # Build a fresh options dict instead of mutating the argument: the
    # default {} is shared across every call, and a caller-supplied dict
    # should not be modified either.
    options = dict(options, classes=['sql'])
    return [nodes.literal(rawtext, text, **options)], []
def setup(app):
    """
    Sphinx extension entry point: register the ``sql`` role for this build.
    """
    roles.register_local_role('sql', sql_role)
| 24.090909 | 75 | 0.658491 |
9c58438e2c1f2392fd04f5a33ae27c61ccaf45c2 | 2,831 | py | Python | Sketches/MPS/Old/Sugar/silly.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/MPS/Old/Sugar/silly.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/MPS/Old/Sugar/silly.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Inspired by Damian Conway's syntax for encoding
# Flying Spaghetti Monsters in C++
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is valid python code. See the end for how/why/runnable code.
"""
Source(1)*outbox(box="outbox")
Source(1)*outbox(box="outbox") - Sink(1)*inbox(box="inbox")
Source(1)*outbox() ------- inbox("control")*Sink()*outbox("signal") -------- Sink(1)*inbox()
Source(1)---Filter(1)----Sink(1)
"""
#
class component(object):
    """
    Sketch of a Kamaelia component whose operator overloads build pipeline
    expressions (see the module docstring for the intended syntax).
    """
    def __init__(self, *args, **argd):
        # Keyword arguments become attributes directly.
        self.__dict__.update(argd)
        self.args = args
    def __neg__(self):
        "pass through the previous expression unchanged"
        return self
    def __sub__(self, other):
        # 'a - b' sketches wiring a's default outbox to a's default inbox.
        # NOTE(review): 'other' is printed but otherwise unused here — this
        # looks unfinished; confirm against the intended DSL semantics.
        print "SELF, OTHER", self, other
        return self*outbox()-self*inbox()
    def __radd__(self, other):
        print "Self, other", self, other
        return service(other,self)
class service(object):
    """
    Pairing of a component with one of its boxes, produced by the box
    multiplication syntax (e.g. C{Source(1)*outbox()}).
    """
    def __init__(self, Component, Box):
        self.Component = Component
        self.Box = Box
    def __neg__(self):
        "pass through the previous expression unchanged"
        return self
    def __sub__(self, other):
        # 'service - service' denotes a linkage between the two endpoints.
        return link(self, other)
class link(service):
    """
    A directed connection between two service endpoints.  Subclasses
    C{service} so chained '-' expressions keep composing.
    """
    def __init__(self, source, sink):
        self.source = source
        self.sink = sink
class box(object):
    """
    A named component mailbox.  Multiplying a box by a component (on either
    side) yields a C{service} binding the two together.
    """
    def __init__(self, box=None):
        self.box = box
    def __rmul__(self, other):
        # component * box
        print self.box, other
        return service(other,self)
    def __mul__(self, other):
        # box * component
        print self.box, other
        return service(other,self)
    def __neg__(self):
        "pass through the previous expression unchanged"
        return self
class inbox(box):
    """
    A box whose name defaults to "inbox".
    """
    def __init__(self, box="inbox"):
        # Delegate storage to the base class instead of assigning directly.
        super(inbox, self).__init__(box=box)
class outbox(box):
    """
    A box whose name defaults to "outbox".
    """
    def __init__(self, box="outbox"):
        # Delegate storage to the base class instead of assigning directly.
        super(outbox, self).__init__(box=box)
# Concrete component roles used by the module-level demonstration below;
# all behaviour comes from the 'component' operator overloads.
class Source(component): pass
class Filter(component): pass
class Sink(component): pass
# Module-level demonstration of the DSL syntax described in the module
# docstring.  These expressions execute on import and print linkage traces;
# the runs of '-' collapse via repeated unary __neg__ into a single '-'.
Source(1)*outbox(box="outbox")
Source(1)*outbox(box="outbox") - Sink(1)*inbox(box="inbox")
Source(1)*outbox() ------- inbox("control")*Sink()*outbox("signal") -------- Sink(1)*inbox()
Source(1)---Filter(1)----Sink(1)
| 29.185567 | 92 | 0.674673 |
d164a9074b4f6193d859d49a16daf38de09854ef | 995 | py | Python | i2a/latent_space/latent_space_imagination_core.py | FlorianKlemt/pytorch-latent-i2a | 36809bf3adda1fcffaccd27e352b7ad2338060a7 | [
"MIT"
] | 3 | 2019-02-24T07:37:36.000Z | 2020-03-17T16:00:38.000Z | i2a/latent_space/latent_space_imagination_core.py | FlorianKlemt/pytorch-latent-i2a | 36809bf3adda1fcffaccd27e352b7ad2338060a7 | [
"MIT"
] | null | null | null | i2a/latent_space/latent_space_imagination_core.py | FlorianKlemt/pytorch-latent-i2a | 36809bf3adda1fcffaccd27e352b7ad2338060a7 | [
"MIT"
] | null | null | null | from torch import nn
class LatentSpaceImaginationCore(nn.Module):
    """
    Imagination core that rolls out imagined trajectories directly in the
    latent space of an environment model, delegating every operation to the
    wrapped ``env_model`` and ``rollout_policy``.
    """

    def __init__(self, env_model=None, rollout_policy=None):
        super(LatentSpaceImaginationCore, self).__init__()
        self.env_model = env_model
        self.rollout_policy = rollout_policy

    def forward(self, latent_state, action):
        """One imagined step: next latent state, its prior, and its reward."""
        successor, z_prior = self.env_model.next_latent_space(
            latent_state, action)
        predicted_reward = self.env_model.reward(successor)
        return successor, z_prior, predicted_reward

    def encode(self, observation_initial_context):
        """Map observations into the model's latent space."""
        return self.env_model.encode(observation_initial_context)

    def decode(self, latent_space, z_prior):
        """Reconstruct a predicted observation from a latent state."""
        return self.env_model.decode(latent_space, z_prior)

    def sample(self, latent_space):
        """Sample an action from the rollout policy for a latent state."""
        _value, actor = self.rollout_policy(latent_space)
        return self.rollout_policy.sample(actor)
| 36.851852 | 91 | 0.730653 |
affba4e354ce25e2efdfa2faa3b7b5a0b9da6808 | 6,651 | py | Python | main.py | James822/JBhangman | da74e4ebc046404bd7141149f911fc4c10dd24c9 | [
"MIT"
] | null | null | null | main.py | James822/JBhangman | da74e4ebc046404bd7141149f911fc4c10dd24c9 | [
"MIT"
] | null | null | null | main.py | James822/JBhangman | da74e4ebc046404bd7141149f911fc4c10dd24c9 | [
"MIT"
] | null | null | null | MAX_WORD_LENGTH = 11
# Minimum allowed length (characters) of the secret word; the maximum bound
# is MAX_WORD_LENGTH above.  Both bounds are inclusive (see start()).
MIN_WORD_LENGTH = 5
def _gallows_lines(hang_state):
    """Return the ASCII-art gallows for the given number of wrong guesses.

    Body parts appear cumulatively as hang_state grows: head (1), upper
    torso (2), left arm (3), right arm (4), left leg (5), right leg (6).
    The strings are byte-identical to the previous hand-duplicated art.
    """
    lines = ["\t\t______________", "\t\t| |", "\t\t| |"]
    # Head row.
    lines.append("\t\t0 |" if hang_state >= 1 else "\t\t |")
    # Arms/upper-torso row.
    if hang_state >= 4:
        lines.append("\t --|-- |")
    elif hang_state == 3:
        lines.append("\t --| |")
    elif hang_state == 2:
        lines.append("\t\t| |")
    else:
        lines.append("\t\t |")
    # Lower-torso row.
    lines.append("\t\t| |" if hang_state >= 2 else "\t\t |")
    # Legs row.
    if hang_state >= 6:
        lines.append("\t / \\ |")
    elif hang_state == 5:
        lines.append("\t / |")
    else:
        lines.append("\t\t |")
    lines.extend(["\t\t |"] * 3)
    lines.append("\t\t _______")
    return lines


def gui(word, found_list, guesss_list, hang_state):
    """Draw the gallows and the partially guessed word; report a loss.

    word       -- the secret word being guessed
    found_list -- per-position list: the letter if guessed, "" otherwise
    guesss_list -- kept for interface compatibility (unused here)
    hang_state -- number of wrong guesses so far (0-6)

    Returns True when the figure is complete (hang_state >= 6, game lost),
    otherwise False.  Previously this function duplicated the drawing code
    seven times, once per hang_state; the art now comes from _gallows_lines.
    """
    for line in _gallows_lines(hang_state):
        print(line)
    # Show the word with guessed letters revealed and blanks elsewhere.
    for n in range(len(word)):
        if found_list[n] != "":
            print(found_list[n] + " ", end="")
        else:
            print("_ ", end="")
    print("\n")
    if hang_state >= 6:
        print("YOU LOSE! BETTER LUCK NEXT TIME!")
        return True
    return False
def start():
    """Run one interactive game of hangman.

    Prompts for a secret word (length MIN_WORD_LENGTH..MAX_WORD_LENGTH,
    inclusive), then repeatedly draws the board via gui() and processes
    guesses until the player wins or makes 6 wrong guesses.
    """
    word = input("Please enter the word to be guessed: ").lower()
    # NOTE(review): the error message says "> 5 and < 11" but lengths 5 and
    # 11 are actually accepted (the bounds are inclusive).
    while ( len(word) < MIN_WORD_LENGTH ) or ( len(word) > MAX_WORD_LENGTH ):
        word = input("Error, word must be > 5 and < 11 letters. Enter again: ").lower()
    game_end = False
    guess_letter = ""
    found_list = [""] * len(word) # correct guesses
    guess_list = [] # failed guesses (all other guesses)
    hang_state = 0
    while not game_end: #while game is running
        game_end = gui(word,found_list, guess_list, hang_state)
        if game_end == False:
            guess_letter = input("Guess the next letter: ").lower() # Guess a letter
            # Re-prompt until exactly one character was entered.
            while(len(guess_letter) > 1 or len(guess_letter) < 1):
                print("The letter must be length 1")
                guess_letter = input("Guess the next letter: ").lower() # Guess a letter
            # Re-prompt if this letter was already tried (right or wrong).
            while guess_letter in guess_list or guess_letter in found_list:
                print("You already guessed this letter!")
                guess_letter = input("Guess the next letter: ").lower() # Guess a letter
            for charnum in range(len(word)): #for each letter in the word
                if guess_letter == word[charnum]:
                    if found_list[charnum] == "":
                        found_list[charnum] = guess_letter.lower()
                else:
                    # Only at the last position, and only if the letter never
                    # matched anywhere (found_list was filled in-order above),
                    # count this as a wrong guess.
                    if charnum == (len(word) - 1) and guess_letter not in found_list:
                        print("That letter does not exist in the word...")
                        guess_list.append(guess_letter.lower())
                        hang_state += 1
            finalString = ''.join(found_list)
            if str(finalString) == str(word):
                print("You got it! The word was " + word + "!")
                game_end = True
# Start the game only when executed as a script (not on import).
if __name__ == "__main__":
    start()
| 36.543956 | 88 | 0.375733 |
8df692a1acdb118d56b869846827ee01a4bee330 | 3,012 | py | Python | tests/contract/KT1EXWcSD1W3CqHCyU3m8xXDcctYrdkRJ5yA/test_exwcsd.py | bantalon/pytezos | e538fb47a0879e70af3c0c074348a5edc3a94372 | [
"MIT"
] | null | null | null | tests/contract/KT1EXWcSD1W3CqHCyU3m8xXDcctYrdkRJ5yA/test_exwcsd.py | bantalon/pytezos | e538fb47a0879e70af3c0c074348a5edc3a94372 | [
"MIT"
] | null | null | null | tests/contract/KT1EXWcSD1W3CqHCyU3m8xXDcctYrdkRJ5yA/test_exwcsd.py | bantalon/pytezos | e538fb47a0879e70af3c0c074348a5edc3a94372 | [
"MIT"
] | null | null | null | from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.micheline import get_script_section
from pytezos.michelson.types.base import MichelsonType
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.format import micheline_to_michelson
from pytezos.michelson.parse import michelson_to_micheline
from pytezos.michelson.forge import forge_micheline, unforge_micheline
# Contract data folder name.  NOTE(review): appears unused — the JSON
# fixtures below are opened relative to this test file directly.
folder = 'dexter_usdtz_xtz'
class MainnetContractTestCaseEXWCSD(TestCase):
    """Round-trip encoding tests for mainnet contract KT1EXWcSD1W3CqHCyU3m8xXDcctYrdkRJ5yA.

    Fixtures (__script__.json, __entrypoints__.json) live next to this file.
    The previous code inserted empty f-string components (``f''``) into the
    paths; ``os.path.join`` ignores empty components, so they are dropped.
    """
    @classmethod
    def setUpClass(cls):
        """Load the contract script and entrypoint descriptions once per class."""
        with open(join(dirname(__file__), '__script__.json')) as f:
            script = json.loads(f.read())
        cls.program = MichelsonProgram.match(script['code'])
        cls.script = script
        with open(join(dirname(__file__), '__entrypoints__.json')) as f:
            entrypoints = json.loads(f.read())
        cls.entrypoints = entrypoints

    def test_parameter_type_exwcsd(self):
        """Parameter section survives a micheline -> type -> micheline round trip."""
        type_expr = self.program.parameter.as_micheline_expr()
        self.assertEqual(
            get_script_section(self.script, 'parameter'),
            type_expr,
            'micheline -> type -> micheline')

    def test_entrypoints_exwcsd(self):
        """Entrypoints match the reference file (plus the implicit extra one)."""
        ep_types = self.program.parameter.list_entrypoints()
        self.assertEqual(len(self.entrypoints['entrypoints']) + 1, len(ep_types))
        for name, ep_type in ep_types.items():
            if name not in ['default', 'root']:
                expected_type = MichelsonType.match(self.entrypoints['entrypoints'][name])
                expected_type.assert_type_equal(ep_type)

    def test_storage_type_exwcsd(self):
        """Storage section survives a micheline -> type -> micheline round trip."""
        type_expr = self.program.storage.as_micheline_expr()
        self.assertEqual(
            get_script_section(self.script, 'storage'),
            type_expr,
            'micheline -> type -> micheline')

    def test_storage_encoding_exwcsd(self):
        """Storage value survives micheline and python-object round trips."""
        val = self.program.storage.from_micheline_value(self.script['storage'])
        val_expr = val.to_micheline_value(mode='legacy_optimized')
        self.assertEqual(self.script['storage'], val_expr, 'micheline -> value -> micheline')
        val_ = self.program.storage.from_python_object(val.to_python_object())
        val_expr_ = val_.to_micheline_value(mode='legacy_optimized')
        self.assertEqual(self.script['storage'], val_expr_, 'value -> pyobj -> value -> micheline')

    def test_script_parsing_formatting(self):
        """Code section survives Michelson formatting and re-parsing."""
        actual = michelson_to_micheline(micheline_to_michelson(self.script['code']))
        self.assertEqual(self.script['code'], actual)

    def test_storage_forging(self):
        """Storage forging/unforging round-trips in optimized and readable modes."""
        expected = self.script['storage']
        actual = unforge_micheline(forge_micheline(expected))
        self.assertEqual(expected, actual)
        expected = self.program.storage.from_micheline_value(expected).to_micheline_value(mode='readable')
        actual = unforge_micheline(forge_micheline(expected))
        self.assertEqual(expected, actual)
| 40.702703 | 106 | 0.699867 |
446648b845a6d28907040ea99c17b913a69ce990 | 6,420 | py | Python | imagenet-pretrain/lednet_imagenet.py | barnjamin/LEDNet | 073fcd0b2f83a5b7ff7706e0295a7ff5727b4ab1 | [
"MIT"
] | null | null | null | imagenet-pretrain/lednet_imagenet.py | barnjamin/LEDNet | 073fcd0b2f83a5b7ff7706e0295a7ff5727b4ab1 | [
"MIT"
] | null | null | null | imagenet-pretrain/lednet_imagenet.py | barnjamin/LEDNet | 073fcd0b2f83a5b7ff7706e0295a7ff5727b4ab1 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
def channel_shuffle(x, groups):
    """Shuffle channels across groups (ShuffleNet-style interleave).

    Channels are viewed as (groups, channels_per_group), transposed, and
    flattened back, so information mixes between the channel groups.
    Uses x.size() instead of the deprecated x.data.size() (identical sizes,
    but .data silently detaches from autograd tracking).
    """
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups
    # reshape to (N, groups, C/groups, H, W)
    x = x.view(batchsize, groups, channels_per_group, height, width)
    # swap the group and per-group axes, then flatten back to (N, C, H, W)
    x = torch.transpose(x, 1, 2).contiguous()
    return x.view(batchsize, -1, height, width)
class Conv2dBnRelu(nn.Module):
    """Standard Conv2d -> BatchNorm2d -> ReLU building block."""

    def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=0, dilation=1, bias=True):
        super(Conv2dBnRelu, self).__init__()
        # Keep the submodules inside a Sequential named `conv` so that
        # checkpoint/state-dict keys are stable.
        self.conv = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size, stride, padding, dilation=dilation, bias=bias),
            nn.BatchNorm2d(out_ch, eps=1e-3),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Apply conv, batch norm and ReLU in sequence."""
        return self.conv(x)
## After the Concat -> BN step you can also add Dropout (as done in SS_nbt_module); it may improve results.
class DownsamplerBlock (nn.Module):
    """ENet/ERFNet-style downsampler: strided conv and max-pool, concatenated.

    The conv contributes (out_channel - in_channel) channels and the pooled
    input the remaining in_channel, giving out_channel total at half resolution.
    """

    def __init__(self, in_channel, out_channel):
        super().__init__()
        self.conv = nn.Conv2d(in_channel, out_channel-in_channel, (3, 3), stride=2, padding=1, bias=True)
        self.pool = nn.MaxPool2d(2, stride=2)
        self.bn = nn.BatchNorm2d(out_channel, eps=1e-3)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input):
        """Downsample by 2x and widen to out_channel channels."""
        merged = torch.cat([self.conv(input), self.pool(input)], 1)
        return self.relu(self.bn(merged))
class SS_nbt_module(nn.Module):
    """Split-Shuffle non-bottleneck module (LEDNet).

    The input is split channel-wise into two halves; each half runs through
    its own factorized (3x1 / 1x3) convolution branch — the second conv pair
    in each branch is dilated — then the halves are concatenated, added
    residually to the input, ReLU'd, and channel-shuffled across 2 groups.
    """
    def __init__(self, chann, dropprob, dilated):
        super().__init__()
        # Each branch operates on half the channels.
        oup_inc = chann//2
        #dw
        # Left branch: 3x1 then 1x3, twice (second pair dilated).
        self.conv3x1_1_l = nn.Conv2d(oup_inc, oup_inc, (3,1), stride=1, padding=(1,0), bias=True)
        self.conv1x3_1_l = nn.Conv2d(oup_inc, oup_inc, (1,3), stride=1, padding=(0,1), bias=True)
        self.bn1_l = nn.BatchNorm2d(oup_inc, eps=1e-03)
        self.conv3x1_2_l = nn.Conv2d(oup_inc, oup_inc, (3,1), stride=1, padding=(1*dilated,0), bias=True, dilation = (dilated,1))
        self.conv1x3_2_l = nn.Conv2d(oup_inc, oup_inc, (1,3), stride=1, padding=(0,1*dilated), bias=True, dilation = (1,dilated))
        self.bn2_l = nn.BatchNorm2d(oup_inc, eps=1e-03)
        #dw
        # Right branch: same layers, applied in mirrored (1x3 before 3x1) order.
        self.conv3x1_1_r = nn.Conv2d(oup_inc, oup_inc, (3,1), stride=1, padding=(1,0), bias=True)
        self.conv1x3_1_r = nn.Conv2d(oup_inc, oup_inc, (1,3), stride=1, padding=(0,1), bias=True)
        self.bn1_r = nn.BatchNorm2d(oup_inc, eps=1e-03)
        self.conv3x1_2_r = nn.Conv2d(oup_inc, oup_inc, (3,1), stride=1, padding=(1*dilated,0), bias=True, dilation = (dilated,1))
        self.conv1x3_2_r = nn.Conv2d(oup_inc, oup_inc, (1,3), stride=1, padding=(0,1*dilated), bias=True, dilation = (1,dilated))
        self.bn2_r = nn.BatchNorm2d(oup_inc, eps=1e-03)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(dropprob)
    @staticmethod
    def _concat(x,out):
        # Re-join the two half-channel branches.
        return torch.cat((x,out),1)
    def forward(self, input):
        # Split channels into two halves.
        x1 = input[:,:(input.shape[1]//2),:,:]
        x2 = input[:,(input.shape[1]//2):,:,:]
        # Left branch: 3x1 -> 1x3 -> BN -> ReLU -> dilated 3x1 -> dilated 1x3 -> BN.
        output1 = self.conv3x1_1_l(x1)
        output1 = self.relu(output1)
        output1 = self.conv1x3_1_l(output1)
        output1 = self.bn1_l(output1)
        output1 = self.relu(output1)
        output1 = self.conv3x1_2_l(output1)
        output1 = self.relu(output1)
        output1 = self.conv1x3_2_l(output1)
        output1 = self.bn2_l(output1)
        # Right branch: mirrored order (1x3 before 3x1).
        output2 = self.conv1x3_1_r(x2)
        output2 = self.relu(output2)
        output2 = self.conv3x1_1_r(output2)
        output2 = self.bn1_r(output2)
        output2 = self.relu(output2)
        output2 = self.conv1x3_2_r(output2)
        output2 = self.relu(output2)
        output2 = self.conv3x1_2_r(output2)
        output2 = self.bn2_r(output2)
        # Dropout is skipped entirely when the probability is zero.
        if (self.dropout.p != 0):
            output1 = self.dropout(output1)
            output2 = self.dropout(output2)
        out = self._concat(output1,output2)
        # Residual connection followed by channel shuffle across 2 groups.
        out = F.relu(input+out,inplace=True)
        return channel_shuffle(out,2)
class Encoder(nn.Module):
    """LEDNet encoder: three downsampling stages interleaved with SS_nbt blocks.

    Resolution drops 8x overall (3 -> 32 -> 64 -> 128 channels); the final
    stage uses two groups of increasingly dilated SS_nbt modules.
    """

    def __init__(self):
        super().__init__()
        self.initial_block = DownsamplerBlock(3, 32)
        # Build the layer list in order; module indices must match the
        # original append order so checkpoints stay compatible.
        blocks = [SS_nbt_module(32, 0.03, 1) for _ in range(3)]
        blocks.append(DownsamplerBlock(32, 64))
        blocks += [SS_nbt_module(64, 0.03, 1) for _ in range(2)]
        blocks.append(DownsamplerBlock(64, 128))
        for dilation in (1, 2, 5, 9):
            blocks.append(SS_nbt_module(128, 0.3, dilation))
        for dilation in (2, 5, 9, 17):
            blocks.append(SS_nbt_module(128, 0.3, dilation))
        self.layers = nn.ModuleList(blocks)

    def forward(self, input):
        """Run the initial downsampler, then every block in sequence."""
        output = self.initial_block(input)
        for layer in self.layers:
            output = layer(output)
        return output
class Features(nn.Module):
    """Encoder followed by max- and global-average pooling (ImageNet pretraining head)."""

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.extralayer1 = nn.MaxPool2d(2, stride=2)
        self.extralayer2 = nn.AvgPool2d(14, 1, 0)

    def forward(self, input):
        """Encode, halve the resolution, then average-pool over a 14x14 window."""
        return self.extralayer2(self.extralayer1(self.encoder(input)))
class Classifier(nn.Module):
    """Linear classification head over 128-dimensional pooled features."""

    def __init__(self, num_classes):
        super().__init__()
        self.linear = nn.Linear(128, num_classes)

    def forward(self, input):
        """Flatten to (batch, 128) and project to class logits."""
        flattened = input.view(input.size(0), 128)  # first dim is batch_size
        return self.linear(flattened)
class LEDNet(nn.Module):
    """Full LEDNet ImageNet classifier: feature extractor plus linear head."""

    def __init__(self, num_classes):  # pass a pretrained encoder via Features/Encoder
        super().__init__()
        self.features = Features()
        self.classifier = Classifier(num_classes)

    def forward(self, input):
        """Extract pooled features and classify them."""
        return self.classifier(self.features(input))
| 31.165049 | 129 | 0.610436 |
e110a53253eedceb1cc620998e719e1b2944dcf5 | 14,976 | py | Python | ruth_demo.py | RuthAngus/celerotation | 6959d3f9e09ef206491253ac49df9dc3987c2bd9 | [
"MIT"
] | null | null | null | ruth_demo.py | RuthAngus/celerotation | 6959d3f9e09ef206491253ac49df9dc3987c2bd9 | [
"MIT"
] | null | null | null | ruth_demo.py | RuthAngus/celerotation | 6959d3f9e09ef206491253ac49df9dc3987c2bd9 | [
"MIT"
] | null | null | null | # coding: utf-8
# Code for measuring rotation periods using celerite.
from matplotlib import rcParams
# Render figures at print quality.
rcParams["savefig.dpi"] = 150
rcParams["figure.dpi"] = 150
import corner
import emcee3
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from astropy.stats import LombScargle
import celerite
from celerite import terms
from rotate.k2 import get_light_curve
from rotate.rotation_term import RotationTerm, MixtureTerm, MixtureOfSHOsTerm
# NOTE(review): numpy is imported twice (also above); harmless but redundant.
import numpy as np
from scipy.linalg import cho_factor, cho_solve
from celerite import modeling
import sys
import kplr
# Global kplr API client used to fetch Kepler light curves below.
client = kplr.API()
import kepler_data as kd
# Download a light curve.
# Ids >= 200000000 are treated as K2 (EPIC) targets; smaller ids as Kepler.
kepid = int(sys.argv[1]) # 205117205
if kepid >= 200000000:
    sections, t, flux = get_light_curve(2, kepid)
else:
    # include sections in load_kepler_data, one section for each quarter.
    kepstar = client.star(kepid)
    kepstar.get_light_curves(fetch=True, short_cadence=False)
    LC_DIR = "/Users/ruthangus/.kplr/data/lightcurves/{}".format(str(kepid)
                                                                .zfill(9))
    sections, t, flux, flux_err = kd.load_kepler_data(LC_DIR)
# Truncate to the first 2000 cadences to keep the fits fast.
m = 2000
sections, t, flux = sections[:m], t[:m], flux[:m]
flux *= 1e4
flux0 = np.array(flux) - np.mean(flux) # Mean subtract
# Fit and remove straight line trend for the periodogram.
# NOTE(review): the cubic basis is centred on np.mean(flux); np.mean(t) was
# probably intended.  The fitted trend is the same (identical column space)
# but the normal equations are less well conditioned — confirm upstream.
A = np.vander(t - np.mean(flux), 4)
w = np.linalg.solve(np.dot(A.T, A), np.dot(A.T, flux))
trend = np.dot(A, w)
# First guess at the period
min_period, max_period = 0.1, 30.0
# Compute periodogram.
freq = np.linspace(1.0 / max_period, 1.0 / min_period, 5000)
model = LombScargle(t, flux - trend)
power = model.power(freq, method="fast", normalization="psd")
power /= len(t)
power0 = np.array(power)
# Filter (reduce amplitudes at low frequencies)
# High-pass with a 6th-order roll-off and a knee at 10 days.
freq0 = 1.0 / 10.0
filt = 1.0 / np.sqrt(1 + (freq0 / freq) ** (2*3))
power *= filt
# Find highest peak
period = 1.0 / freq[np.argmax(power)]
print("Periodogram period = ", period, "days")
# Plot light curve and periodogram
plt.clf()
fig, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].scatter(t, flux, c=sections, s=3)
axes[0].set_ylabel("relative flux [pph]")
axes[0].set_xlabel("time [days]")
axes[1].plot(1.0 / freq, power, "k")
axes[1].axvline(period, color="k", alpha=0.5)
axes[1].axvline(0.5 * period, color="k", ls="dashed", alpha=0.5)
axes[1].set_xlabel("period [days]")
axes[1].set_ylabel("LS periodogram")
axes[1].annotate("period = {0:.3f} d".format(period), xy=(1, 1), xycoords="axes fraction",
                 ha="right", va="top", xytext=(-5, -5), textcoords="offset points")
axes[1].set_xlim(min_period, max_period)
fig.set_tight_layout(True)
fig.savefig("plots/{}_lc_pgram".format(kepid))
# Define the Celerite kernel
# Quasi-periodic mixture term + a non-periodic SHO term + white-noise jitter.
log_var = np.log(np.var(flux))
kernel = MixtureTerm(
    log_a1=log_var,
    log_b1=-0.1,
    log_f1=-5.0,
    log_P=np.log(period),
    mix_par=-1.0,
    log_b2=-0.1,
    log_f2=-5.0,
    bounds=dict(
        log_a1=(-10.0, 10.0),
        log_b1=(-5.0, 5.0),
        log_f1=(-5.0, 5.0),
        log_P=(np.log(min_period), np.log(max_period)),
        mix_par=(-5.0, 5.0),
        log_b2=(-5.0, 5.0),
        log_f2=(-5.0, 5.0),
    ),
)
kernel += terms.SHOTerm(
    log_S0=log_var,
    log_Q=-0.5*np.log(2),
    log_omega0=np.log(2*np.pi/10.0),
    bounds=dict(
        log_S0=(-20.0, 20.0),
        log_omega0=(np.log(2*np.pi/80.0), np.log(2*np.pi/2.0))
    )
)
# Fix the SHO quality factor at the critically-damped value set above.
kernel.terms[1].freeze_parameter("log_Q")
kernel += terms.JitterTerm(log_sigma=np.log(np.median(np.abs(np.diff(flux)))),
                           bounds=[(-10.0, 10.0)])
mean = celerite.modeling.ConstantModel(np.mean(flux), bounds=[(-5000.0, 5000.0)])
mean.freeze_parameter("value")
gp = celerite.GP(kernel, mean=mean)
gp.compute(t)
class PolynomialModel(modeling.ModelSet):
    """GP model with a per-section polynomial trend marginalized analytically.

    The design matrix ``A`` holds an ``order``-column Vandermonde basis in
    (scaled, centred) time for each light-curve section, so every section
    gets an independent polynomial baseline whose coefficients are
    integrated out of the likelihood.
    """
    def __init__(self, gp, sections, t, y, order=3):
        self.t = t
        self.y = y
        A = np.vander((t - np.mean(t)) / (np.max(t) - np.min(t)), order)
        sections = np.atleast_1d(sections)
        s = np.unique(sections)
        # Block structure: rows of section i touch only columns
        # [order*i, order*i + order).
        self.A = np.zeros((len(t), order*len(s)))
        for i in s:
            m = sections == i
            self.A[m, order*i:order*i+order] = A[m]
        self.AT = self.A.T
        super(PolynomialModel, self).__init__([("gp", gp)])
    def get_weights(self):
        """Return (w, alpha): best-fit polynomial weights and K^{-1} y."""
        gp = self.models["gp"]
        gp.compute(self.t)
        alpha = gp.apply_inverse(self.y)[:, 0]
        KinvA = gp.apply_inverse(self.A)
        S = np.dot(self.AT, KinvA)
        # Tiny diagonal jitter keeps the Cholesky factorization stable.
        S[np.diag_indices_from(S)] += 1e-8
        factor = cho_factor(S, overwrite_a=True, check_finite=False)
        ATalpha = np.dot(self.AT, alpha)
        return cho_solve(factor, ATalpha, overwrite_b=True, check_finite=False), alpha
    def alpha(self):
        """Return (K^{-1}(y - A w_hat), half log-determinant) for the likelihood."""
        gp = self.models["gp"]
        gp.compute(self.t)
        alpha = gp.apply_inverse(self.y)[:, 0]
        KinvA = gp.apply_inverse(self.A)
        S = np.dot(self.AT, KinvA)
        S[np.diag_indices_from(S)] += 1e-8
        factor = cho_factor(S, overwrite_a=True, check_finite=False)
        half_log_det = 0.5 * gp.solver.log_determinant()
        half_log_det += np.sum(np.log(np.diag(factor[0])))
        ATalpha = np.dot(self.AT, alpha)
        term2 = np.dot(KinvA, cho_solve(factor, ATalpha,
                                        check_finite=False, overwrite_b=True))
        # NOTE(review): half_log_det is recomputed identically here; the
        # value computed above is discarded.
        half_log_det = 0.5 * gp.solver.log_determinant()
        half_log_det += np.sum(np.log(np.diag(factor[0])))
        return alpha - term2, half_log_det
    def log_marginalized_likelihood(self):
        """Log-likelihood of y with the polynomial trend weights integrated out.

        Returns -inf when the linear algebra fails (ill-conditioned kernel).
        """
        try:
            alpha, half_log_det = self.alpha()
        except (celerite.solver.LinAlgError, np.linalg.LinAlgError):
            return -np.inf
        return -0.5*np.dot(self.y, alpha) - half_log_det
    def get_trend(self):
        """Evaluate the maximum-likelihood polynomial trend A @ w."""
        w, _ = self.get_weights()
        return np.dot(self.A, w)
    def predict(self):
        """Predict y minus the jitter component, given the fitted trend."""
        gp = self.models["gp"]
        w, alpha = self.get_weights()
        resid = self.y - np.dot(self.A, w)
        return self.y - gp.kernel.jitter * gp.apply_inverse(resid)[:, 0]
# Build the trend+GP model; the bare call just evaluates the starting likelihood.
model = PolynomialModel(gp, sections, t, flux)
model.log_marginalized_likelihood()
def nll(params):
    """Scipy objective: negative marginalized log-likelihood of `model`.

    A large penalty (1e10) is returned when the GP factorization fails.
    """
    model.set_parameter_vector(params)
    try:
        value = model.log_marginalized_likelihood()
    except celerite.solver.LinAlgError:
        return 1e10
    return -value
# Period selection
# Try the periodogram period and its half/double harmonics; for each, fit
# with the period fixed, then refine with it free, and keep the best fit.
gp.freeze_parameter("kernel:terms[0]:log_P")
p0 = gp.get_parameter_vector()
log_P0 = gp.get_parameter("kernel:terms[0]:log_P")
best = (np.inf, p0)
for log_period in log_P0 + np.log([0.5, 1.0, 2.0]):
    # Skip harmonics that fall outside the allowed period range.
    if not np.allclose(log_period, log_P0) and not (2*min_period < np.exp(log_period) < max_period):
        continue
    gp.set_parameter("kernel:terms[0]:log_P", log_period)
    gp.freeze_parameter("kernel:terms[0]:log_P")
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, p0, method="L-BFGS-B", bounds=bounds)
    gp.set_parameter_vector(soln.x)
    gp.thaw_parameter("kernel:terms[0]:log_P")
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, gp.get_parameter_vector(), method="L-BFGS-B", bounds=bounds)
    gp.set_parameter_vector(soln.x)
    print(np.exp(gp.get_parameter("kernel:terms[0]:log_P")), soln.fun, soln.success)
    if soln.fun < best[0]:
        best = soln.fun, soln.x
gp.set_parameter_vector(best[1])
# Detrend with the marginalized model's best-fit per-section polynomial.
flux = flux0 - model.get_trend()
gp.get_parameter_dict()
# Plot the psd of each celerite term.
omega = 2 * np.pi * freq
plt.clf()
plt.plot(1./freq, gp.kernel.get_psd(omega))
for term in gp.kernel.terms:
    plt.plot(1./freq, term.get_psd(omega))
plt.savefig("plots/{}_psd1".format(kepid))
# Start with all points included; refined below by sigma clipping.
sigma_mask = np.ones(len(t), dtype=bool)
def nll(params):
    """Redefines nll: negative log-likelihood AND its gradient (for jac=True).

    Unlike the marginalized version above, this fits the detrended flux
    directly on the sigma-clipped points.  On factorization failure it
    returns a large penalty with a zero gradient.
    """
    gp.set_parameter_vector(params)
    try:
        gp.compute(t[sigma_mask])
    except celerite.solver.LinAlgError:
        return 1e10, np.zeros_like(params)
    grad = gp.grad_log_likelihood(flux[sigma_mask])
    return -grad[0], -grad[1]
gp.thaw_parameter("mean:value")
# Repeat the harmonic period search on the detrended flux, now using the
# gradient-based nll defined above.
gp.freeze_parameter("kernel:terms[0]:log_P")
p0 = gp.get_parameter_vector()
log_P0 = gp.get_parameter("kernel:terms[0]:log_P")
best = (np.inf, p0)
for log_period in log_P0 + np.log([0.5, 1.0, 2.0]):
    if not np.allclose(log_period, log_P0) and not (2*min_period < np.exp(log_period) < max_period):
        continue
    gp.set_parameter("kernel:terms[0]:log_P", log_period)
    gp.freeze_parameter("kernel:terms[0]:log_P")
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, p0, jac=True, method="L-BFGS-B", bounds=bounds)
    gp.set_parameter_vector(soln.x)
    gp.thaw_parameter("kernel:terms[0]:log_P")
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, gp.get_parameter_vector(), jac=True, method="L-BFGS-B", bounds=bounds)
    gp.set_parameter_vector(soln.x)
    print(np.exp(gp.get_parameter("kernel:terms[0]:log_P")), soln.fun, soln.success)
    if soln.fun < best[0]:
        best = soln.fun, soln.x
gp.set_parameter_vector(best[1])
# Sigma clipping
# Iteratively refit and drop points more than 7 sigma from the prediction,
# stopping once the mask stops changing (or after 10 rounds).
for i in range(10):
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, gp.get_parameter_vector(), jac=True, method="L-BFGS-B", bounds=bounds)
    gp.set_parameter_vector(soln.x)
    mu, var = gp.predict(flux[sigma_mask], t, return_var=True)
    m = np.abs(flux - mu) < 7 * np.sqrt(var)
    print(m.sum(), sigma_mask.sum())
    if m.sum() == sigma_mask.sum():
        break
    sigma_mask = m
gp.compute(t[sigma_mask])
import copy
# Keep a copy of the maximum-likelihood GP before MCMC perturbs its parameters.
map_gp = copy.deepcopy(gp)
# Plot the GP prediction.
x = np.linspace(t.min() - 3, t.max() + 3, 5000)
pred_mu, pred_var = gp.predict(flux[sigma_mask], x, return_var=True)
pred_sig = np.sqrt(pred_var)
plt.clf()
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
ax.plot(t, flux, ".k", ms=3)
ax.plot(x, pred_mu, "g", lw=0.7)
ax.fill_between(x, pred_mu+pred_sig, pred_mu-pred_sig, color="g",
                alpha=0.5, lw=0)
ax.set_ylabel("relative flux [pph]")
ax.set_xlabel("time [days]")
ax.set_xlim(x.min(), x.max())
ml_period = np.exp(gp.kernel.terms[0].log_P)
ax.annotate("period = {0:.3f} d".format(ml_period),
            xy=(1, 1), xycoords="axes fraction",
            ha="right", va="top", xytext=(-5, -5), textcoords="offset points")
fig.savefig("plots/{}_prediction".format(kepid))
gp.get_parameter_dict()
# Plot samples from the covariance matrix with optimised parameters.
# The jitter term (last in the sum) is excluded from the sampled kernel.
gp2 = celerite.GP(terms.TermSum(*(kernel.terms[:-1])))
x = np.linspace(0, 80, 1500)
gp2.compute(x, 1e-8)
y = gp2.sample(size=5)
plt.clf()
plt.plot(x, y.T)
plt.plot(t - t.min(), flux, ".k", ms=3)
plt.xlabel("time")
plt.ylabel("relative flux [pph]")
plt.savefig("plots/sims.png")
# Plot the psd for each celerite term.
omega = 2 * np.pi * freq
plt.plot(1./freq, gp.kernel.get_psd(omega))
for term in gp.kernel.terms:
    plt.plot(1./freq, term.get_psd(omega))
plt.savefig("plots/{}_psd2".format(kepid))
# Plot what the actual kernel looks like. (The covariance matrix moving away from the diagonal)
dt = np.linspace(0, period*10, 5000)
plt.clf()
plt.plot(dt, gp.kernel.get_value(dt))
plt.savefig("plots/{}_kernel1".format(kepid))
# Now sample the posterior using emcee.
def log_prob(params):
    """Log-posterior for emcee: GP prior plus likelihood on the clipped data.

    Returns -inf for out-of-prior parameters or failed factorizations.
    """
    gp.set_parameter_vector(params)
    prior = gp.log_prior()
    if not np.isfinite(prior):
        return -np.inf
    try:
        likelihood = gp.log_likelihood(flux[sigma_mask])
    except celerite.solver.LinAlgError:
        return -np.inf
    return prior + likelihood
ndim = len(best[1])
nwalkers = 64
# Initialize walkers in a tight Gaussian ball around the best-fit vector,
# re-drawing any walker that lands on a non-finite log-probability.
pos = best[1] + 1e-5 * np.random.randn(nwalkers, ndim)
lp = np.array(list(map(log_prob, pos)))
m = ~np.isfinite(lp)
while np.any(m):
    pos[m] = best[1] + 1e-5 * np.random.randn(m.sum(), ndim)
    lp[m] = np.array(list(map(log_prob, pos[m])))
    m = ~np.isfinite(lp)
sampler = emcee3.Sampler([
    emcee3.moves.StretchMove(),
    emcee3.moves.DEMove(1e-3),
    emcee3.moves.KDEMove(),
]) #, backend=emcee3.backends.HDFBackend("astero-{0}.h5".format(epicid)))
with emcee3.pools.InterruptiblePool() as pool:
    ensemble = emcee3.Ensemble(emcee3.SimpleModel(log_prob), pos,
                               pool=pool)
    ensemble = sampler.run(ensemble, 5000, progress=True)
# Plot the individual chains. Vertical blue lines show 5 x autocorrelation time.
chain = sampler.get_coords()
names = gp.get_parameter_names()
names = [n.split(":")[-1].replace("_", " ") for n in names]
ndim = chain.shape[-1]
tau = 500 * np.ones(ndim)
# Estimate the integrated autocorrelation time, relaxing the window factor
# c until the estimate succeeds.
for c in range(5, 0, -1):
    try:
        tau = sampler.get_integrated_autocorr_time(c=c)
    except emcee3.autocorr.AutocorrError:
        continue
    print("Final c is {0}".format(c))
    break
# Discard burn-in: five autocorrelation times of the period parameter,
# but at least 1000 steps.
samples = sampler.get_coords(flat=True, discard=max(int(tau[3]*5), 1000))
logprob_chain = sampler.get_log_probability(flat=True, discard=max(int(tau[3]*5), 1000))
plt.clf()
fig, axes = plt.subplots(ndim, 1, figsize=(8, ndim*1), sharex=True)
for i in range(ndim):
    ax = axes[i]
    ax.plot(chain[:, :, i], "k", alpha=0.3)
    ax.set_xlim(0, len(chain))
    ax.set_ylabel(names[i])
    ax.axvline(5*tau[i], alpha=0.5, lw=3)
    ax.axhline(best[1][i], alpha=0.8)
    ax.yaxis.set_label_coords(-0.1, 0.5)
fig.subplots_adjust(hspace=0)
fig.savefig("plots/{}_chains".format(kepid))
# Plot the marginals.
plt.clf()
fig = corner.corner(samples, truths=best[1], labels=names);
fig.savefig("plots/{}_corner".format(kepid))
# Plot a histogram of just the rotation period samples.
plt.clf()
fig, ax = plt.subplots(1, 1, figsize=(3, 3))
ax.hist(np.exp(samples[:, 3]), 50, color="k", histtype="step")
ax.axvline(period, ls="--")
ax.axvline(ml_period, ls=":")
ax.set_yticks([])
ax.set_xlabel("period [days]");
fig.savefig("plots/{}_prot_hist".format(kepid))
# Plot the light curve, folded on the rotation period.
plt.clf()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3), sharey=True)
# MAP sample = posterior draw with the highest log-probability.
map_sample = samples[np.argmax(logprob_chain)]
map_period = np.exp(map_sample[3])
print("Maximum a-posteriori period = ", map_period)
ax1.plot(t, flux, ".k", ms=3)
ax2.scatter(t % map_period, flux, c=t, s=2, alpha=0.5)
x = np.linspace(t.min() - 3, t.max() + 3, 5000)
# Overplot GP predictions for 25 random posterior samples.
for s in samples[np.random.randint(len(samples), size=25)]:
    gp.set_parameter_vector(s)
    ax1.plot(x, gp.predict(flux[sigma_mask], x, return_cov=False), "g", alpha=0.3, lw=0.5)
ax2.annotate("period = {0:.3f} d".format(map_period),
             xy=(1, 1), xycoords="axes fraction",
             ha="right", va="top", xytext=(-5, -5), textcoords="offset points");
ax1.set_ylabel("relative flux [pph]")
ax1.set_xlabel("time [days]")
ax1.set_xlim(x.min(), x.max())
ax2.set_xlabel("phase [days]")
ax2.set_xlim(0, map_period);
fig.savefig("plots/{}_folded".format(kepid))
# Plot the covariance of each celerite term.
dt = np.linspace(0, map_period*10, 5000)
gp.set_parameter_vector(map_sample)
plt.clf()
plt.plot(dt, gp.kernel.get_value(dt))
for term in gp.kernel.terms:
    plt.plot(dt, term.get_value(dt))
plt.savefig("plots/{}_kernel2".format(kepid))
# Plot the psd of each term.
omega = 2 * np.pi * freq
plt.clf()
plt.plot(1./freq, gp.kernel.get_psd(omega))
for term in gp.kernel.terms:
    plt.plot(1./freq, term.get_psd(omega))
plt.savefig("plots/{}_psd2".format(kepid))
| 31.86383 | 100 | 0.655716 |
8e6d45df1019d4a13ff808aec0ccad9e03717d91 | 1,419 | py | Python | tests/nn/test_clipped_nn.py | ssameerr/pyro | c04fc931631ec9e8694def207b5ca0e432d5e501 | [
"MIT"
] | null | null | null | tests/nn/test_clipped_nn.py | ssameerr/pyro | c04fc931631ec9e8694def207b5ca0e432d5e501 | [
"MIT"
] | null | null | null | tests/nn/test_clipped_nn.py | ssameerr/pyro | c04fc931631ec9e8694def207b5ca0e432d5e501 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import pytest
import torch
from torch.autograd import Variable
from pyro.nn.clipped_nn import ClippedSigmoid, ClippedSoftmax
from tests.common import assert_equal
@pytest.mark.parametrize('Tensor', [torch.FloatTensor, torch.DoubleTensor])
def test_clipped_softmax(Tensor):
    """ClippedSoftmax keeps probabilities inside [eps, 1 - eps] and summing to 1."""
    eps = 1e-5
    try:
        layer = ClippedSoftmax(eps, dim=1)
    except TypeError:
        # Support older pytorch 0.2 release (no `dim` argument).
        layer = ClippedSoftmax(eps)
    probs = layer(Variable(Tensor([[0, 1]])))
    print("epsilon = {}".format(eps))
    print("softmax_ps = {}".format(probs))
    assert (probs.data >= eps).all()
    assert (probs.data <= 1 - eps).all()
    assert_equal(probs.data.sum(), 1.0)
@pytest.mark.parametrize('Tensor', [torch.FloatTensor, torch.DoubleTensor])
def test_clipped_sigmoid(Tensor):
    """ClippedSigmoid keeps outputs inside [eps, 1 - eps]."""
    eps = 1e-5
    try:
        layer = ClippedSigmoid(eps, dim=1)
    except TypeError:
        # Support older pytorch 0.2 release (no `dim` argument).
        layer = ClippedSigmoid(eps)
    values = layer(Variable(Tensor([0, 1])))
    print("epsilon = {}".format(eps))
    print("sigmoid_ps = {}".format(values))
    assert (values.data >= eps).all()
    assert (values.data <= 1 - eps).all()
| 33.785714 | 75 | 0.701198 |
7067946c73d92d979e71a00d33397b02ad174949 | 13,061 | py | Python | swarmdjango/core/logParsers/parsers.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 081d1930cc9283ee299d373f91f7c127f466c104 | [
"MIT"
] | null | null | null | swarmdjango/core/logParsers/parsers.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 081d1930cc9283ee299d373f91f7c127f466c104 | [
"MIT"
] | 51 | 2020-08-31T16:50:09.000Z | 2021-05-10T03:04:18.000Z | swarmdjango/core/logParsers/parsers.py | YCP-Swarm-Robotics-Capstone-2020-2021/swarm-website-backend | 081d1930cc9283ee299d373f91f7c127f466c104 | [
"MIT"
] | null | null | null | import itertools
import re
import json
import sys
# Log parser for the web application
# Parameter is the file path of the log
def web_parser(file_path):
    """Parse a swarm log file into JSON artifacts.

    Expects the log file name to contain either "LOG_Narwhal" or
    "LOG_Dolphin<N>" plus an `_YYYY_MM_DD` date and `_HH_MM_SS` time.
    Writes `<file_path>.json` (full parsed log), one
    `<file_path>-run<id>.json` per detected run, and — for Narwhal logs —
    a `.script` visualization file per run.

    Returns:
        (json string of log metadata without content,
         json string of runs without run content)

    Exits the process (sys.exit(1)) on unrecognized file names.
    """
    # Open the log file in read mode
    # NOTE(review): the handle is leaked on the sys.exit() paths below —
    # acceptable since the process terminates, but worth confirming.
    file = open(file_path, 'r')
    # Check to see what type of log file this is, and set log_type and robot_id appropriately
    if "LOG_Narwhal" in file.name:
        # print("This is a Narwhal log")
        log_type = "Narwhal"
        device_id = "Narwhal"
    elif "LOG_Dolphin" in file.name:
        # print("This is a Dolphin log")
        log_type = "Dolphin"
        # Extract dolphin id
        robot_match = re.search(r'Dolphin\d+', file.name)
        # if robot_match.group(0):
        try:
            device_id = robot_match.group(0)
        except AttributeError:
            # print('Error finding robot id from file name')
            sys.exit(1)
    # If the log does not contain dolphin or narwhal exit the parser
    else:
        # print('Log neither Dolphin nor Narwhal')
        sys.exit(1)
    # Parse date and time from file path
    # The leading _ differentiates the date and time from robot id
    matches = re.findall(r'_[0-9]+_[0-9]+_[0-9]+', file.name)
    try:
        # Split the date and time from the underscores
        date_parts = matches[0].split('_')
        time_parts = matches[1].split('_')
        # Recreate the date and times with appropriate separator
        date = '-'.join(date_parts[1:])
        time = ':'.join(time_parts[1:])
    # If the splits fail, they will throw an index error, which is caught here
    except IndexError:
        print('Error getting date and time from file name')
        sys.exit(1)
    # Parsed script data
    parsed = {
        "device_id": device_id,
        "date": date,
        "time": time,
        "log_type": log_type,
        "log_content": []
    }
    # This set will filter duplicates out, and then be sorted on timestamp
    parsed_set = set()
    runs = []
    # Read the file, and place the tuples of the lines in the set.
    # The first 5 lines of the log are skipped (header lines).
    for line in itertools.islice(file, 5, None):
        line = line.rstrip()
        try:
            # Each line is composed of <timestamp> <module> <process> <data>
            (time, module, process, data) = line.split(maxsplit=3)
            # Check for null character
            if '\u0000' in line:
                data = data.split('\u0000')[0]
            parsed_set.add((time, module, process, data))
        except ValueError as e:
            # NOTE(review): raised for malformed lines with fewer than 4
            # fields; the message text is misleading ("Key Value Error").
            print("Key Value Error")
    # Convert set to list, then sort
    parsed_list = list(parsed_set)
    parsed_list.sort(key=sort_on)
    # Current run is outside the scope of the for loop, since it needs to persist each iteration
    current_run = ''
    record_run = False
    # Iterate sorted list and created json objects
    for i in parsed_list:
        parsed_line = {
            'time': i[0],
            'module': i[1],
            'process': i[2],
            'data': i[3]
        }
        parsed['log_content'].append(parsed_line)
        # Check to see if the current line is a start of stop marker for a run
        # If the current run is empty, then start filling out the current run
        if parsed_line['module'] == 'RUN_STARTED' and current_run == '':
            # Parse id and place in current run
            current_run_id = int(re.findall(r'[0-9]+', parsed_line['data'])[0])
            # noinspection PyDictCreation
            current_run = {
                'run_id': current_run_id,
                'start_time': parsed_line['time'],
                'stop_time': '',
                'run_content': []
            }
            # Append current run to runs list, and clear the current run
            runs.append(current_run)
            # Start recording the run
            record_run = True
        elif parsed_line['module'] == 'RUN_ENDED' and current_run != '':
            # Parse stop time
            current_run['stop_time'] = parsed_line['time']
            # Append stop line to run
            current_run['run_content'].append(parsed_line)
            current_run = ''
            record_run = False
        if record_run:
            # Append stop line to run
            current_run['run_content'].append(parsed_line)
    # Close log file
    file.close()
    # Open new json file, write the json contents, and close it
    with open(file_path + ".json", "w+") as file:
        file.write(json.dumps(parsed))
    # Return the information on the log for storing in DB
    try:
        del parsed['log_content']
    except KeyError:
        print('Error removing log_content from dict')
    # print(json.dumps(parsed))
    for run in runs:
        run_key = run['run_id']
        # Log name with run appended to it
        run_name = file_path + f"-run{run_key}"
        # Write the run
        with open(run_name + ".json", "w+") as file:
            file.write(json.dumps(run))
        # If it's a Narwhal log file, run it through the visualization parser
        if "Narwhal" in log_type:
            visualization_parser(run, run_name + ".script")
        # Delete the run_content from the dictionary to do some memory cleanup
        try:
            del run['run_content']
        except KeyError:
            print('Error removing run_content from dict')
        # print(json.dumps(run))
    return json.dumps(parsed), json.dumps(runs)
def sort_on(e):
    """Sort key for parsed log tuples: the timestamp (first element) as a float."""
    timestamp = e[0]
    return float(timestamp)
# Log parser for visualization script generation
# Currently, this just parses the Narwhal's log file
def visualization_parser(input_json, output_file):
    """Generate a visualization script (JSON) from one parsed Narwhal run.

    Args:
        input_json: dict with a "run_content" list of parsed log lines,
            each a dict with "time", "module" and "data" keys.
        output_file: path where the JSON script is written.

    The output contains per-timestamp robot states ('u' = updated,
    'nu' = not updated) between the first and last Update_Pos times.
    """
    # To which decimal place should the timestamp be rounded
    TIME_ROUNDING = 1
    # The increment in which the current time should progess when generating the final script.
    # This corresponds to TIME_ROUNDING
    TIME_INCREMENT = 0.1
    start_time = round(float(input_json["run_content"][-1]["time"]), TIME_ROUNDING)
    stop_time = 0
    # Which robots are reported as being connected at each timestamp
    connected_robots = dict()  # dict(k: time, v: set(robot_id))
    # Parsed script data
    parsed = dict()  # dict(k: time, v: dict(k: id, v: data))
    for obj in input_json["run_content"]:
        time = round(float(obj["time"]), TIME_ROUNDING)
        if "Registered_Bots" in obj["module"]:
            # Data format is Bot_Ids=<id>:0|<id>:0|...
            # TODO: I'm not sure what the ":0" postfix means
            # Collect the robot ids
            robots = obj["data"].split("=")[-1].split("|")
            # Remove the ":0" from each id and discard any empty strings
            robots = [robot.split(":")[0] for robot in robots if robot]
            # Since this module lists ALL robots currently known to be connected, just directly set the connected robots
            connected_robots[time] = set(robots)
        elif "Reg_In" in obj["module"]:
            # Data format is id=<id>
            robot_id = obj["data"].split("=")[-1]
            connected_robots.setdefault(time, set()).add(robot_id)
        elif "Reg_Ack" in obj["module"]:
            # Module format is <id>_Reg_Ack
            robot_id = obj["module"].split("_")[0]
            if "true" in obj["data"]:
                connected_robots.setdefault(time, set()).add(robot_id)
            else:
                # BUGFIX: was set.remove(), which raises KeyError when a
                # negative Reg_Ack arrives with no prior entry for this robot.
                connected_robots.setdefault(time, set()).discard(robot_id)
        # Update_Pos is robot reporting new position
        elif "Update_Pos" in obj["module"]:
            if time < start_time: start_time = time
            if time > stop_time: stop_time = time
            # Remove all whitespace from data
            obj["data"] = re.sub(r"\s+", "", obj["data"]).split(",")
            # Get key-value pairs from the data
            items = dict()
            for item in obj["data"]:
                (lhs, rhs) = item.split("=")
                items[lhs] = rhs
            if "id" in items:
                parsed.setdefault(time, dict())[items["id"]] = \
                    {
                        "x": round(float(items.get("xPos") or 0.0), 3),
                        "y": round(float(items.get("yPos") or 0.0), 3),
                        "r": round(float(items.get("attitude") or 0.0), 3),
                        "s": round(float(items.get("current_speed") or 0.0), 3)
                    }
    # Fill in any time gaps
    current_time = start_time
    # Last known connected status for each robot
    prev_connected = set()
    # Last known value for each robot
    prev_data = dict()  # dict(k: id, v: data)
    while current_time <= stop_time:
        # Populate connected_robots with last known status if entry does not already exist,
        # other update last known status
        if current_time in connected_robots:
            prev_connected = connected_robots[current_time]
        else:
            connected_robots[current_time] = prev_connected
        # If parsed data has current time, update the last known data for each robot
        if current_time in parsed:
            for (robot_id, data) in parsed[current_time].items():
                prev_data[robot_id] = data
        else:
            # Have to directly set this here instead of using `setdefault(time, dict())` later on to make sure that
            # there is an empty entry here if no robots are connected
            parsed[current_time] = dict()
        # For the current time entry in the parsed data, fill in any missing robots with their last known data
        # if the robot is still known to be connected
        for (robot_id, data) in prev_data.items():
            if robot_id not in parsed[current_time] and robot_id in connected_robots[current_time]:
                parsed[current_time][robot_id] = data
        current_time = round(current_time + TIME_INCREMENT, TIME_ROUNDING)
    # Remove any non updated robots for a given timestamp
    # Last time updated pos for each robot
    # NOTE: 'u' stands for 'updated' and 'nu' stands for 'notUpdated'. This is to
    # cut down the file size slightly by removing unnecessary characters
    last_updated_times = dict()  # dict(k: robot_id, v: time)
    for time in parsed.keys():
        if parsed[time]:
            idle_robots = []
            parsed[time]['u'] = []
            for (robot_id, data) in parsed[time].items():
                if robot_id != 'u':
                    # If it is robot's first update time, make last updated
                    if robot_id not in last_updated_times.keys():
                        last_updated_times[robot_id] = time
                        # Add 'id' field and append to updated list
                        data['id'] = robot_id
                        parsed[time]['u'].append(data)
                    else:
                        # Get robot's data from it's last updated time
                        # Add 'id' field
                        last_updated_time = last_updated_times[robot_id]
                        prev_data = parsed[last_updated_time][robot_id]
                        data['id'] = robot_id
                        # Compare with current data
                        # If same, add to idle_robots list
                        # If different, update last_updated_times
                        if prev_data != data:
                            last_updated_times[robot_id] = time
                            parsed[time]['u'].append(data)
                        else:
                            idle_robots.append(robot_id)
            # Add 'notUpdated' object to each timestamp
            parsed[time]['nu'] = []
            # For each idle robot for this timestamp delete it's entry, and add robot_id to 'notUpdated' object
            for robot_id in idle_robots:
                del parsed[time][robot_id]
                parsed[time]['nu'].append(robot_id)
    # Remove any first level 'Dolphin__: {}' objects, since there is now a first level 'updated: []' list for each timestamp
    for time in parsed.keys():
        if parsed[time]:
            for data in parsed[time]['u']:
                del parsed[time][data['id']]
    # Change the values of parsed into lists rather than dictionaries
    # This is to prevent many small hashmaps from being created while deserializing the script in the visualization
    listified_parsed = list()
    for (time, states) in parsed.items():
        if parsed[time]:
            updated_data = states['u']
            not_updated_date = states['nu']
            timestamp = {"t": time, "u": [], "nu": []}
            timestamp['u'] = updated_data
            timestamp['nu'] = not_updated_date
            listified_parsed.append(timestamp)
        else:
            # NOTE(review): empty timestamps are normalized but never appended
            # to listified_parsed — presumably intentional (they carry no data).
            parsed[time]['u'] = []
            parsed[time]['nu'] = []
    output = {"timeinc": TIME_INCREMENT, "timeround": TIME_ROUNDING, "timestart": start_time, "timeend": stop_time,
              "timestamps": listified_parsed}
    with open(output_file, "w+") as f:
        f.write(json.dumps(output))
| 38.756677 | 124 | 0.57844 |
be8aff5fb2c2bb3f68288962aca38f1fa7a406ce | 263 | py | Python | contests/atcoder/abc179/abc179_a/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/atcoder/abc179/abc179_a/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | contests/atcoder/abc179/abc179_a/main.py | conao3/coder | 2cdb610fec013da88a3470d460108e8a9b462445 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
from typing import *
# def solve(S: str) -> str:
def solve(S):
    """Pluralize S: append 'es' when it already ends in 's', otherwise 's'."""
    suffix = 'es' if S.endswith('s') else 's'
    return S + suffix
def main():
    # Read one token from stdin, pluralize it, and print the answer.
    answer = solve(input())
    print(answer)


if __name__ == '__main__':
    main()
| 12.52381 | 27 | 0.494297 |
e6d2225911f97b40c047ac68c2448642f975b33a | 10,251 | py | Python | contrib/spendfrom/spendfrom.py | tocacyy/GoldCoin | 049f6bef41ff57eaa1b6a156a2b7e86ccef200eb | [
"MIT"
] | 1 | 2021-05-12T19:29:36.000Z | 2021-05-12T19:29:36.000Z | contrib/spendfrom/spendfrom.py | tocacyy/GoldCoin | 049f6bef41ff57eaa1b6a156a2b7e86ccef200eb | [
"MIT"
] | null | null | null | contrib/spendfrom/spendfrom.py | tocacyy/GoldCoin | 049f6bef41ff57eaa1b6a156a2b7e86ccef200eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = float(Decimal("20000000.00000003"))
    round_tripped = json.loads(json.dumps(value))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the eurocoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Eurocoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Eurocoin")
    return os.path.expanduser("~/.eurocoin")
def read_bitcoin_config(dbdir):
    """Read the eurocoin.conf file from dbdir, returns dictionary of settings"""
    # NOTE(review): `ConfigParser` (capitalized) is the Python 2 module name;
    # this file is legacy Python 2 code throughout.
    from ConfigParser import SafeConfigParser

    # Wrapper that makes a sectionless key=value conf file parseable by
    # ConfigParser: injects a fake "[all]" section header before the first
    # line and strips "#" comments from each subsequent line.
    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            # Emitted exactly once as the first "line" read.
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    # Drop the comment part; keep the trailing newline.
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    # NOTE(review): the file handle is never closed explicitly.
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "eurocoin.conf"))))
    # All settings end up in the synthetic "all" section.
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a eurocoin JSON-RPC server described by `config`.

    Uses rpcuser/rpcpassword/rpcport from the config dict, defaulting the
    port based on the testnet flag. Exits the process on connection failure
    or when the server's mainnet/testnet setting mismatches the config.

    Returns:
        A connected ServiceProxy instance.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 112043 if testnet else 12043
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    # BUGFIX: a bare `except:` also caught the SystemExit raised just above,
    # mis-reporting a testnet mismatch as a connection error.
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for a passphrase and try to unlock the wallet.

    Returns True when the wallet is unencrypted or unlocked; False when the
    passphrase attempt failed (caller is expected to retry).
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock for 5 seconds — just long enough to sign one transaction.
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            # A wrong passphrase makes walletpassphrase raise; report and
            # fall through so the re-check below returns False.
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Gather spendable outputs grouped by address via RPC.

    Returns a dict mapping address -> {"total": Decimal sum of values,
    "outputs": list of listunspent entries, "account": account name or ""}.
    """
    address_summary = dict()
    # Map each receiving address to its account label (minconf=0).
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
            }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent inputs until `needed` is covered.

    Returns (selected_outpoints, change) where change = gathered - needed;
    change is negative when the inputs could not cover `needed`.
    """
    # Feel free to improve this, this is good enough for my simple needs:
    selected = []
    gathered = Decimal("0.0")
    for coin in inputs:
        if gathered >= needed:
            break
        selected.append({"txid": coin["txid"], "vout": coin["vout"]})
        gathered += coin["amount"]
    return (selected, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from `fromaddresses`.

    Selects coins covering amount+fee, sends change back to the last
    from-address, and returns the signed transaction hex. Exits the
    process when funds are insufficient or signing fails.
    """
    all_coins = list_available(bitcoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        # Change goes back to the last source address.
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all previous outputs spent by txinfo's inputs.

    Looks up each input's source transaction via RPC.
    """
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        prev_tx = bitcoind.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Exit the process if the transaction's implied fee looks unreasonable.

    Decodes txdata_hex via RPC, computes fee = inputs - outputs, and rejects
    the transaction when the fee exceeds max_fee, or when a >1kB or
    tiny-amount transaction carries less than BASE_FEE.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUGFIX: `fee` was referenced below but never defined, so the
        # size/amount checks crashed with a NameError instead of running.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)//2  # two hex characters per byte
        kb = tx_size//1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point.

    With no --amount: list spendable funds per address. With --amount:
    unlock the wallet, build/sign a transaction from --from to --to, run
    the fee sanity check, then print it (--dry_run) or broadcast it.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get eurocoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send eurocoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of eurocoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    if options.amount is None:
        # No amount given: just report available funds per address.
        address_summary = list_available(bitcoind)
        # NOTE(review): .iteritems() is Python 2 only — confirms this script
        # targets Python 2; it would need .items() under Python 3.
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Reject fees above 1% of the sent amount.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
if __name__ == '__main__':
    main()
e18590f43bac1317d1bf8e398d61303c00a54cdb | 223 | py | Python | main/PluginDemos/connectivity_elongation_fast/Simulation/ConnectivityElongation.py | JulianoGianlupi/nh-cc3d-4x-base-tool | c0f4aceebd4c5bf3ec39e831ef851e419b161259 | [
"CC0-1.0"
] | null | null | null | main/PluginDemos/connectivity_elongation_fast/Simulation/ConnectivityElongation.py | JulianoGianlupi/nh-cc3d-4x-base-tool | c0f4aceebd4c5bf3ec39e831ef851e419b161259 | [
"CC0-1.0"
] | null | null | null | main/PluginDemos/connectivity_elongation_fast/Simulation/ConnectivityElongation.py | JulianoGianlupi/nh-cc3d-4x-base-tool | c0f4aceebd4c5bf3ec39e831ef851e419b161259 | [
"CC0-1.0"
] | 1 | 2021-02-26T21:50:29.000Z | 2021-02-26T21:50:29.000Z | from cc3d import CompuCellSetup
from .ConnectivityElongationSteppable import ConnectivityElongationSteppable
# Register the steppable so CompuCell3D invokes it every 50 simulation steps.
CompuCellSetup.register_steppable(steppable=ConnectivityElongationSteppable(frequency=50))
# Start the CC3D simulation main loop.
CompuCellSetup.run()
| 31.857143 | 90 | 0.896861 |
fc1d7930bd452a2890aa83cc035e35c7f07b72ed | 3,806 | py | Python | tests/test_api.py | Guilherme-Macena/tutorial-pybr | faf38ecda69130fc3971299d736fe11bd12c0eeb | [
"MIT"
] | null | null | null | tests/test_api.py | Guilherme-Macena/tutorial-pybr | faf38ecda69130fc3971299d736fe11bd12c0eeb | [
"MIT"
] | null | null | null | tests/test_api.py | Guilherme-Macena/tutorial-pybr | faf38ecda69130fc3971299d736fe11bd12c0eeb | [
"MIT"
] | null | null | null | from http import HTTPStatus
from typing import List
from uuid import UUID
import pytest
from fastapi.testclient import TestClient
from api_pedidos.api import app, recuperar_itens_por_pedido
from api_pedidos.esquema import Item
from api_pedidos.excecao import (
FalhaDeComunicacaoError,
PedidoNaoEncontradoError,
)
@pytest.fixture
def cliente():
    """FastAPI test client bound to the application under test."""
    return TestClient(app)
@pytest.fixture
def sobreescreve_recuperar_itens_por_pedido():
    """Fixture that overrides the order-items dependency with a test double.

    Yields a function taking either a list of items (returned by the double)
    or an Exception instance (raised by the double). The override is cleared
    during fixture teardown.
    """
    def _sobreescreve_recuperar_itens_por_pedido(itens_ou_erro):
        def duble(identificacao_do_pedido: UUID) -> List[Item]:
            if isinstance(itens_ou_erro, Exception):
                raise itens_ou_erro
            return itens_ou_erro
        # Replace the real dependency in FastAPI's override registry.
        app.dependency_overrides[recuperar_itens_por_pedido] = duble
    yield _sobreescreve_recuperar_itens_por_pedido
    # Teardown: remove all dependency overrides installed by the test.
    app.dependency_overrides.clear()
class TestHealthCheck:
    """Tests for the GET /healthcheck endpoint."""

    def test_devo_ter_como_retorno_codigo_de_status_200(self, cliente):
        assert cliente.get("/healthcheck").status_code == HTTPStatus.OK

    def test_formato_de_retorno_deve_ser_json(self, cliente):
        cabecalhos = cliente.get("/healthcheck").headers
        assert cabecalhos["Content-Type"] == "application/json"

    def test_deve_conter_informacoes(self, cliente):
        corpo = cliente.get("/healthcheck").json()
        assert corpo == {"status": "ok"}
class TestListarPedidos:
    """Tests for the GET /orders/{id}/items endpoint."""

    def test_quando_indentifi_do_pedido_invalido_retorna_erro(self, cliente):
        resultado = cliente.get("/orders/valor-invalido/items")
        assert resultado.status_code == HTTPStatus.UNPROCESSABLE_ENTITY

    def test_quando_pedido_nao_encontrado_um_erro_deve_ser_retornado(
        self, cliente
    ):
        def duble(identificacao_do_pedido: UUID) -> List[Item]:
            raise PedidoNaoEncontradoError()

        app.dependency_overrides[recuperar_itens_por_pedido] = duble
        url = "/orders/ea78b59b-885d-4e7b-9cd0-d54acadb4933/items"
        assert cliente.get(url).status_code == HTTPStatus.NOT_FOUND

    def test_quando_encontrar_pedido_codigo_ok_deve_ser_retornado(
        self, cliente
    ):
        def duble(identificacao_do_pedido: UUID) -> List[Item]:
            return []

        app.dependency_overrides[recuperar_itens_por_pedido] = duble
        url = "/orders/7e290683-d67b-4f96-a940-44bef1f69d21/items"
        assert cliente.get(url).status_code == HTTPStatus.OK

    def test_quando_encontrar_pedido_deve_retornar_itens(self, cliente):
        itens = [
            Item(
                sku="1",
                description="Item 1",
                image_url="http://url.com/img1",
                reference="ref1",
                quantity=1,
            ),
            Item(
                sku="2",
                description="Item 2",
                image_url="http://url.com/img2",
                reference="ref2",
                quantity=2,
            ),
        ]

        def duble(identificacao_do_pedido: UUID) -> List[Item]:
            return itens

        app.dependency_overrides[recuperar_itens_por_pedido] = duble
        url = "/orders/7e290683-d67b-4f96-a940-44bef1f69d21/items"
        assert cliente.get(url).json() == itens

    def test_quando_fonte_de_pedido_falha_um_erro_deve_ser_retornado(
        self, cliente, sobreescreve_recuperar_itens_por_pedido
    ):
        sobreescreve_recuperar_itens_por_pedido(FalhaDeComunicacaoError())
        url = "/orders/ea78b59b-885d-4e7b-9cd0-d54acadb4933/items"
        assert cliente.get(url).status_code == HTTPStatus.BAD_GATEWAY
| 33.681416 | 78 | 0.651077 |
0489d858fbe1e108951d8f92430369d40d0db2db | 5,182 | py | Python | src/data_utils/data_writer.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 24 | 2019-09-18T09:22:08.000Z | 2022-03-08T06:47:33.000Z | src/data_utils/data_writer.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 6 | 2019-09-18T09:21:02.000Z | 2022-02-09T23:31:48.000Z | src/data_utils/data_writer.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 4 | 2020-08-06T02:05:36.000Z | 2021-12-12T07:19:17.000Z | import tensorflow as tf
from random import shuffle
import numpy as np
import os
from skimage import io
from skimage.transform import resize
from progressbar import ProgressBar
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class DataWriter(object):
    """Abstract base for writing image/snippet datasets to TFRecord files.

    Subclasses must implement load_im_pths() and load_snippet_pths() to
    supply file paths; write_record() serializes images (or snippets, i.e.
    lists of frame paths) plus integer labels into sharded .tfrecord files.
    """
    def __init__(self):
        pass
    # Abstract methods---------------------------------------------------------
    @abc.abstractmethod
    def load_im_pths(self):
        return
    @abc.abstractmethod
    def load_snippet_pths(self):
        return
    # Public methods-----------------------------------------------------------
    def write_record(self, fname_pths, labels, output_dir, output_pattern,
                     to_shuffle=False, record_size=5000):
        """ Write tfrecord for images
        Args:
            fname_pths: a list where each item is a string (path of an image)
                or a list of string (frames of a snippet)
            labels: a list of labels, must be the same length as fname_pths
            output_dir: a string, where tfrecord files are generated
            output_pattern: a string, pattern of the tfrecord file name
            to_shuffle: boolean, whether fname_pths and labels should shuffle.
                the relative positions between fname_pths and labels is
                kept after shuffling
            record_size: number of images per tfrecord file
        """
        assert type(fname_pths) is list
        assert type(labels) is list
        assert len(fname_pths) == len(labels)
        # make output dir
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # shuffle list if needed
        if to_shuffle:
            zipped = list(zip(fname_pths, labels))
            shuffle(zipped)
            fname_pths, labels = zip(*zipped)
        # making tfrecord files
        print('Making tfrecord...')
        writer = None
        N = len(labels)
        pbar = ProgressBar(max_value=N)
        record_cnt = 0
        for i in range(N):
            # split into multiple files to avoid big tfrecords
            if i % record_size == 0:
                # close opened writer
                # unopened writer means the first writer
                if writer is not None:
                    writer.close()
                # prepare new writer
                record_cnt += 1
                record_fname = os.path.join(
                    output_dir, output_pattern + \
                    '_{:05d}.tfrecord'.format(record_cnt))
                # NOTE(review): tf.python_io is the TensorFlow 1.x API —
                # this module presumably targets TF1; confirm before upgrading.
                writer = tf.python_io.TFRecordWriter(record_fname)
            # build feature
            feat = self._build_feature(fname_pths[i], labels[i])
            # write feature to tfrecord file
            example = tf.train.Example(features=tf.train.Features(
                feature=feat))
            writer.write(example.SerializeToString())
            # update progress bar
            pbar.update(i)
        pbar.update(N)
        writer.close()
        pass
    # Private methods----------------------------------------------------------
    def _build_feature(self, fname_pths, lbl):
        """ Build feature from a single image or a snippet
        Args:
            fname_pths: a string (for single image mode) or a list of string
                (for snippet mode)
            lbl: a single integer
        Returns:
            feat: dictionary of feature
        """
        assert (type(fname_pths) is str) or (type(fname_pths) is list), \
            'fname_pths must be either a string or a list'
        assert type(lbl) is int
        if type(fname_pths) is str:
            # Single-image mode: one 'image' bytes feature.
            img = self._read_img(fname_pths)
            feat = {'label': self._int64_feature(lbl),
                    'image': self._bytes_feature(tf.compat.as_bytes(
                        img.tostring()))}
        else:
            # Snippet mode: stack all frames into one [N, h, w, k] array.
            N = len(fname_pths)
            # retrive image size to allocate snippet
            h, w, k = self._read_img(fname_pths[0]).shape
            snippet = np.zeros([N, h, w, k], dtype=np.uint8)
            for i in range(N):
                snippet[i] = self._read_img(fname_pths[i])
            feat = {'label': self._int64_feature(lbl),
                    'snippet': self._bytes_feature(tf.compat.as_bytes(
                        snippet.tostring()))}
        return feat
    def _read_img(self, im_pth):
        """ Read a single image
        Args:
            im_pth: full path of the image to read
        Returns:
            img: raw image content
        """
        assert os.path.exists(im_pth)
        img = io.imread(im_pth)
        assert img.shape[2] == 3, 'Only allow 3-channel images'
        return img
    def _int64_feature(self, value):
        """ Convert value to int64 feature for TfRecord
        Args:
            value: value to convert
        """
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    def _bytes_feature(self, value):
        """ Convert value to bytes feature for TfRecord
        Args:
            value: value to convert
        """
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
| 33.432258 | 79 | 0.549209 |
67a5c44baf11c759c0903a6bd79cd2971e2ee889 | 129 | py | Python | th2c/response.py | vladmunteanu/th2c | 97defe9cd531010970aeba68a59be921ea3ed81a | [
"MIT"
] | 82 | 2017-11-15T09:11:48.000Z | 2022-02-20T08:45:23.000Z | th2c/response.py | vladmunteanu/th2c | 97defe9cd531010970aeba68a59be921ea3ed81a | [
"MIT"
] | 8 | 2017-12-11T09:24:44.000Z | 2020-02-03T13:58:03.000Z | th2c/response.py | vladmunteanu/th2c | 97defe9cd531010970aeba68a59be921ea3ed81a | [
"MIT"
] | 10 | 2017-12-24T11:20:21.000Z | 2019-09-11T10:14:39.000Z | from __future__ import absolute_import
from tornado.httpclient import HTTPResponse
class HTTP2Response(HTTPResponse):
    """Response type for HTTP/2 requests.

    Currently adds no behaviour over Tornado's HTTPResponse; it exists as a
    distinct type (presumably so HTTP/2 responses can be told apart or
    extended later — confirm against the client code).
    """
    pass
| 18.428571 | 43 | 0.837209 |
69f8d6ff24cdcf820238452d3f085779a48d0d19 | 1,193 | py | Python | setup.py | Tedyst/blockify | bb9e4ba498589d0e44b5dc5c9212d01897ca2b5d | [
"MIT"
] | 235 | 2016-07-13T19:14:33.000Z | 2022-03-26T21:27:10.000Z | setup.py | Tedyst/blockify | bb9e4ba498589d0e44b5dc5c9212d01897ca2b5d | [
"MIT"
] | 71 | 2015-01-10T13:58:17.000Z | 2016-07-05T14:51:48.000Z | setup.py | Tedyst/blockify | bb9e4ba498589d0e44b5dc5c9212d01897ca2b5d | [
"MIT"
] | 32 | 2016-09-27T05:20:42.000Z | 2021-05-13T21:11:35.000Z | from os.path import dirname, join
from blockify.util import VERSION
from setuptools import setup, find_packages
def read(filename):
    """Return the contents of `filename` located next to this script.

    On any error the returned string is "<error>: <filename>" instead of
    raising, so setup() can still run with a placeholder description.
    """
    path = join(dirname(__file__), filename)
    try:
        with open(path) as handle:
            return handle.read()
    except Exception as exc:
        return "{0}: {1}".format(exc, filename)
# Package identity and long description sourced from the README.
_name = "blockify"
_license = "MIT"
_description = read("README.md")
setup(
    name=_name,
    description="Mute spotify advertisements.",
    long_description=_description,
    keywords=["spotify", "music", "commercials", "adblock"],
    version=VERSION,
    license=_license,
    url="https://github.com/serialoverflow/{}".format(_name),
    download_url="https://github.com/serialoverflow/blockify/tarball/v{0}".format(VERSION),
    author="Max Falk",
    author_email="gmdfalk@gmail.com",
    packages=find_packages(),
    # Ship the bundled data files inside the package.
    package_data={_name: ["data/*"]},
    include_package_data=True,
    entry_points={
        # CLI entry points: the core blocker and its D-Bus client.
        "console_scripts": [
            "{0} = {0}.cli:main".format(_name),
            "{0}-dbus = {0}.dbusclient:main".format(_name),
        ],
        # GUI entry point.
        "gui_scripts": [
            "{0}-ui = {0}.gui:main".format(_name),
        ],
    }
)
ae1141f202facbd7535660a934cd9d92259eaf58 | 752 | py | Python | tests/test_cli.py | melund/conda | 2348aa1863c2bd0c536f29c510a97fbc85a2ad89 | [
"BSD-3-Clause"
] | null | null | null | tests/test_cli.py | melund/conda | 2348aa1863c2bd0c536f29c510a97fbc85a2ad89 | [
"BSD-3-Clause"
] | null | null | null | tests/test_cli.py | melund/conda | 2348aa1863c2bd0c536f29c510a97fbc85a2ad89 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from conda.cli.common import arg2spec
class TestArg2Spec(unittest.TestCase):
    # Unit tests for conda.cli.common.arg2spec, which converts a command-line
    # package argument (e.g. "python=2.6") into an internal match spec string.
    def test_simple(self):
        """Name and version pins expand into glob-style match specs."""
        self.assertEqual(arg2spec('python'), 'python')
        self.assertEqual(arg2spec('python=2.6'), 'python 2.6*')
        self.assertEqual(arg2spec('ipython=0.13.2'), 'ipython 0.13.2*')
        self.assertEqual(arg2spec('ipython=0.13.0'), 'ipython 0.13|0.13.0*')
        self.assertEqual(arg2spec('foo=1.3.0=3'), 'foo 1.3.0 3')
    def test_invalid_char(self):
        """Characters outside the allowed spec alphabet abort with SystemExit."""
        self.assertRaises(SystemExit, arg2spec, 'abc%def')
        self.assertRaises(SystemExit, arg2spec, '!xyz 1.3')
    def test_too_long(self):
        """More than three '='-separated parts is rejected."""
        self.assertRaises(SystemExit, arg2spec, 'foo=1.3=2=4')
# Allow running this test module directly: `python test_cli.py`.
if __name__ == '__main__':
    unittest.main()
| 30.08 | 76 | 0.658245 |
293f393512a9d4b35aeab847bb8d5200c16e0e57 | 1,808 | py | Python | rx/core/operators/publish.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 4,342 | 2015-01-06T09:00:23.000Z | 2022-03-28T15:05:50.000Z | rx/core/operators/publish.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 613 | 2015-01-07T20:44:56.000Z | 2022-03-20T06:14:20.000Z | rx/core/operators/publish.py | mmpio/RxPY | 4ed60bb5c04aa85de5210e5537a6adfe1b667d50 | [
"MIT"
] | 420 | 2015-01-07T14:30:30.000Z | 2022-03-11T22:47:46.000Z | from typing import Callable, Optional
from rx import operators as ops
from rx.core import Observable, ConnectableObservable, pipe
from rx.core.typing import Mapper
from rx.subject import Subject
def _publish(mapper: Optional[Mapper] = None) -> Callable[[Observable], ConnectableObservable]:
    """Multicast the source sequence through a plain :class:`Subject`.

    Specialization of ``multicast``: the returned operator maps an observable
    to a connectable observable that shares a single subscription to the
    underlying sequence.

    Example:
        >>> res = publish()
        >>> res = publish(lambda x: x)

    Args:
        mapper: [Optional] Selector that may use the multicasted source as
            many times as needed without causing multiple subscriptions to
            the source. Subscribers receive all notifications from the time
            of subscription on.

    Returns:
        An operator producing the (possibly mapper-transformed) multicasted
        sequence.
    """
    if mapper:
        multicast = ops.multicast(subject_factory=lambda _: Subject(), mapper=mapper)
    else:
        multicast = ops.multicast(subject=Subject())
    return pipe(multicast)
def _share() -> Callable[[Observable], Observable]:
    """Share a single subscription among multiple observers.

    Alias for ``publish()`` composed with ``ref_count()``: the source stays
    subscribed (and emitting) while at least one subscriber is present, and
    is unsubscribed once the last subscriber leaves.
    """
    ref_counted = ops.ref_count()
    return pipe(_publish(), ref_counted)
a32ce9af95b407693f16a1eb5d5adbb4af3b6227 | 867 | py | Python | goodreads_api_client/tests/test_client.py | fossabot/goodreads-api-client-python | d58424aea92e808e8e9c6bbf6ecc42f084a6c642 | [
"MIT"
] | 63 | 2017-12-03T02:54:05.000Z | 2022-02-01T23:28:32.000Z | goodreads_api_client/tests/test_client.py | fossabot/goodreads-api-client-python | d58424aea92e808e8e9c6bbf6ecc42f084a6c642 | [
"MIT"
] | 13 | 2017-08-13T17:28:06.000Z | 2019-06-08T05:49:11.000Z | goodreads_api_client/tests/test_client.py | fossabot/goodreads-api-client-python | d58424aea92e808e8e9c6bbf6ecc42f084a6c642 | [
"MIT"
] | 15 | 2017-12-09T00:56:05.000Z | 2021-01-26T22:42:09.000Z | import unittest
from goodreads_api_client.client import Client
from goodreads_api_client.tests.conftest import (
developer_key, developer_secret, vcr)
class TestClient(unittest.TestCase):
    # Integration-style tests for the Goodreads API client; HTTP traffic is
    # served from recorded vcr cassettes, so no live network access is needed.
    def setUp(self):
        # Fresh client per test, using the shared test credentials.
        self._client = Client(developer_key=developer_key,
                              developer_secret=developer_secret)
    @vcr.use_cassette('search/author.yaml')
    def test_search_author(self):
        """Author search resolves a partial name to the full author record."""
        result = self._client.search_author('Murakami')
        self.assertEqual(result['name'], 'Haruki Murakami')
    @vcr.use_cassette('search/book.yaml')
    def test_search_book(self):
        """Book search by title returns exactly one match for this query."""
        result = self._client.search_book(q='Highprince of War')
        self.assertEqual(int(result['total-results']), 1)
    def tearDown(self):
        # Close any HTTP session the transport opened to avoid socket leaks.
        if self._client._transport.is_using_session():
            self._client._transport.session.close()
| 30.964286 | 64 | 0.694348 |
7c59986e641f07530dde637e11b0a6aa65d35c00 | 528 | py | Python | usaspending_api/references/migrations/0026_auto_20161021_1237.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | 1 | 2021-06-17T05:09:00.000Z | 2021-06-17T05:09:00.000Z | usaspending_api/references/migrations/0026_auto_20161021_1237.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | usaspending_api/references/migrations/0026_auto_20161021_1237.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-21 16:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename model RefCFDAProgramInfo to CFDAProgram and point it at the
    ``cfda_program`` database table. Auto-generated by Django 1.10.1; do not
    edit by hand.
    """
    dependencies = [
        ('references', '0025_auto_20161021_0948'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='RefCFDAProgramInfo',
            new_name='CFDAProgram',
        ),
        migrations.AlterModelTable(
            name='cfdaprogram',
            table='cfda_program',
        ),
    ]
| 22 | 50 | 0.606061 |
e2509f20d2c32865b060349790ef66e7e21e9eba | 1,369 | py | Python | src/mojang.py | talwat/SkinModPy | d2d1891243087d7a812d4f58bb46ddcd611e1e5f | [
"MIT"
] | 1 | 2021-12-05T07:19:47.000Z | 2021-12-05T07:19:47.000Z | src/mojang.py | talwat/SkinModPy | d2d1891243087d7a812d4f58bb46ddcd611e1e5f | [
"MIT"
] | null | null | null | src/mojang.py | talwat/SkinModPy | d2d1891243087d7a812d4f58bb46ddcd611e1e5f | [
"MIT"
] | null | null | null | from json import loads
import methods
def getSkin(name, outputPath):
    """Resolve a Minecraft player *name* via the Mojang API and download the
    player's skin texture to *outputPath*.

    Returns:
        "success" when the skin file was downloaded,
        "404" when the player name is unknown,
        "error" when the initial profile lookup failed.
    """
    try:
        methods.log("Getting name profile info...")
        profile_response = methods.getFromInternet(
            "https://api.mojang.com/users/profiles/minecraft/{}".format(name))
    except Exception as e:
        methods.log("An error occured while downloading the skin: {}.".format(str(e)), "fatal")
        return "error"
    # An empty response body means Mojang knows no player by that name.
    if not profile_response:
        return "404"
    methods.log("Loading raw name profile info into json...")
    profile = loads(profile_response)
    player_uuid = profile["id"]
    methods.log("Getting full profile info with uuid...")
    session_response = methods.getFromInternet(
        "https://sessionserver.mojang.com/session/minecraft/profile/{}".format(player_uuid))
    methods.log("Loading raw uuid info into json...")
    session_profile = loads(session_response)
    encoded_textures = session_profile["properties"][0]["value"]
    methods.log("Decoding raw skin data...")
    decoded_textures = methods.base64Decode(encoded_textures)
    methods.log("Loading raw skin data into json...")
    textures = loads(decoded_textures)
    skin_url = textures["textures"]["SKIN"]["url"]
    methods.log("Downloading skin file from the Mojang API...")
    methods.downloadFile(skin_url, outputPath)
    methods.log("Downloaded skin file!", "success")
    return "success"
5c7117f0dfa9e0f5c3321e0af2b3d3df0e22ed6c | 2,301 | py | Python | switchboard/models.py | gutma-org/droneregistry-broker | c53910e0819d75d1bbbe6427e1a178ff8cde7a02 | [
"Apache-2.0"
] | 2 | 2019-07-03T14:52:34.000Z | 2021-04-26T21:36:05.000Z | switchboard/models.py | gutma-org/droneregistry-broker | c53910e0819d75d1bbbe6427e1a178ff8cde7a02 | [
"Apache-2.0"
] | 2 | 2018-11-16T13:29:07.000Z | 2018-12-31T11:32:06.000Z | switchboard/models.py | gutma-org/droneregistry-broker | c53910e0819d75d1bbbe6427e1a178ff8cde7a02 | [
"Apache-2.0"
] | 2 | 2019-07-08T12:40:10.000Z | 2020-12-13T10:14:14.000Z | from django.db import models
import uuid
# Create your models here.
from datetime import date
from datetime import datetime
from datetime import timezone
from dateutil.relativedelta import relativedelta
from django.utils.translation import ugettext_lazy as _
import string, random
from django.core.validators import RegexValidator
class Registry(models.Model):
    """A registered drone-registry endpoint that the broker can query.

    Stores the endpoint URL together with the GUTMA API version it speaks
    and the authentication scheme it expects.
    """
    VERSION_CHOICES = ((0, _('NA')),(1, _('GUTMA_V1')),(2, _('GUTMA_V2')),)
    AUTHENTICATION_METHOD_CHOICES = ((0, _('None')),(1, _('JWT')),(2, _('TOKEN')),)
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    endpoint = models.URLField()
    api_version = models.IntegerField(_('version'), choices=VERSION_CHOICES)
    authentication = models.IntegerField(choices=AUTHENTICATION_METHOD_CHOICES, default=0)

    def __str__(self):
        return self.endpoint

    def __repr__(self):
        return self.__str__()

    def __unicode__(self):
        # Bug fix: the method was misspelled "__uniode__", so Python 2's
        # unicode() protocol never found it.
        return self.__str__()
# class SercureStorage(models.Model):
# registry = models.ForeignKey(Registry, models.CASACADE)
class SearchQuery(models.Model):
    """A (possibly privileged) lookup fanned out to all registries.

    Tracks the query string, what kind of identifier it is, the async task
    state, and the aggregated results/logs of the registry queries.
    """
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    TYPE_CHOICES = ((0, _('Regular')),(1, _('Privileged')),)
    PARAMETER_CHOICES = ((0, _('Operator ID')),(1, _('RPAS ID')),(2, _('Pilot ID')),)
    STATE_CHOICES = ((0, _('PENDING')),(1, _('RECEIVED')),(2, _('STARTED')),(3, _('SUCCESS')),(4, _('RETRY')),(5, _('IGNORED')),)
    query = models.CharField(max_length=140)
    query_type = models.IntegerField(choices=TYPE_CHOICES, default=0)
    query_parameter = models.IntegerField(choices=PARAMETER_CHOICES, default=0)
    credentials = models.TextField(default='')
    state = models.IntegerField(_('state'), choices=STATE_CHOICES, default=0)
    results = models.TextField(default="Querying registries, this will populate once all queries are complete..")
    logs = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # Bug fix: self.state is an integer; __str__ must return a str or
        # Python raises TypeError whenever the instance is stringified.
        return str(self.state)

    def __repr__(self):
        return self.__str__()

    def __unicode__(self):
        # Bug fix: was misspelled "__uniode__".
        return self.__str__()
5cf40d106132d7658942cb90600e90a6134e6db5 | 1,592 | py | Python | pypdevs/src/pypdevs/message.py | martinvy/sin-model-elevators | ebf6511d61326972b2e366c8975f76a944196a6f | [
"MIT"
] | 1 | 2018-09-19T14:42:28.000Z | 2018-09-19T14:42:28.000Z | pypdevs/src/pypdevs/message.py | martinvy/sin-model-elevators | ebf6511d61326972b2e366c8975f76a944196a6f | [
"MIT"
] | null | null | null | pypdevs/src/pypdevs/message.py | martinvy/sin-model-elevators | ebf6511d61326972b2e366c8975f76a944196a6f | [
"MIT"
] | 2 | 2020-05-29T10:12:37.000Z | 2021-05-19T21:32:35.000Z | # Copyright 2014 Modelling, Simulation and Design Lab (MSDL) at
# McGill University and the University of Antwerp (http://msdl.cs.mcgill.ca/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Network messages used in the distributed simulation
"""
class NetworkMessage(object):
    """Plain data container for a message exchanged in the distributed
    simulation: timestamp, payload, UUID, Mattern color, and the model_id of
    the destination model.
    """
    def __init__(self, timestamp, content, uuid, color, destination):
        """Store all message fields.

        :param timestamp: timestamp of the message
        :param content: payload of the message
        :param uuid: UUID of the message
        :param color: color of the message for Mattern's algorithm
        :param destination: model_id of the destination model
        """
        (self.timestamp, self.content, self.uuid,
         self.color, self.destination) = (timestamp, content, uuid,
                                          color, destination)
    def __lt__(self, other):
        """Order messages by timestamp only; required for sorting/heaps in
        Python 3, which has no default cross-object comparison."""
        return self.timestamp < other.timestamp
| 35.377778 | 77 | 0.692211 |
aa350a56ebedd8c9b04f7dd47adf397f2e14976d | 678 | py | Python | core/layers/uigen/python/bin/rstpep2html.py | nerdguru/sls-pypi | b31c7738ac3bada5ecf3eaae8dcbc2d2a30b7dcf | [
"MIT"
] | null | null | null | core/layers/uigen/python/bin/rstpep2html.py | nerdguru/sls-pypi | b31c7738ac3bada5ecf3eaae8dcbc2d2a30b7dcf | [
"MIT"
] | 18 | 2019-10-10T00:08:35.000Z | 2019-10-23T15:34:42.000Z | core/layers/uigen/python/bin/rstpep2html.py | nerdguru/sls-pypi | b31c7738ac3bada5ecf3eaae8dcbc2d2a30b7dcf | [
"MIT"
] | null | null | null | #!/var/lang//bin/python3.6
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
# Apply the user's preferred locale so docutils output is localized; this is
# best-effort and must never prevent document generation.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Bug fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. An unsupported locale simply falls back to default.
    pass
from docutils.core import publish_cmdline, default_description
# Command-line help text shown by the docutils front end.
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
               + default_description)
# Run the standard docutils publisher with the PEP reader/writer pair.
publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
| 26.076923 | 76 | 0.722714 |
4b607a9c11a230ed48ff8cd09ff3f2a3536f8214 | 171 | py | Python | apps/news/forms.py | shiqianlong/xfz | 3474794e9585eae42362cbcdc67836498f452571 | [
"BSL-1.0"
] | null | null | null | apps/news/forms.py | shiqianlong/xfz | 3474794e9585eae42362cbcdc67836498f452571 | [
"BSL-1.0"
] | null | null | null | apps/news/forms.py | shiqianlong/xfz | 3474794e9585eae42362cbcdc67836498f452571 | [
"BSL-1.0"
] | null | null | null | from django import forms
from apps.forms import FormMixin
class NewsCommentForm(forms.Form,FormMixin):
    """Validates a news-comment submission: the comment body and the id of
    the news item it belongs to."""
    # Comment text entered by the user.
    content = forms.CharField()
    # Primary key of the news article being commented on.
    news_id = forms.IntegerField()
f01fb962b7e02029cd3b5996fc25300610e6984b | 119 | py | Python | app/Python/testbiaoge1.py | biaogeniubi666/zhy_business_data_platform | 6cf45208421e1989fe961b0a4b6dff32735650a6 | [
"MIT"
] | null | null | null | app/Python/testbiaoge1.py | biaogeniubi666/zhy_business_data_platform | 6cf45208421e1989fe961b0a4b6dff32735650a6 | [
"MIT"
] | 6 | 2019-08-13T01:47:20.000Z | 2022-02-18T06:22:35.000Z | app/Python/testbiaoge1.py | biaogeniubi666/zhy_business_data_platform | 6cf45208421e1989fe961b0a4b6dff32735650a6 | [
"MIT"
] | null | null | null |
import random
def go(min = 0,max = 1):
randomfloat = random.uniform(min, max)
return randomfloat
| 13.222222 | 42 | 0.605042 |
e248279b0ae79771a8635ff4b9d32a3eb817c96e | 3,986 | py | Python | alipay/aop/api/request/AlipayOpenContentIotCouponQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayOpenContentIotCouponQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayOpenContentIotCouponQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenContentIotCouponQueryModel import AlipayOpenContentIotCouponQueryModel
class AlipayOpenContentIotCouponQueryRequest(object):
    """Request wrapper for the Alipay gateway API
    ``alipay.open.content.iot.coupon.query``.

    Auto-generated SDK boilerplate: holds the business payload
    (``biz_model``/``biz_content``) plus the common gateway parameters, and
    flattens everything into the POST parameter dict via :meth:`get_params`.
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accepts either a model instance or a plain dict (converted here).
        if isinstance(value, AlipayOpenContentIotCouponQueryModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayOpenContentIotCouponQueryModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Non-dict values are silently ignored (generated-SDK convention).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Serialize all set fields into the gateway parameter dict."""
        params = dict()
        params[P_METHOD] = 'alipay.open.content.iot.coupon.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """This API uploads no files, so the multipart dict is always empty."""
        multipart_params = dict()
        return multipart_params
| 27.489655 | 148 | 0.64576 |
373a8fa78d9e97d043ab8bfeb79a7b0054698457 | 7,145 | py | Python | dev/Tools/build/waf-1.7.13/lmbrwaflib/compile_rules_win_x64_win_x64_clang.py | CJoriginal/cjlumberyard | 2e3184a7d8e59ba05e5707371b8cb6fe40b0ca60 | [
"AML"
] | 2 | 2020-08-20T03:40:24.000Z | 2021-02-07T20:31:43.000Z | dev/Tools/build/waf-1.7.13/lmbrwaflib/compile_rules_win_x64_win_x64_clang.py | CJoriginal/cjlumberyard | 2e3184a7d8e59ba05e5707371b8cb6fe40b0ca60 | [
"AML"
] | null | null | null | dev/Tools/build/waf-1.7.13/lmbrwaflib/compile_rules_win_x64_win_x64_clang.py | CJoriginal/cjlumberyard | 2e3184a7d8e59ba05e5707371b8cb6fe40b0ca60 | [
"AML"
] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# Original file Copyright Crytek GMBH or its affiliates, used under license.
#
import os
from waflib.Configure import conf, Logs
from waflib.TaskGen import feature, after_method
from compile_rules_win_x64_win_x64_vs2015 import load_win_x64_win_x64_vs2015_common_settings
# Waf platform identifier for the win_x64 host -> win_x64 target clang toolchain.
PLATFORM = 'win_x64_clang'
@conf
def load_win_x64_win_x64_clang_common_settings(conf):
    """
    Setup all compiler and linker settings shared over all win_x64_win_x64
    clang configurations. Detects the Windows Kit, swaps the MSVC toolchain
    binaries for clang/llvm-lib/lld-link, and rebuilds the flag environment.
    """
    v = conf.env
    global PLATFORM
    # Load MSVC settings for non-build stuff (AzCG, CrcFix, etc)
    load_win_x64_win_x64_vs2015_common_settings(conf)
    windows_kit = conf.options.win_vs2015_winkit
    try:
        _, _, _, system_includes, _, _ = conf.detect_msvc(windows_kit, True)
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt raised during configuration.
        Logs.warn('Unable to find Windows Kit {}, removing build target'.format(windows_kit))
        conf.mark_supported_platform_for_removal(PLATFORM)
        return
    restricted_tool_list_macro_header = 'AZ_TOOLS_EXPAND_FOR_RESTRICTED_PLATFORMS='
    restricted_tool_list_macro = restricted_tool_list_macro_header
    # Start with a blank platform slate
    conf.undefine('AZ_TOOLS_EXPAND_FOR_RESTRICTED_PLATFORMS')
    if len(restricted_tool_list_macro) > len(restricted_tool_list_macro_header):
        v['DEFINES'] += [ restricted_tool_list_macro ]
    # Remove MSVC/clang specific settings
    v['CFLAGS'] = []
    v['CXXFLAGS'] = []
    v['LINKFLAGS'] = []
    # Linker
    v['CCLNK_SRC_F'] = v['CXXLNK_SRC_F'] = []
    v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:'
    v['LIB_ST'] = '%s.lib'
    v['LIBPATH_ST'] = '/LIBPATH:%s'
    v['STLIB_ST'] = '%s.lib'
    v['STLIBPATH_ST'] = '/LIBPATH:%s'
    v['cprogram_PATTERN'] = '%s.exe'
    v['cxxprogram_PATTERN'] = '%s.exe'
    v['cstlib_PATTERN'] = '%s.lib'
    v['cxxstlib_PATTERN'] = '%s.lib'
    v['cshlib_PATTERN'] = '%s.dll'
    v['cxxshlib_PATTERN'] = '%s.dll'
    v['LINKFLAGS_cshlib'] = ['/DLL']
    v['LINKFLAGS_cxxshlib'] = ['/DLL']
    # AR Tools
    v['ARFLAGS'] = ['/NOLOGO']
    v['AR_TGT_F'] = '/OUT:'
    # Delete the env variables so that they can be replaced with the clang versions
    del v['AR']
    del v['CC']
    del v['CXX']
    del v['LINK']
    conf.find_program('clang', var='CC', silent_output=True)
    conf.find_program('clang++', var='CXX', silent_output=True)
    conf.find_program('llvm-lib', var='AR', silent_output=True)
    conf.find_program('lld-link', var='LINK', silent_output=True)
    v['LINK_CC'] = v['LINK_CXX'] = v['LINK']
    clang_FLAGS = [
        '-mcx16',
        '-msse3',
        '-Wno-macro-redefined',
        '-Wno-microsoft-cast',
        '-Wno-ignored-pragma-intrinsic', # Clang doesn't need #pragma intrinsic anyway, so don't whine when one isn't recognized
    ]
    # Path to clang.exe is [clang]/bin/clang.exe, but the include path is [clang]/lib/clang/6.0.0/include
    clang_include_path = os.path.join(
        os.path.dirname(os.path.dirname(v['CXX'])),
        'lib', 'clang', '6.0.0', 'include')
    system_includes = [clang_include_path] + system_includes
    # Treat all MSVC include paths as system headers
    for include in system_includes:
        clang_FLAGS += ['-isystem', include]
    v['CFLAGS'] += clang_FLAGS
    v['CXXFLAGS'] += clang_FLAGS
    v['DEFINES'] += [
        '_CRT_SECURE_NO_WARNINGS',
        '_CRT_NONSTDC_NO_WARNINGS',
    ]
    v['LINKFLAGS'] += [
        '/MACHINE:x64',
        '/MANIFEST',        # Create a manifest file
        '/OPT:REF', '/OPT:ICF', # Always optimize for size, there's no reason not to
        '/LARGEADDRESSAWARE',   # tell the linker that the application can handle addresses larger than 2 gigabytes.
    ]
@conf
def load_debug_win_x64_win_x64_clang_settings(conf):
    """
    Setup all compiler and linker settings shared over all win_x64_win_x64_clang configurations for
    the 'debug' configuration: no optimization plus full CodeView debug info.
    """
    conf.load_debug_windows_settings()
    conf.load_win_x64_win_x64_clang_common_settings()
    conf.load_debug_clang_settings()
    # Unoptimized build with CodeView debug info; the GHASH sections emitted
    # here are consumed by lld-link via /debug:GHASH below.
    conf.env['CXXFLAGS'] += [
        '-O0',
        '-gcodeview',
        '-gno-column-info',
        '-mllvm', '-emit-codeview-ghash-section',
    ]
    conf.env['LINKFLAGS'] += [
        '/debug:GHASH',
    ]
    # Load additional shared settings
    conf.load_debug_cryengine_settings()
@conf
def load_profile_win_x64_win_x64_clang_settings(conf):
    """
    Setup all compiler and linker settings shared over all win_x64_win_x64_clang configurations for
    the 'profile' configuration: optimized (-O3) but still with CodeView debug info.
    """
    conf.load_profile_windows_settings()
    conf.load_win_x64_win_x64_clang_common_settings()
    conf.load_profile_clang_settings()
    # GHASH sections speed up PDB generation in lld-link (/debug:GHASH below).
    conf.env['CXXFLAGS'] += [
        '-O3',
        '-gcodeview',
        '-gno-column-info',
        '-mllvm', '-emit-codeview-ghash-section',
    ]
    conf.env['LINKFLAGS'] += [
        '/debug:GHASH',
    ]
    # Load additional shared settings
    conf.load_profile_cryengine_settings()
@conf
def load_performance_win_x64_win_x64_clang_settings(conf):
    """
    Setup all compiler and linker settings shared over all win_x64_win_x64_clang configurations for
    the 'performance' configuration: same flag set as 'profile' (-O3 + CodeView).
    """
    conf.load_performance_windows_settings()
    conf.load_win_x64_win_x64_clang_common_settings()
    conf.load_performance_clang_settings()
    conf.env['CXXFLAGS'] += [
        '-O3',
        '-gcodeview',
        '-gno-column-info',
        '-mllvm', '-emit-codeview-ghash-section',
    ]
    conf.env['LINKFLAGS'] += [
        '/debug:GHASH',
    ]
    # Load additional shared settings
    conf.load_performance_cryengine_settings()
@conf
def load_release_win_x64_win_x64_clang_settings(conf):
    """
    Setup all compiler and linker settings shared over all win_x64_win_x64_clang configurations for
    the 'release' configuration: -O3 with only line-table debug info (no full
    CodeView/GHASH as in the other configurations).
    """
    conf.load_release_windows_settings()
    conf.load_win_x64_win_x64_clang_common_settings()
    conf.load_release_clang_settings()
    conf.env['CXXFLAGS'] += [
        '-O3',
        '-gline-tables-only'
    ]
    # Load additional shared settings
    conf.load_release_cryengine_settings()
@feature('c', 'cxx')
@after_method('apply_link')
@after_method('add_pch_to_dependencies')
def place_wall_first(self):
    """Swap '-Wall' into the first position of each compile task's CFLAGS and
    CXXFLAGS. NOTE(review): presumably so that more specific -W options added
    later in the flag list take precedence over -Wall — confirm intent."""
    for task in getattr(self, 'compiled_tasks', []):
        for flag_var in ('CFLAGS', 'CXXFLAGS'):
            flag_list = task.env[flag_var]
            if '-Wall' not in flag_list:
                continue
            wall_at = flag_list.index('-Wall')
            flag_list[0], flag_list[wall_at] = flag_list[wall_at], flag_list[0]
4bdc7347a66977b582b322c8d455a3927a3ff99b | 16,349 | bzl | Python | pkg/pkg.bzl | mmikitka/rules_pkg | b28e8c827dcdb4b16aa9aa81c6d59b17ea6a4497 | [
"Apache-2.0"
] | null | null | null | pkg/pkg.bzl | mmikitka/rules_pkg | b28e8c827dcdb4b16aa9aa81c6d59b17ea6a4497 | [
"Apache-2.0"
] | null | null | null | pkg/pkg.bzl | mmikitka/rules_pkg | b28e8c827dcdb4b16aa9aa81c6d59b17ea6a4497 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for manipulation of various packaging."""
load(":path.bzl", "compute_data_path", "dest_path")
# Filetype to restrict inputs
# Allowed extensions for tarball inputs/outputs handled by pkg_tar.
tar_filetype = [".tar", ".tar.gz", ".tgz", ".tar.xz", ".tar.bz2"]
# Allowed extensions for Debian packages handled by pkg_deb.
deb_filetype = [".deb", ".udeb"]
def _remap(remap_paths, path):
    """Rewrite `path` using the first matching prefix entry in `remap_paths`.

    Each key in `remap_paths` is a prefix; when `path` starts with one, that
    prefix is replaced by the mapped value. With no match, `path` is returned
    unchanged.
    """
    for old_prefix, new_prefix in remap_paths.items():
        if path.startswith(old_prefix):
            suffix = path[len(old_prefix):]
            return new_prefix + suffix
    return path
def _quote(filename, protect = "="):
    """Escape `filename` for the build_tar argument syntax: backslash becomes
    a double backslash, and the `protect` character gains a leading backslash.
    """
    escaped = filename.replace("\\", "\\\\")
    return escaped.replace(protect, "\\" + protect)
def _pkg_tar_impl(ctx):
    """Implementation of the pkg_tar rule.

    Collects the files/attributes into a build_tar argument file and runs the
    build_tar tool to produce ctx.outputs.out.
    """
    # Files needed by rule implementation at runtime
    files = []
    # Compute the relative path
    data_path = compute_data_path(ctx.outputs.out, ctx.attr.strip_prefix)
    # Find a list of path remappings to apply.
    remap_paths = ctx.attr.remap_paths
    # Package dir can be specified by a file or inlined.
    if ctx.attr.package_dir_file:
        if ctx.attr.package_dir:
            fail("Both package_dir and package_dir_file attributes were specified")
        package_dir_arg = "--directory=@" + ctx.file.package_dir_file.path
        files.append(ctx.file.package_dir_file)
    else:
        # Bug fix: `+` binds tighter than `or`, so the previous
        # `"--directory=" + ctx.attr.package_dir or "/"` always produced a
        # truthy "--directory=..." string and the "/" default was unreachable.
        package_dir_arg = "--directory=" + (ctx.attr.package_dir or "/")
    # Start building the arguments.
    args = [
        "--output=" + ctx.outputs.out.path,
        package_dir_arg,
        "--mode=" + ctx.attr.mode,
        "--owner=" + ctx.attr.owner,
        "--owner_name=" + ctx.attr.ownername,
    ]
    if ctx.attr.mtime != -1:  # Note: Must match default in rule def.
        if ctx.attr.portable_mtime:
            fail("You may not set both mtime and portable_mtime")
        args.append("--mtime=%d" % ctx.attr.mtime)
    if ctx.attr.portable_mtime:
        args.append("--mtime=portable")
    # Add runfiles if requested
    file_inputs = []
    if ctx.attr.include_runfiles:
        runfiles_depsets = []
        for f in ctx.attr.srcs:
            default_runfiles = f[DefaultInfo].default_runfiles
            if default_runfiles != None:
                runfiles_depsets.append(default_runfiles.files)
        # deduplicates files in srcs attribute and their runfiles
        file_inputs = depset(ctx.files.srcs, transitive = runfiles_depsets).to_list()
    else:
        file_inputs = ctx.files.srcs[:]
    args += [
        "--file=%s=%s" % (_quote(f.path), _remap(remap_paths, dest_path(f, data_path)))
        for f in file_inputs
    ]
    # Explicit file->destination mappings; each target must be a single file.
    for target, f_dest_path in ctx.attr.files.items():
        target_files = target.files.to_list()
        if len(target_files) != 1:
            fail("Each input must describe exactly one file.", attr = "files")
        file_inputs += target_files
        args += ["--file=%s=%s" % (_quote(target_files[0].path), f_dest_path)]
    if ctx.attr.modes:
        args += [
            "--modes=%s=%s" % (_quote(key), ctx.attr.modes[key])
            for key in ctx.attr.modes
        ]
    if ctx.attr.owners:
        args += [
            "--owners=%s=%s" % (_quote(key), ctx.attr.owners[key])
            for key in ctx.attr.owners
        ]
    if ctx.attr.ownernames:
        args += [
            "--owner_names=%s=%s" % (_quote(key), ctx.attr.ownernames[key])
            for key in ctx.attr.ownernames
        ]
    if ctx.attr.empty_files:
        args += ["--empty_file=%s" % empty_file for empty_file in ctx.attr.empty_files]
    if ctx.attr.empty_dirs:
        args += ["--empty_dir=%s" % empty_dir for empty_dir in ctx.attr.empty_dirs]
    # Derive the compression from the extension (text after the first dot),
    # with "tgz" special-cased to gzip.
    if ctx.attr.extension:
        dotPos = ctx.attr.extension.find(".")
        if dotPos > 0:
            dotPos += 1
            args += ["--compression=%s" % ctx.attr.extension[dotPos:]]
        elif ctx.attr.extension == "tgz":
            args += ["--compression=gz"]
    args += ["--tar=" + f.path for f in ctx.files.deps]
    args += [
        "--link=%s:%s" % (_quote(k, protect = ":"), ctx.attr.symlinks[k])
        for k in ctx.attr.symlinks
    ]
    # Pass all arguments through a response file to avoid command-line limits.
    arg_file = ctx.actions.declare_file(ctx.label.name + ".args")
    files.append(arg_file)
    ctx.actions.write(arg_file, "\n".join(args))
    ctx.actions.run(
        mnemonic = "PackageTar",
        inputs = file_inputs + ctx.files.deps + files,
        executable = ctx.executable.build_tar,
        arguments = ["@" + arg_file.path],
        outputs = [ctx.outputs.out],
        # Force UTF-8 so the Python build_tar tool handles non-ASCII names.
        env = {
            "LANG": "en_US.UTF-8",
            "LC_CTYPE": "UTF-8",
            "PYTHONIOENCODING": "UTF-8",
            "PYTHONUTF8": "1",
        },
        use_default_shell_env = True,
    )
    return OutputGroupInfo(out = [ctx.outputs.out])
def _pkg_deb_impl(ctx):
    """The implementation for the pkg_deb rule."""

    # The data tarball is always an input; maintainer scripts and metadata
    # files are appended below as the corresponding attributes are set.
    files = [ctx.file.data]
    args = [
        "--output=" + ctx.outputs.deb.path,
        "--changes=" + ctx.outputs.changes.path,
        "--data=" + ctx.file.data.path,
        "--package=" + ctx.attr.package,
        "--architecture=" + ctx.attr.architecture,
        "--maintainer=" + ctx.attr.maintainer,
    ]

    # Maintainer scripts are passed by file reference ("@<path>").
    if ctx.attr.preinst:
        args += ["--preinst=@" + ctx.file.preinst.path]
        files += [ctx.file.preinst]
    if ctx.attr.postinst:
        args += ["--postinst=@" + ctx.file.postinst.path]
        files += [ctx.file.postinst]
    if ctx.attr.prerm:
        args += ["--prerm=@" + ctx.file.prerm.path]
        files += [ctx.file.prerm]
    if ctx.attr.postrm:
        args += ["--postrm=@" + ctx.file.postrm.path]
        files += [ctx.file.postrm]
    if ctx.attr.config:
        args += ["--config=@" + ctx.file.config.path]
        files += [ctx.file.config]
    if ctx.attr.templates:
        args += ["--templates=@" + ctx.file.templates.path]
        files += [ctx.file.templates]

    # Conffiles can be specified by a file or a string list
    if ctx.attr.conffiles_file:
        if ctx.attr.conffiles:
            fail("Both conffiles and conffiles_file attributes were specified")
        args += ["--conffile=@" + ctx.file.conffiles_file.path]
        files += [ctx.file.conffiles_file]
    elif ctx.attr.conffiles:
        args += ["--conffile=%s" % cf for cf in ctx.attr.conffiles]

    # Version and description can be specified by a file or inlined
    if ctx.attr.version_file:
        if ctx.attr.version:
            fail("Both version and version_file attributes were specified")
        args += ["--version=@" + ctx.file.version_file.path]
        files += [ctx.file.version_file]
    elif ctx.attr.version:
        args += ["--version=" + ctx.attr.version]
    else:
        fail("Neither version_file nor version attribute was specified")
    if ctx.attr.description_file:
        if ctx.attr.description:
            fail("Both description and description_file attributes were specified")
        args += ["--description=@" + ctx.file.description_file.path]
        files += [ctx.file.description_file]
    elif ctx.attr.description:
        args += ["--description=" + ctx.attr.description]
    else:
        fail("Neither description_file nor description attribute was specified")

    # Built using can also be specified by a file or inlined (but is not mandatory)
    if ctx.attr.built_using_file:
        if ctx.attr.built_using:
            fail("Both build_using and built_using_file attributes were specified")
        args += ["--built_using=@" + ctx.file.built_using_file.path]
        files += [ctx.file.built_using_file]
    elif ctx.attr.built_using:
        args += ["--built_using=" + ctx.attr.built_using]

    if ctx.attr.depends_file:
        if ctx.attr.depends:
            fail("Both depends and depends_file attributes were specified")
        args += ["--depends=@" + ctx.file.depends_file.path]
        files += [ctx.file.depends_file]
    elif ctx.attr.depends:
        args += ["--depends=" + d for d in ctx.attr.depends]

    # Optional single-valued control fields.
    if ctx.attr.priority:
        args += ["--priority=" + ctx.attr.priority]
    if ctx.attr.section:
        args += ["--section=" + ctx.attr.section]
    if ctx.attr.homepage:
        args += ["--homepage=" + ctx.attr.homepage]

    # Mandatory/repeated relationship fields.
    args += ["--distribution=" + ctx.attr.distribution]
    args += ["--urgency=" + ctx.attr.urgency]
    args += ["--suggests=" + d for d in ctx.attr.suggests]
    args += ["--enhances=" + d for d in ctx.attr.enhances]
    args += ["--conflicts=" + d for d in ctx.attr.conflicts]
    args += ["--pre_depends=" + d for d in ctx.attr.predepends]
    args += ["--recommends=" + d for d in ctx.attr.recommends]

    # Build the .deb and .changes files. The UTF-8 environment keeps the
    # Python packer working with non-ASCII file names.
    ctx.actions.run(
        mnemonic = "MakeDeb",
        executable = ctx.executable.make_deb,
        arguments = args,
        inputs = files,
        outputs = [ctx.outputs.deb, ctx.outputs.changes],
        env = {
            "LANG": "en_US.UTF-8",
            "LC_CTYPE": "UTF-8",
            "PYTHONIOENCODING": "UTF-8",
            "PYTHONUTF8": "1",
        },
    )

    # `out` (<name>.deb) is a stable symlink alias for the versioned .deb.
    ctx.actions.run_shell(
        command = "ln -s %s %s" % (ctx.outputs.deb.basename, ctx.outputs.out.path),
        inputs = [ctx.outputs.deb],
        outputs = [ctx.outputs.out],
    )
    output_groups = {"out": [ctx.outputs.out]}
    if hasattr(ctx.outputs, "deb"):
        output_groups["deb"] = [ctx.outputs.deb]
    if hasattr(ctx.outputs, "changes"):
        output_groups["changes"] = [ctx.outputs.changes]
    return OutputGroupInfo(**output_groups)
# A rule for creating a tar file, see README.md
pkg_tar_impl = rule(
    implementation = _pkg_tar_impl,
    attrs = {
        # Path handling inside the archive.
        "strip_prefix": attr.string(),
        "package_dir": attr.string(),
        "package_dir_file": attr.label(allow_single_file = True),
        # Content: other tars to merge, plus files/targets to package.
        "deps": attr.label_list(allow_files = tar_filetype),
        "srcs": attr.label_list(allow_files = True),
        "files": attr.label_keyed_string_dict(allow_files = True),
        # Permissions, timestamps and ownership (global defaults plus
        # per-file overrides via the *_dict attributes).
        "mode": attr.string(default = "0555"),
        "modes": attr.string_dict(),
        "mtime": attr.int(default = -1),
        "portable_mtime": attr.bool(default = True),
        "owner": attr.string(default = "0.0"),
        "ownername": attr.string(default = "."),
        "owners": attr.string_dict(),
        "ownernames": attr.string_dict(),
        # Compression is derived from the extension (see _pkg_tar_impl).
        "extension": attr.string(default = "tar"),
        "symlinks": attr.string_dict(),
        "empty_files": attr.string_list(),
        "include_runfiles": attr.bool(),
        "empty_dirs": attr.string_list(),
        "remap_paths": attr.string_dict(),
        # Outputs
        "out": attr.output(),
        # Implicit dependencies.
        "build_tar": attr.label(
            default = Label("@rules_pkg//:build_tar"),
            cfg = "host",
            executable = True,
            allow_files = True,
        ),
    },
)
def pkg_tar(**kwargs):
    """Macro wrapping pkg_tar_impl; derives the output name from `extension`.

    Also migrates callers that still use the legacy flat-list `files`
    attribute (renamed to `srcs`).
    """
    needs_migration = (
        "srcs" not in kwargs and
        "files" in kwargs and
        not hasattr(kwargs["files"], "items")
    )
    if needs_migration:
        label = "%s//%s:%s" % (native.repository_name(), native.package_name(), kwargs["name"])
        print("%s: you provided a non dictionary to the pkg_tar `files` attribute. " % (label,) +
              "This attribute was renamed to `srcs`. " +
              "Consider renaming it in your BUILD file.")
        kwargs["srcs"] = kwargs.pop("files")
    pkg_tar_impl(
        out = "%s.%s" % (kwargs["name"], kwargs.get("extension") or "tar"),
        **kwargs
    )
# A rule for creating a deb file, see README.md
pkg_deb_impl = rule(
    implementation = _pkg_deb_impl,
    attrs = {
        # The payload: a tar archive holding the package contents.
        "data": attr.label(mandatory = True, allow_single_file = tar_filetype),
        # Required control fields.
        "package": attr.string(mandatory = True),
        "architecture": attr.string(default = "all"),
        "distribution": attr.string(default = "unstable"),
        "urgency": attr.string(default = "medium"),
        "maintainer": attr.string(mandatory = True),
        # Maintainer scripts and debconf files.
        "preinst": attr.label(allow_single_file = True),
        "postinst": attr.label(allow_single_file = True),
        "prerm": attr.label(allow_single_file = True),
        "postrm": attr.label(allow_single_file = True),
        "config": attr.label(allow_single_file = True),
        "templates": attr.label(allow_single_file = True),
        # Each of conffiles/version/description/built_using/depends may be
        # given inline or via a *_file label, but not both.
        "conffiles_file": attr.label(allow_single_file = True),
        "conffiles": attr.string_list(default = []),
        "version_file": attr.label(allow_single_file = True),
        "version": attr.string(),
        "description_file": attr.label(allow_single_file = True),
        "description": attr.string(),
        "built_using_file": attr.label(allow_single_file = True),
        "built_using": attr.string(),
        "priority": attr.string(),
        "section": attr.string(),
        "homepage": attr.string(),
        "depends": attr.string_list(default = []),
        "depends_file": attr.label(allow_single_file = True),
        "suggests": attr.string_list(default = []),
        "enhances": attr.string_list(default = []),
        "conflicts": attr.string_list(default = []),
        "predepends": attr.string_list(default = []),
        "recommends": attr.string_list(default = []),
        # Outputs.
        "out": attr.output(mandatory = True),
        "deb": attr.output(mandatory = True),
        "changes": attr.output(mandatory = True),
        # Implicit dependencies.
        "make_deb": attr.label(
            default = Label("@rules_pkg//:make_deb"),
            cfg = "host",
            executable = True,
            allow_files = True,
        ),
    },
)
def pkg_deb(name, package, **kwargs):
    """Creates a deb file. See pkg_deb_impl."""
    # Debian naming convention: <package>_<version>_<architecture>.{deb,changes}
    versioned_basename = "%s_%s_%s" % (
        package,
        kwargs.get("version") or "",
        kwargs.get("architecture") or "all",
    )
    pkg_deb_impl(
        name = name,
        package = package,
        out = name + ".deb",
        deb = versioned_basename + ".deb",
        changes = versioned_basename + ".changes",
        **kwargs
    )
def _format_zip_file_arg(f):
    """Renders a single `source=destination` mapping for the zip builder."""
    destination = dest_path(f, strip_prefix = None)
    return "%s=%s" % (_quote(f.path), destination)
def _pkg_zip_impl(ctx):
    """The implementation for the pkg_zip rule."""
    args = ctx.actions.args()
    args.add("-o", ctx.outputs.out.path)
    args.add("-d", ctx.attr.package_dir)
    args.add("-t", ctx.attr.timestamp)
    args.add_all(
        ctx.files.srcs,
        map_each = _format_zip_file_arg,
    )
    # Large file lists go through a params file so we never exceed the
    # command-line length limit.
    args.set_param_file_format("multiline")
    args.use_param_file("@%s")
    ctx.actions.run(
        mnemonic = "PackageZip",
        inputs = ctx.files.srcs,
        executable = ctx.executable.build_zip,
        arguments = [args],
        outputs = [ctx.outputs.out],
        env = {
            "LANG": "en_US.UTF-8",
            "LC_CTYPE": "UTF-8",
            "PYTHONIOENCODING": "UTF-8",
            "PYTHONUTF8": "1",
        },
        use_default_shell_env = True,
    )
    # Fixed: dropped the stray trailing semicolon and normalized the keyword
    # spacing to match the rest of this file (`out = [...]`).
    return OutputGroupInfo(out = [ctx.outputs.out])
pkg_zip_impl = rule(
    implementation = _pkg_zip_impl,
    attrs = {
        # Output file extension; the archive format is always zip.
        "extension": attr.string(default = "zip"),
        "srcs": attr.label_list(allow_files = True),
        # Directory prefix for all entries inside the archive.
        "package_dir": attr.string(default = "/"),
        # Fixed entry timestamp for reproducible builds
        # (default 315532800 = 1980-01-01, the earliest zip-representable date).
        "timestamp": attr.int(default = 315532800),
        "out": attr.output(),
        # Implicit dependencies.
        "build_zip": attr.label(
            default = Label("@rules_pkg//:build_zip"),
            cfg = "host",
            executable = True,
            allow_files = True,
        ),
    },
)
def pkg_zip(name, **kwargs):
    """Macro wrapping pkg_zip_impl; output is named `<name>.<extension>`."""
    pkg_zip_impl(
        name = name,
        out = "%s.%s" % (name, kwargs.get("extension") or "zip"),
        **kwargs
    )
| 37.072562 | 105 | 0.593553 |
f1bef384cdc1fab118ad54040362f8f405f34bc5 | 1,852 | py | Python | src/pretix/multidomain/models.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-25T00:11:00.000Z | 2020-04-25T00:11:00.000Z | src/pretix/multidomain/models.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/pretix/multidomain/models.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from django.core.cache import cache
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_scopes import scopes_disabled
from pretix.base.models import Event, Organizer
class KnownDomain(models.Model):
    """Maps a custom domain name to a pretix organizer and/or event.

    Whenever a mapping changes, every cache that may hold routing data for
    the domain must be invalidated; that logic previously lived duplicated
    in both ``save`` and ``delete`` and is now factored into
    ``_clear_caches``.
    """

    domainname = models.CharField(max_length=255, primary_key=True)
    organizer = models.ForeignKey(Organizer, blank=True, null=True, related_name='domains', on_delete=models.CASCADE)
    event = models.ForeignKey(Event, blank=True, null=True, related_name='domains', on_delete=models.PROTECT)

    class Meta:
        verbose_name = _("Known domain")
        verbose_name_plural = _("Known domains")

    def __str__(self):
        return self.domainname

    def _clear_caches(self):
        """Invalidate all object and routing caches affected by this domain."""
        if self.event:
            self.event.get_cache().clear()
        elif self.organizer:
            # An organizer-level domain affects all of the organizer's events.
            self.organizer.get_cache().clear()
            for event in self.organizer.events.all():
                event.get_cache().clear()
        cache.delete('pretix_multidomain_organizer_{}'.format(self.domainname))
        cache.delete('pretix_multidomain_instance_{}'.format(self.domainname))
        cache.delete('pretix_multidomain_event_{}'.format(self.domainname))

    @scopes_disabled()
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        self._clear_caches()

    @scopes_disabled()
    def delete(self, *args, **kwargs):
        # Clear caches first, matching the original order of operations.
        self._clear_caches()
        super().delete(*args, **kwargs)
aaa2f0e1b1aaa3e9ba399a50611a62a5138e0060 | 1,674 | py | Python | physicalrobots/player/client_libs/libplayerc/bindings/python/test/test_position2d.py | parasol-ppl/PPL_utils | 92728bb89692fda1705a0dee436592d97922a6cb | [
"BSD-3-Clause"
] | null | null | null | physicalrobots/player/client_libs/libplayerc/bindings/python/test/test_position2d.py | parasol-ppl/PPL_utils | 92728bb89692fda1705a0dee436592d97922a6cb | [
"BSD-3-Clause"
] | null | null | null | physicalrobots/player/client_libs/libplayerc/bindings/python/test/test_position2d.py | parasol-ppl/PPL_utils | 92728bb89692fda1705a0dee436592d97922a6cb | [
"BSD-3-Clause"
] | null | null | null | #/*
# * Player - One Hell of a Robot Server
# * Copyright (C) 2004
# * Andrew Howard
# *
# *
# * This library is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Lesser General Public
# * License as published by the Free Software Foundation; either
# * version 2.1 of the License, or (at your option) any later version.
# *
# * This library is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# * Lesser General Public License for more details.
# *
# * You should have received a copy of the GNU Lesser General Public
# * License along with this library; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Desc: Test the position2d interface
# Author: Andrew Howard
# Date: 15 Sep 2004
# CVS: $Id: test_position2d.py 8114 2009-07-24 11:28:20Z thjc $
from playerc import *
def test_position2d(client, index, context):
"""Basic test of the position2d interface."""
position2d = playerc_position2d(client, index)
if position2d.subscribe(PLAYERC_OPEN_MODE) != 0:
raise playerc_error_str()
for i in range(20):
while 1:
id = client.read()
if id == position2d.info.id:
break
if context:
print context,
print "position2d: [%14.3f] " % (position2d.info.datatime),
print '[%6.3f %6.3f %6.3f]' % (position2d.px, position2d.py, position2d.pa)
position2d.unsubscribe()
return
| 32.823529 | 83 | 0.655317 |
4ee064fab2560824001d4c1b3913d5e5c099482d | 8,257 | py | Python | sdk/python/feast/infra/local.py | jongillham/feast | 786dde7c50318b3a2387e16fe7365599bdcebf58 | [
"Apache-2.0"
] | 3 | 2021-05-07T06:03:44.000Z | 2022-02-20T10:43:59.000Z | sdk/python/feast/infra/local.py | jongillham/feast | 786dde7c50318b3a2387e16fe7365599bdcebf58 | [
"Apache-2.0"
] | null | null | null | sdk/python/feast/infra/local.py | jongillham/feast | 786dde7c50318b3a2387e16fe7365599bdcebf58 | [
"Apache-2.0"
] | 2 | 2021-05-07T06:03:40.000Z | 2021-07-17T08:32:02.000Z | import os
import sqlite3
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import pandas as pd
import pytz
from feast import FeatureTable, utils
from feast.entity import Entity
from feast.feature_view import FeatureView
from feast.infra.key_encoding_utils import serialize_entity_key
from feast.infra.offline_stores.helpers import get_offline_store_from_sources
from feast.infra.provider import (
Provider,
RetrievalJob,
_convert_arrow_to_proto,
_get_column_names,
_run_field_mapping,
)
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.registry import Registry
from feast.repo_config import RepoConfig, SqliteOnlineStoreConfig
class LocalProvider(Provider):
    """Feast provider backed by a local sqlite file as the online store.

    Offline retrieval is delegated to the offline store derived from the
    feature views' data sources; online reads and writes go to a single
    sqlite database whose path comes from the repo config.
    """

    # Resolved path of the sqlite database file.
    _db_path: Path

    def __init__(self, config: RepoConfig, repo_path: Path):
        assert config is not None
        assert config.online_store is not None
        local_online_store_config = config.online_store
        assert isinstance(local_online_store_config, SqliteOnlineStoreConfig)
        local_path = Path(local_online_store_config.path)
        # Relative paths are interpreted relative to the repo root.
        if local_path.is_absolute():
            self._db_path = local_path
        else:
            self._db_path = repo_path.joinpath(local_path)

    def _get_conn(self):
        """Open a sqlite connection, creating the parent directory if needed."""
        Path(self._db_path).parent.mkdir(exist_ok=True)
        # PARSE_DECLTYPES | PARSE_COLNAMES makes sqlite convert columns
        # declared as `timestamp` back into datetime objects on read.
        return sqlite3.connect(
            self._db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
        )

    def update_infra(
        self,
        project: str,
        tables_to_delete: Sequence[Union[FeatureTable, FeatureView]],
        tables_to_keep: Sequence[Union[FeatureTable, FeatureView]],
        entities_to_delete: Sequence[Entity],
        entities_to_keep: Sequence[Entity],
        partial: bool,
    ):
        """Create sqlite tables for kept feature tables/views, drop deleted ones.

        One table per feature table/view, keyed on (entity_key, feature_name).
        NOTE(review): the entity and `partial` arguments are accepted but not
        used by this sqlite implementation.
        """
        conn = self._get_conn()
        for table in tables_to_keep:
            conn.execute(
                f"CREATE TABLE IF NOT EXISTS {_table_id(project, table)} (entity_key BLOB, feature_name TEXT, value BLOB, event_ts timestamp, created_ts timestamp,  PRIMARY KEY(entity_key, feature_name))"
            )
            # Secondary index speeds up the per-entity lookups in online_read.
            conn.execute(
                f"CREATE INDEX IF NOT EXISTS {_table_id(project, table)}_ek ON {_table_id(project, table)} (entity_key);"
            )

        for table in tables_to_delete:
            conn.execute(f"DROP TABLE IF EXISTS {_table_id(project, table)}")

    def teardown_infra(
        self,
        project: str,
        tables: Sequence[Union[FeatureTable, FeatureView]],
        entities: Sequence[Entity],
    ) -> None:
        # Removing the database file drops all tables at once.
        os.unlink(self._db_path)

    def online_write_batch(
        self,
        project: str,
        table: Union[FeatureTable, FeatureView],
        data: List[
            Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
        ],
        progress: Optional[Callable[[int], Any]],
    ) -> None:
        """Write feature rows to the online store.

        Upsert is emulated as UPDATE followed by INSERT OR IGNORE, since the
        same (entity_key, feature_name) pair may already exist.
        NOTE(review): the `progress` callback is accepted but never invoked here.
        """
        conn = self._get_conn()

        with conn:
            for entity_key, values, timestamp, created_ts in data:
                entity_key_bin = serialize_entity_key(entity_key)
                # Timestamps are normalized to naive UTC before storage.
                timestamp = _to_naive_utc(timestamp)
                if created_ts is not None:
                    created_ts = _to_naive_utc(created_ts)

                for feature_name, val in values.items():
                    conn.execute(
                        f"""
                            UPDATE {_table_id(project, table)}
                            SET value = ?, event_ts = ?, created_ts = ?
                            WHERE (entity_key = ? AND feature_name = ?)
                        """,
                        (
                            # SET
                            val.SerializeToString(),
                            timestamp,
                            created_ts,
                            # WHERE
                            entity_key_bin,
                            feature_name,
                        ),
                    )

                    conn.execute(
                        f"""INSERT OR IGNORE INTO {_table_id(project, table)}
                            (entity_key, feature_name, value, event_ts, created_ts)
                            VALUES (?, ?, ?, ?, ?)""",
                        (
                            entity_key_bin,
                            feature_name,
                            val.SerializeToString(),
                            timestamp,
                            created_ts,
                        ),
                    )

    def online_read(
        self,
        project: str,
        table: Union[FeatureTable, FeatureView],
        entity_keys: List[EntityKeyProto],
    ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
        """Read latest feature values for each entity key.

        Returns one (event_ts, {feature_name: value}) pair per requested key,
        or (None, None) when the key has no stored features.
        """
        conn = self._get_conn()
        cur = conn.cursor()

        result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []

        for entity_key in entity_keys:
            entity_key_bin = serialize_entity_key(entity_key)

            cur.execute(
                f"SELECT feature_name, value, event_ts FROM {_table_id(project, table)} WHERE entity_key = ?",
                (entity_key_bin,),
            )

            res = {}
            res_ts = None
            for feature_name, val_bin, ts in cur.fetchall():
                val = ValueProto()
                val.ParseFromString(val_bin)
                res[feature_name] = val
                # All rows of a key share timestamps per write batch; the last
                # row's event_ts is reported for the whole key.
                res_ts = ts

            if not res:
                result.append((None, None))
            else:
                result.append((res_ts, res))
        return result

    def materialize_single_feature_view(
        self,
        feature_view: FeatureView,
        start_date: datetime,
        end_date: datetime,
        registry: Registry,
        project: str,
    ) -> None:
        """Pull the latest rows for [start_date, end_date] from the offline
        store and write them into the sqlite online store, then record the
        materialized interval on the feature view in the registry.
        """
        entities = []
        for entity_name in feature_view.entities:
            entities.append(registry.get_entity(entity_name, project))

        (
            join_key_columns,
            feature_name_columns,
            event_timestamp_column,
            created_timestamp_column,
        ) = _get_column_names(feature_view, entities)

        start_date = utils.make_tzaware(start_date)
        end_date = utils.make_tzaware(end_date)

        offline_store = get_offline_store_from_sources([feature_view.input])
        table = offline_store.pull_latest_from_table_or_query(
            data_source=feature_view.input,
            join_key_columns=join_key_columns,
            feature_name_columns=feature_name_columns,
            event_timestamp_column=event_timestamp_column,
            created_timestamp_column=created_timestamp_column,
            start_date=start_date,
            end_date=end_date,
        )

        # Apply source-level column renames before conversion to protos.
        if feature_view.input.field_mapping is not None:
            table = _run_field_mapping(table, feature_view.input.field_mapping)

        join_keys = [entity.join_key for entity in entities]
        rows_to_write = _convert_arrow_to_proto(table, feature_view, join_keys)

        self.online_write_batch(project, feature_view, rows_to_write, None)

        feature_view.materialization_intervals.append((start_date, end_date))
        registry.apply_feature_view(feature_view, project)

    @staticmethod
    def get_historical_features(
        config: RepoConfig,
        feature_views: List[FeatureView],
        feature_refs: List[str],
        entity_df: Union[pd.DataFrame, str],
        registry: Registry,
        project: str,
    ) -> RetrievalJob:
        """Delegate historical retrieval to the offline store shared by the
        given feature views' data sources."""
        offline_store = get_offline_store_from_sources(
            [feature_view.input for feature_view in feature_views]
        )
        return offline_store.get_historical_features(
            config=config,
            feature_views=feature_views,
            feature_refs=feature_refs,
            entity_df=entity_df,
            registry=registry,
            project=project,
        )
def _table_id(project: str, table: Union[FeatureTable, FeatureView]) -> str:
    """Builds the sqlite table name for a feature table/view within a project."""
    return "{}_{}".format(project, table.name)
def _to_naive_utc(ts: datetime):
if ts.tzinfo is None:
return ts
else:
return ts.astimezone(pytz.utc).replace(tzinfo=None)
| 34.987288 | 204 | 0.595374 |
077a0adb87e7a11767dd69ca38c9a40673d9d479 | 5,531 | py | Python | 6_preview_outliers.py | jphdotam/T1T2 | b5003f5cf3aaddc4f43a7b7b4a77f52cef956c27 | [
"MIT"
] | null | null | null | 6_preview_outliers.py | jphdotam/T1T2 | b5003f5cf3aaddc4f43a7b7b4a77f52cef956c27 | [
"MIT"
] | null | null | null | 6_preview_outliers.py | jphdotam/T1T2 | b5003f5cf3aaddc4f43a7b7b4a77f52cef956c27 | [
"MIT"
] | null | null | null | import os
import numpy as np
import skimage.io
import matplotlib.pyplot as plt
from glob import glob
import torch
from lib.cfg import load_config
from lib.hrnet import get_hrnet_model, get_hrnet_cfg
from lib.landmarks import load_landmark_model, perform_cmr_landmark_detection, extend_landmarks
from lib.tracing import get_epi_end_paths_from_heatmap_and_landmarks as get_paths
from lib.inference import center_crop, pad_if_needed, get_normalized_channel_stack, prep_normalized_stack_for_inference, \
tta, paths_to_ridge_polygons
from lib.vis import compute_bullseye_sector_mask_for_slice
from lib.dataset import load_npy_file
from lib.windows import normalize_data
from lib.cmaps import default_cmap
CONFIG = "experiments/036_mini.yaml"
POSE_MODELPATH = r"E:\Dropbox\Work\Other projects\T1T2\output\models\036_mini\70_0.0010686.pt"
# CONFIG = "experiments/030.yaml"
# POSE_MODELPATH = r"E:\Dropbox\Work\Other projects\T1T2\output\models\030\154_0.0004970.pt"
LANDMARK_MODELPATH = "./data/models/landmark_model.pts"
TEST_DICOM_DIR = r"E:\Dropbox\Work\Other projects\T1T2\data\dicoms\mini_test"
FOV = 256
DEVICE = "cuda"
cfg, _ = load_config(CONFIG)
dates_for_studies = glob(os.path.join(TEST_DICOM_DIR, "**/*.npy"), recursive=True)
dates_for_studies = {os.path.basename(os.path.dirname(f)): os.path.basename(os.path.dirname(os.path.dirname(f))) for f
in dates_for_studies}
# LOAD MODELS
model = get_hrnet_model(get_hrnet_cfg(cfg)).to(DEVICE)
model = model.eval()
model.load_state_dict(torch.load(POSE_MODELPATH)['state_dict'])
# OUTLIERS = ['T1T2_141613_54120998_54121006_116_20201113-103051__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
# 'T1T2_141613_54120998_54121006_116_20201113-103051__T1_T2_PD_SLC2_CON0_PHS0_REP0_SET0_AVE0_3.npy',
# 'T1T2_141316_22451303_22451310_237_20201118-114441__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy']
OUTLIERS = ['T1T2_141613_50451101_50451109_342_20201110-130053__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
'T1T2_141316_12997720_12997727_307_20201109-115256__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
'T1T2_141613_46420590_46420598_374_20201105-125731__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
'T1T2_141316_9163956_9163963_427_20201106-152602__T1_T2_PD_SLC0_CON0_PHS0_REP0_SET0_AVE0_1.npy',
'T1T2_141316_7967828_7967835_263_20201105-112420__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
'T1T2_141316_12997805_12997812_572_20201109-155800__T1_T2_PD_SLC0_CON0_PHS0_REP0_SET0_AVE0_1.npy',
'T1T2_141316_12997720_12997727_307_20201109-115256__T1_T2_PD_SLC2_CON0_PHS0_REP0_SET0_AVE0_3.npy',
'T1T2_141613_47862334_47862342_176_20201106-104622__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
'T1T2_141316_7967782_7967789_58_20201105-084313__T1_T2_PD_SLC1_CON0_PHS0_REP0_SET0_AVE0_2.npy',
'T1T2_141316_12997851_12997858_828_20201109-183708__T1_T2_PD_SLC0_CON0_PHS0_REP0_SET0_AVE0_1.npy']
landmark_model = load_landmark_model(LANDMARK_MODELPATH)
def plot_predictions(t1_pre, t1_post, t2, pred, mask_lvcav, mask_lvwall, landmark_points=None):
fig, axes = plt.subplots(2, 3, figsize=(10, 10))
# Native
axes[0, 0].imshow(t1_pre, cmap=default_cmap)
axes[0, 1].imshow(t1_post, cmap=default_cmap)
axes[0, 2].imshow(t2, cmap=default_cmap)
axes[1, 0].imshow(np.max(pred, axis=0))
axes[1, 1].imshow(mask_lvcav + pred[1])
axes[1, 2].imshow(mask_lvwall + pred[0])
if landmark_points is not None:
x_ant = landmark_points[[0, 2], 0]
y_ant = landmark_points[[0, 2], 1]
x_post = landmark_points[[1, 2], 0]
y_post = landmark_points[[1, 2], 1]
axes[1, 0].plot(x_ant, y_ant)
axes[1, 0].plot(x_post, y_post)
axes[1, 1].plot(x_ant, y_ant)
axes[1, 1].plot(x_post, y_post)
axes[1, 2].plot(x_ant, y_ant)
axes[1, 2].plot(x_post, y_post)
fig.show()
for i, outlier in enumerate(OUTLIERS):
seq_dir, npy_name = outlier.split('__')
date_dir = dates_for_studies[seq_dir]
t1w, t2w, pd, t1_raw, t2_raw = load_npy_file(os.path.join(TEST_DICOM_DIR, date_dir, seq_dir, npy_name))
t1_pre, t1_post, t2, t1w, t2w, pd, t1_t2 = get_normalized_channel_stack(t1_raw, t2_raw, t1w, t2w, pd,
data_stack_format='all')
x = prep_normalized_stack_for_inference(t1_t2, FOV, as_tensor=True, tensor_device=DEVICE)
# Landmark detection
t2w_landmark, _top_left_landmark = center_crop(pad_if_needed(t2w,
min_height=FOV,
min_width=FOV),
crop_height=FOV,
crop_width=FOV)
landmark_points, landmark_probs = perform_cmr_landmark_detection(t2w_landmark, model=landmark_model)
if np.any(landmark_points == -1):
print(f"Skipping {npy_name} - unable to identify all landmarks")
continue
landmark_points = extend_landmarks(landmark_points, FOV)
with torch.no_grad():
pred = tta(model, x).cpu().numpy()[0]
(xs_epi, ys_epi), (xs_end, ys_end) = get_paths(pred, landmark_points)
mask_lvcav, mask_lvwall = paths_to_ridge_polygons(xs_epi, ys_epi, xs_end, ys_end, FOV)
plot_predictions(t1_pre, t1_post, t2, pred, mask_lvcav, mask_lvwall, landmark_points)
break
| 46.091667 | 122 | 0.722654 |
4720c4367c3db96a85b8241012dd65354d807e6f | 2,017 | py | Python | third_party/webrtc/src/chromium/src/tools/perf/benchmarks/text_selection.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 8 | 2016-02-08T11:59:31.000Z | 2020-05-31T15:19:54.000Z | third_party/webrtc/src/chromium/src/tools/perf/benchmarks/text_selection.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 1 | 2021-05-05T11:11:31.000Z | 2021-05-05T11:11:31.000Z | third_party/webrtc/src/chromium/src/tools/perf/benchmarks/text_selection.py | bopopescu/webrtc-streaming-node | 727a441204344ff596401b0253caac372b714d91 | [
"MIT"
] | 7 | 2016-02-09T09:28:14.000Z | 2020-07-25T19:03:36.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from telemetry import benchmark
from telemetry.timeline import tracing_category_filter
from telemetry.web_perf import timeline_based_measurement
import page_sets
TEXT_SELECTION_CATEGORY = 'blink'
TIMELINE_REQUIRED_CATEGORY = 'blink.console'
class _TextSelection(perf_benchmark.PerfBenchmark):
    """Base benchmark for text-selection metrics, measured via timeline-based
    measurement on the text selection page set."""

    page_set = page_sets.TextSelectionSitesPageSet

    def CreateTimelineBasedMeasurementOptions(self):
        # Trace with minimal overhead plus the categories needed for
        # text-selection metrics and timeline construction.
        cat_filter = tracing_category_filter.CreateMinimalOverheadFilter()
        cat_filter.AddIncludedCategory(TEXT_SELECTION_CATEGORY)
        cat_filter.AddIncludedCategory(TIMELINE_REQUIRED_CATEGORY)
        return timeline_based_measurement.Options(
            overhead_level=cat_filter)

    @classmethod
    def Name(cls):
        return 'text_selection'

    @classmethod
    def ValueCanBeAddedPredicate(cls, value, is_first_result):
        # Only keep text-selection values that actually carry data.
        if 'text-selection' not in value.name:
            return False
        # Fixed: `value.values != None` (PEP 8 E711) -> identity check.
        return value.values is not None
# See crbug.com/519044
@benchmark.Disabled()
class TextSelectionDirection(_TextSelection):
"""Measure text selection metrics while dragging a touch selection handle on a
subset of top ten mobile sites and using the 'direction' touch selection
strategy."""
def SetExtraBrowserOptions(self, options):
options.AppendExtraBrowserArgs(['--touch-selection-strategy=direction'])
@classmethod
def Name(cls):
return 'text_selection.direction'
# See crbug.com/519044
@benchmark.Disabled()
class TextSelectionCharacter(_TextSelection):
    """Measure text selection metrics while dragging a touch selection handle on a
    subset of top ten mobile sites and using the 'character' touch selection
    strategy."""

    def SetExtraBrowserOptions(self, options):
        # Select the 'character' handle-drag strategy via a browser flag.
        options.AppendExtraBrowserArgs(['--touch-selection-strategy=character'])

    @classmethod
    def Name(cls):
        return 'text_selection.character'
adcd5c9ada1beb5129d33d1662b9122df2b7d4e3 | 2,926 | py | Python | pandapower/pf/dSbus_dV_numba.py | suzannejanssen/pandapower | 8d0d422c28924c85e774e0e357e4abff86ff3c55 | [
"BSD-3-Clause"
] | 1 | 2020-10-19T06:39:15.000Z | 2020-10-19T06:39:15.000Z | pandapower/pf/dSbus_dV_numba.py | miek770/pandapower | de004efc1b7432a633792af4f551f7635a02db47 | [
"BSD-3-Clause"
] | null | null | null | pandapower/pf/dSbus_dV_numba.py | miek770/pandapower | de004efc1b7432a633792af4f551f7635a02db47 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2018 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
from numba import jit
from numpy import conj, zeros, complex128
from scipy.sparse import issparse, csr_matrix as sparse
from pandapower.pf.dSbus_dV_pypower import dSbus_dV_dense
# @jit(Tuple((c16[:], c16[:]))(c16[:], i4[:], i4[:], c16[:], c16[:]), nopython=True, cache=True)
@jit(nopython=True, cache=True)
def dSbus_dV_numba_sparse(Yx, Yp, Yj, V, Vnorm, Ibus):  # pragma: no cover
    """Computes partial derivatives of power injection w.r.t. voltage.

    Calculates faster with numba and sparse matrices.

    Input: Ybus in CSR sparse form (Yx = data, Yp = indptr, Yj = indices), V and Vnorm (= V / abs(V))

    OUTPUT: data from CSR form of dS_dVm, dS_dVa
    (index pointer and indices are the same as the ones from Ybus)

    Translation of: dS_dVm = dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
                    dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)

    NOTE: Ibus is accumulated in place (the caller passes -I; see dSbus_dV).
    """

    # transform input

    # init buffer vector
    buffer = zeros(len(V), dtype=complex128)
    # dS_dVm / dS_dVa share Ybus' sparsity pattern, so start from copies of its data
    dS_dVm = Yx.copy()
    dS_dVa = Yx.copy()

    # iterate through sparse matrix (first pass: row-wise products)
    for r in range(len(Yp) - 1):
        for k in range(Yp[r], Yp[r + 1]):
            # Ibus = Ybus * V
            buffer[r] += Yx[k] * V[Yj[k]]
            # Ybus * diag(Vnorm)
            dS_dVm[k] *= Vnorm[Yj[k]]
            # Ybus * diag(V)
            dS_dVa[k] *= V[Yj[k]]

        Ibus[r] += buffer[r]

        # conj(diagIbus) * diagVnorm
        buffer[r] = conj(buffer[r]) * Vnorm[r]

    # second pass: combine with diagonal terms
    for r in range(len(Yp) - 1):
        for k in range(Yp[r], Yp[r + 1]):
            # diag(V) * conj(Ybus * diagVnorm)
            dS_dVm[k] = conj(dS_dVm[k]) * V[r]

            if r == Yj[k]:
                # diagonal elements
                dS_dVa[k] = -Ibus[r] + dS_dVa[k]
                dS_dVm[k] += buffer[r]

            # 1j * diagV * conj(diagIbus - Ybus * diagV)
            dS_dVa[k] = conj(-dS_dVa[k]) * (1j * V[r])

    return dS_dVm, dS_dVa
def dSbus_dV(Ybus, V, I=None):
    """
    Calls functions to calculate dS/dV depending on whether Ybus is sparse or not.

    Returns the pair (dS_dVm, dS_dVa); for sparse Ybus both share Ybus'
    sparsity pattern and are returned as CSR matrices.
    """
    if issparse(Ybus):
        # I is substracted from Y*V,
        # therefore it must be negative for numba version of dSbus_dV if it is not zeros anyways
        I = zeros(len(V), dtype=complex128) if I is None else -I
        # calculates sparse data (numba kernel mutates its Ibus argument in place)
        dS_dVm, dS_dVa = dSbus_dV_numba_sparse(Ybus.data, Ybus.indptr, Ybus.indices, V, V / abs(V), I)
        # generate sparse CSR matrices with computed data and return them
        return sparse((dS_dVm, Ybus.indices, Ybus.indptr)), sparse((dS_dVa, Ybus.indices, Ybus.indptr))
    else:
        # dense fallback; note the sign convention differs from the sparse path
        I = zeros(len(V), dtype=complex128) if I is None else I
        return dSbus_dV_dense(Ybus, V, I)
| 35.253012 | 103 | 0.595352 |
941f19ce2a9a332e5f6e1b69f49df4bb98e9a4c5 | 7,335 | py | Python | proxy_checker.py | AlanBaumgartner/proxy_checker | d7c357d173241f3383c4295fd3b90ce977a05908 | [
"MIT"
] | 1 | 2017-04-24T20:45:32.000Z | 2017-04-24T20:45:32.000Z | proxy_checker.py | AlanBaumgartner/proxy_checker | d7c357d173241f3383c4295fd3b90ce977a05908 | [
"MIT"
] | 1 | 2017-04-10T18:43:47.000Z | 2017-04-14T21:33:46.000Z | proxy_checker.py | AlanBaumgartner/proxy_checker | d7c357d173241f3383c4295fd3b90ce977a05908 | [
"MIT"
] | null | null | null | import sys, aiohttp, asyncio
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
__author__ = 'Alan Baumgartner'
class ImportDialog(QDialog):
    """Modal dialog asking the user for a filename to import proxies from."""

    def __init__(self):
        super().__init__()
        # Drop the "?" context-help button; block the main window while open.
        self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowTitle('Import Proxies')
        layout = QGridLayout()
        self.file_label = QLabel('Filename')
        self.file_text = QLineEdit()
        buttons = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
            Qt.Horizontal, self)
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        layout.addWidget(self.file_label, 0, 0)
        layout.addWidget(self.file_text, 0, 1)
        layout.addWidget(buttons, 1, 0, 2, 0)
        self.setLayout(layout)
        self.setGeometry(400, 400, 300, 60)

    @staticmethod
    def getFileInfo():
        """Show the dialog modally; return (filename, accepted)."""
        dialog = ImportDialog()
        result = dialog.exec_()
        return dialog.file_text.text(), result == QDialog.Accepted
class ExportDialog(QDialog):
    """Modal dialog that asks the user for a file name to export proxies to."""
    def __init__(self):
        super().__init__()
        # Drop the "?" context-help button and block the rest of the app
        # while this dialog is open.
        self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)
        self.setWindowModality(Qt.ApplicationModal)
        self.setWindowTitle('Export Proxies')
        grid = QGridLayout()
        self.file_label = QLabel('Filename')
        self.file_text = QLineEdit()
        button_box = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
            Qt.Horizontal, self)
        button_box.accepted.connect(self.accept)
        button_box.rejected.connect(self.reject)
        grid.addWidget(self.file_label, 0, 0)
        grid.addWidget(self.file_text, 0, 1)
        grid.addWidget(button_box, 1, 0, 2, 0)
        self.setLayout(grid)
        self.setGeometry(400, 400, 300, 60)
    @staticmethod
    def getFileInfo():
        """Show the dialog and return (filename, accepted?)."""
        dialog = ExportDialog()
        outcome = dialog.exec_()
        return dialog.file_text.text(), outcome == QDialog.Accepted
class Checker(QThread):
    """Background thread that tests every proxy from the input pane.

    Emits ``update`` with each working proxy string and ``pupdate`` with the
    running count of proxies processed (drives the progress bar).
    """
    update = pyqtSignal(object)
    pupdate = pyqtSignal(object)
    # Class-level counter; `self.count += 1` below creates a per-instance
    # copy on first increment, so each new Checker starts from 0.
    count = 0
    URL = 'http://check-host.net/ip'
    def run(self):
        """Thread entry point: run the asyncio checking job to completion."""
        # QThread.run executes outside the main thread, so a dedicated
        # event loop must be created and installed here.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self.main())
        finally:
            loop.close()
    async def check_proxies(self, proxy, orginal_ip, session, sem, lock):
        """Fetch URL through *proxy*; emit the proxy if the reported IP changed.

        The semaphore caps concurrent requests; the lock serialises the
        shared progress counter.
        """
        async with sem:
            try:
                async with session.get(self.URL, proxy=proxy, timeout=3) as resp:
                    response = (await resp.read()).decode()
                    # A different reported IP means the proxy actually works.
                    if response != orginal_ip:
                        self.update.emit(proxy)
            except:
                # NOTE(review): bare except deliberately swallows timeouts and
                # connection errors -- a failing proxy is simply not reported.
                pass
            finally:
                # `with await lock` is the legacy (pre-Python 3.5.3) idiom;
                # modern code would use `async with lock`.
                with await lock:
                    self.count += 1
                    self.pupdate.emit(self.count)
    async def main(self):
        """Discover the unproxied IP, then check all proxies concurrently."""
        sem = asyncio.BoundedSemaphore(50)
        lock = asyncio.Lock()
        async with aiohttp.ClientSession() as session:
            async with session.get(self.URL) as resp:
                orginal_ip = (await resp.read()).decode()
            # get_proxies is defined at module level inside the __main__ block.
            proxies = get_proxies()
            tasks = [self.check_proxies(proxy, orginal_ip, session, sem, lock) for proxy in proxies]
            await asyncio.gather(*tasks)
class App(QMainWindow):
    """Main window: input/output proxy panes, start/stop buttons, file menu."""
    def __init__(self):
        # Window title/geometry attributes, then build the widget tree.
        super().__init__()
        self.title = 'Proxy Checker'
        self.left = 300
        self.top = 300
        self.width = 500
        self.height = 500
        self.initUI()
    def initUI(self):
        """Build the central widget, menu bar, text panes and buttons."""
        #Setup layout
        wid = QWidget(self)
        self.setCentralWidget(wid)
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        layout = QGridLayout()
        wid.setLayout(layout)
        #Create Widgets
        menu_bar = self.menuBar()
        menu = menu_bar.addMenu("File")
        import_action = QAction("Import Proxies", self)
        import_action.triggered.connect(self.import_proxies)
        export_action = QAction("Export Proxies", self)
        export_action.triggered.connect(self.export_proxies)
        quit_action = QAction("Close", self)
        quit_action.triggered.connect(self.quit)
        menu.addAction(import_action)
        menu.addAction(export_action)
        menu.addAction(quit_action)
        self.start_button = QPushButton('Start')
        self.start_button.clicked.connect(self.start_clicked)
        self.stop_button = QPushButton('Stop')
        self.stop_button.clicked.connect(self.stop_clicked)
        self.input_text = QTextEdit()
        self.output_text = QTextEdit()
        self.input_label = QLabel('Proxies to Check')
        self.input_label.setAlignment(Qt.AlignCenter)
        self.output_label = QLabel('Working Proxies')
        self.output_label.setAlignment(Qt.AlignCenter)
        self.progress_bar = QProgressBar()
        #Add widgets to gui
        layout.addWidget(self.input_label, 0, 0)
        layout.addWidget(self.output_label, 0, 1)
        layout.addWidget(self.input_text, 1, 0)
        layout.addWidget(self.output_text, 1, 1)
        layout.addWidget(self.start_button, 2, 0)
        layout.addWidget(self.stop_button, 2, 1)
        layout.addWidget(self.progress_bar, 3, 0, 4, 0)
    #When start button is clicked, start the QThread to check for proxies
    def start_clicked(self):
        """Reset the output pane and progress bar, then launch a Checker."""
        proxies = get_proxies()
        self.progress_bar.setMaximum(len(proxies))
        self.output_text.setText('')
        self.thread = Checker(self)
        self.thread.update.connect(self.update_text)
        self.thread.pupdate.connect(self.update_progress)
        self.thread.start()
    #When stop button is clicked, terminate the thread
    def stop_clicked(self):
        """Forcibly terminate the checker thread, if one is running."""
        try:
            self.thread.terminate()
        except:
            # No thread was started yet (or it is already gone).
            pass
    def update_text(self, text):
        """Slot: append a working proxy to the output pane."""
        self.output_text.append(str(text))
    def update_progress(self, val):
        """Slot: advance the progress bar to *val* proxies processed."""
        self.progress_bar.setValue(val)
    def export_proxies(self):
        """Save the contents of the output pane to a user-chosen file."""
        exportDialog = ExportDialog()
        filename, result = exportDialog.getFileInfo()
        if result:
            try:
                proxies = self.output_text.toPlainText()
                proxies = proxies.strip()
                with open(filename, "w") as a:
                    a.write(proxies)
            except:
                # NOTE(review): write errors are silently ignored.
                pass
        else:
            pass
    def import_proxies(self):
        """Load proxies from a user-chosen file into the input pane."""
        importDialog = ImportDialog()
        filename, result = importDialog.getFileInfo()
        if result:
            try:
                with open(filename, "r") as f:
                    out = f.read()
                self.input_text.setText(out)
            except:
                # NOTE(review): read errors are silently ignored.
                pass
        else:
            pass
    def quit(self):
        """Menu action: exit the application immediately."""
        sys.exit()
if __name__ == '__main__':
    # Defined here (not on Checker) so it can read the live input pane of the
    # module-level `window`; Checker.main() looks it up by name at call time.
    def get_proxies():
        """Return the proxies currently typed in the input pane, one per line."""
        proxies = window.input_text.toPlainText()
        proxies = proxies.strip()
        proxies = proxies.split('\n')
        return proxies
    app = QApplication(sys.argv)
    window = App()
    window.show()
    sys.exit(app.exec_())
e43960744ac21d0b5a7f88e2704f498273981e38 | 580 | py | Python | src/Sprites/Groups/nests.py | IUS-CS/project-scrumger-games | d8c6bad99ccaf10d3cca05b6fc44799e2f46ad2a | [
"MIT"
] | 3 | 2021-02-02T18:11:34.000Z | 2021-04-16T20:47:40.000Z | src/Sprites/Groups/nests.py | IUS-CS/project-scrumger-games | d8c6bad99ccaf10d3cca05b6fc44799e2f46ad2a | [
"MIT"
] | 3 | 2021-03-10T14:34:05.000Z | 2021-04-14T03:11:56.000Z | src/Sprites/Groups/nests.py | IUS-CS/project-scrumger-games | d8c6bad99ccaf10d3cca05b6fc44799e2f46ad2a | [
"MIT"
] | 5 | 2021-02-02T19:01:01.000Z | 2021-12-14T22:19:54.000Z | import pygame.sprite
class DisabledNests(pygame.sprite.Group):
    """
    Sprite group holding the nests a player has already reached.

    Contains a static method used to check if all the nests have been
    reached, which constitutes the win condition.
    """
    @staticmethod
    def check_for_win(player):
        """
        Return whether the player has reached all 5 nests.

        - :param player:
            A Player object. Its ``disabled_nests`` group is checked for
            the win condition.
        - :return:
            bool -- True once five or more nests have been disabled.
        """
        # The original `if ...: return True / else: return False` collapsed
        # into a single boolean expression; the docstring also wrongly said
        # the method returns None.
        return len(player.disabled_nests.sprites()) >= 5
d24f2160a6df7475ec3c76bd9ffe0e27487dd5b3 | 4,025 | py | Python | source/pythonds3/basic/linked_list.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | [
"BSD-2-Clause"
] | null | null | null | source/pythonds3/basic/linked_list.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | [
"BSD-2-Clause"
] | null | null | null | source/pythonds3/basic/linked_list.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Bradley N. Miller, David L. Ranum
Problem Solving with Algorithms and Data Structures using Python
Copyright 2005
Updated by Roman Yasinovskyy, 2017
"""
from abc import ABC, abstractmethod
class LinkedListNode:
    """A single node of a singly linked list."""
    def __init__(self, node_data):
        # Internal storage for the payload and the successor link.
        self._payload = node_data
        self._successor = None
    def get_data(self):
        """Return the payload stored in this node."""
        return self._payload
    def set_data(self, node_data):
        """Replace the payload stored in this node."""
        self._payload = node_data
    data = property(get_data, set_data)
    def get_next(self):
        """Return the following node, or None at the tail."""
        return self._successor
    def set_next(self, node_next):
        """Point this node at a new successor."""
        self._successor = node_next
    next = property(get_next, set_next)
    def __str__(self):
        """Render the node as the string form of its payload."""
        return str(self._payload)
class LinkedList(ABC):
    """Abstract base class for linked list implementations."""
    def __init__(self):
        """Create an empty linked list."""
        self._head = None
        self._count = 0
    def is_empty(self):
        """Return True when the list contains no nodes."""
        return self._head is None
    def size(self):
        """Return the number of nodes in the list."""
        return self._count
    def __len__(self):
        """Return the number of nodes in the list."""
        return self._count
    def __str__(self):
        """Render the list as '[a, b, c]'."""
        rendered = []
        node = self._head
        while node:
            rendered.append(str(node))
            node = node.next
        return "[" + ", ".join(rendered) + "]"
    @abstractmethod
    def add(self, value):
        """Add a new node"""
    @abstractmethod
    def remove(self, value):
        """Remove a node with a specific value"""
    @abstractmethod
    def search(self, value):
        """Search for a node with a specific value"""
class UnorderedList(LinkedList):
    """Singly linked list with no ordering; new items are prepended."""
    def __init__(self):
        """Create an empty unordered linked list."""
        LinkedList.__init__(self)
    def add(self, value):
        """Prepend a new node holding *value*."""
        node = LinkedListNode(value)
        node.set_next(self._head)
        self._head = node
        self._count += 1
    def remove(self, value):
        """Unlink the first node holding *value*; raise ValueError if absent."""
        previous, node = None, self._head
        while node:
            if node.data == value:
                if previous is None:
                    # Removing the head node.
                    self._head = node.next
                else:
                    previous.next = node.next
                self._count -= 1
                return
            previous, node = node, node.next
        raise ValueError("{} is not in the list".format(value))
    def search(self, value):
        """Return True if some node holds *value*."""
        node = self._head
        while node:
            if node.data == value:
                return True
            node = node.next
        return False
class OrderedList(LinkedList):
    """Ordered linked list implementation"""
    def __init__(self):
        """Create an Ordered linked list"""
        LinkedList.__init__(self)
    def add(self, value):
        """Add a new node, keeping the list sorted in ascending order."""
        current = self._head
        prev = None
        new_node = LinkedListNode(value)
        # Walk until the first node whose data is >= value; the new node is
        # inserted just before it.
        while current and current.data < value:
            prev = current
            current = current.next
        if prev is None:
            # Empty list or new smallest element: the new node becomes head.
            new_node.next = self._head
            self._head = new_node
        else:
            new_node.next = current
            prev.next = new_node
        self._count = self._count + 1
    def remove(self, value):
        """Remove a node with a specific value"""
        # This is an exercise
        pass
    def search(self, value):
        """Search for a node with a specific value"""
        # This is an exercise
        pass
| 23.816568 | 64 | 0.553789 |
639dfa4bc3d148e274257ac46be6a79da9c0ab73 | 8,754 | py | Python | my_happy_pandas/core/window/common.py | ggservice007/my-happy-pandas | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | [
"Apache-2.0"
] | null | null | null | my_happy_pandas/core/window/common.py | ggservice007/my-happy-pandas | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | [
"Apache-2.0"
] | null | null | null | my_happy_pandas/core/window/common.py | ggservice007/my-happy-pandas | 63145d54e452177f7d5b2fc8fdbc1fdf37dd5b16 | [
"Apache-2.0"
] | null | null | null | """Common utility functions for rolling operations"""
from collections import defaultdict
from typing import Callable, Optional
import warnings
import numpy as np
from my_happy_pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from my_happy_pandas.core.generic import _shared_docs
from my_happy_pandas.core.groupby.base import GroupByMixin
from my_happy_pandas.core.indexes.api import MultiIndex
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
my_happy_pandas.Series.%(name)s : Calling object with Series data.
pandas.DataFrame.%(name)s : Calling object with DataFrame data.
my_happy_pandas.Series.%(func_name)s : Similar method for Series.
pandas.DataFrame.%(func_name)s : Similar method for DataFrame.
"""
def _dispatch(name: str, *args, **kwargs):
"""
Dispatch to apply.
"""
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
class WindowGroupByMixin(GroupByMixin):
    """
    Provide the groupby facilities.
    """
    def __init__(self, obj, *args, **kwargs):
        kwargs.pop("parent", None)
        groupby = kwargs.pop("groupby", None)
        if groupby is None:
            # Called with the groupby object itself: split it into the
            # groupby handle and the underlying selected data.
            groupby, obj = obj, obj._selected_obj
        self._groupby = groupby
        # Mark the groupby as mutated so downstream code re-derives state.
        self._groupby.mutated = True
        self._groupby.grouper.mutated = True
        super().__init__(obj, *args, **kwargs)
    # Per-group delegating methods built by _dispatch.
    count = _dispatch("count")
    corr = _dispatch("corr", other=None, pairwise=None)
    cov = _dispatch("cov", other=None, pairwise=None)
    def _apply(
        self,
        func: Callable,
        center: bool,
        require_min_periods: int = 0,
        floor: int = 1,
        is_weighted: bool = False,
        name: Optional[str] = None,
        use_numba_cache: bool = False,
        **kwargs,
    ):
        """
        Dispatch to apply; we are stripping all of the _apply kwargs and
        performing the original function call on the grouped object.
        """
        # These kwargs only make sense for the non-grouped code path.
        kwargs.pop("floor", None)
        kwargs.pop("original_func", None)
        # TODO: can we de-duplicate with _dispatch?
        def f(x, name=name, *args):
            x = self._shallow_copy(x)
            # A string names a method on the windowed object; anything else
            # is a callable handed to .apply.
            if isinstance(name, str):
                return getattr(x, name)(*args, **kwargs)
            return x.apply(name, *args, **kwargs)
        return self._groupby.apply(f)
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
    """Apply the binary moment function *f* over all Series/column pairings.

    Handles Series-vs-Series directly; DataFrame inputs are expanded either
    column-by-column (pairwise=False) or over the full cross product of
    columns (pairwise=True, producing a MultiIndexed result).
    """
    if not (
        isinstance(arg1, (np.ndarray, ABCSeries, ABCDataFrame))
        and isinstance(arg2, (np.ndarray, ABCSeries, ABCDataFrame))
    ):
        raise TypeError(
            "arguments to moment function must be of type np.ndarray/Series/DataFrame"
        )
    if isinstance(arg1, (np.ndarray, ABCSeries)) and isinstance(
        arg2, (np.ndarray, ABCSeries)
    ):
        # Simple case: two 1-D inputs -- mask them against each other and go.
        X, Y = prep_binary(arg1, arg2)
        return f(X, Y)
    elif isinstance(arg1, ABCDataFrame):
        from my_happy_pandas import DataFrame
        def dataframe_from_int_dict(data, frame_template):
            # Keys of `data` are positional column indices into the template.
            result = DataFrame(data, index=frame_template.index)
            if len(result.columns) > 0:
                result.columns = frame_template.columns[result.columns]
            return result
        results = {}
        if isinstance(arg2, ABCDataFrame):
            if pairwise is False:
                if arg1 is arg2:
                    # special case in order to handle duplicate column names
                    for i, col in enumerate(arg1.columns):
                        results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
                    return dataframe_from_int_dict(results, arg1)
                else:
                    if not arg1.columns.is_unique:
                        raise ValueError("'arg1' columns are not unique")
                    if not arg2.columns.is_unique:
                        raise ValueError("'arg2' columns are not unique")
                    with warnings.catch_warnings(record=True):
                        warnings.simplefilter("ignore", RuntimeWarning)
                        X, Y = arg1.align(arg2, join="outer")
                    # Propagate each frame's NaNs into the other so both
                    # sides share a common mask.
                    X = X + 0 * Y
                    Y = Y + 0 * X
                    with warnings.catch_warnings(record=True):
                        warnings.simplefilter("ignore", RuntimeWarning)
                        res_columns = arg1.columns.union(arg2.columns)
                    for col in res_columns:
                        if col in X and col in Y:
                            results[col] = f(X[col], Y[col])
                    return DataFrame(results, index=X.index, columns=res_columns)
            elif pairwise is True:
                results = defaultdict(dict)
                for i, k1 in enumerate(arg1.columns):
                    for j, k2 in enumerate(arg2.columns):
                        if j < i and arg2 is arg1:
                            # Symmetric case
                            results[i][j] = results[j][i]
                        else:
                            results[i][j] = f(
                                *prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])
                            )
                from my_happy_pandas import concat
                result_index = arg1.index.union(arg2.index)
                if len(result_index):
                    # construct result frame
                    result = concat(
                        [
                            concat(
                                [results[i][j] for j, c in enumerate(arg2.columns)],
                                ignore_index=True,
                            )
                            for i, c in enumerate(arg1.columns)
                        ],
                        ignore_index=True,
                        axis=1,
                    )
                    result.columns = arg1.columns
                    # set the index and reorder
                    if arg2.columns.nlevels > 1:
                        result.index = MultiIndex.from_product(
                            arg2.columns.levels + [result_index]
                        )
                        # GH 34440
                        num_levels = len(result.index.levels)
                        new_order = [num_levels - 1] + list(range(num_levels - 1))
                        result = result.reorder_levels(new_order).sort_index()
                    else:
                        result.index = MultiIndex.from_product(
                            [range(len(arg2.columns)), range(len(result_index))]
                        )
                        result = result.swaplevel(1, 0).sort_index()
                        result.index = MultiIndex.from_product(
                            [result_index] + [arg2.columns]
                        )
                else:
                    # empty result
                    result = DataFrame(
                        index=MultiIndex(
                            levels=[arg1.index, arg2.columns], codes=[[], []]
                        ),
                        columns=arg2.columns,
                        dtype="float64",
                    )
                # reset our index names to arg1 names
                # reset our column names to arg2 names
                # careful not to mutate the original names
                result.columns = result.columns.set_names(arg1.columns.names)
                result.index = result.index.set_names(
                    result_index.names + arg2.columns.names
                )
                return result
            else:
                raise ValueError("'pairwise' is not True/False")
        else:
            # DataFrame vs Series/ndarray: apply f between each column and arg2.
            results = {
                i: f(*prep_binary(arg1.iloc[:, i], arg2))
                for i, col in enumerate(arg1.columns)
            }
            return dataframe_from_int_dict(results, arg1)
    else:
        # arg1 is 1-D but arg2 is a DataFrame: swap and recurse.
        return _flex_binary_moment(arg2, arg1, f)
def zsqrt(x):
    """Element-wise square root that maps negative inputs to 0 instead of NaN."""
    # Suppress the invalid-value warnings np.sqrt raises on negatives.
    with np.errstate(all="ignore"):
        root = np.sqrt(x)
        negative = x < 0
    if isinstance(x, ABCDataFrame):
        # DataFrame masks need the underlying ndarray for the any() check.
        if negative._values.any():
            root[negative] = 0
    elif negative.any():
        root[negative] = 0
    return root
def prep_binary(arg1, arg2):
    """Mask two like-typed inputs against each other and return the pair."""
    if not isinstance(arg2, type(arg1)):
        raise Exception("Input arrays must be of the same type!")
    # Adding 0 * other propagates the other operand's NaNs (and, for Series,
    # aligns indexes) without changing finite values -- a common mask.
    masked_left = arg1 + 0 * arg2
    masked_right = arg2 + 0 * arg1
    return masked_left, masked_right
| 34.738095 | 86 | 0.514736 |
2f77b55d6f7dc601167b68b4a56d717186145e2b | 374 | py | Python | siteconfig/commands/eval.py | mikeboers/siteconfig | 7124e941cf5068a70f07d0011902af797b74657e | [
"BSD-3-Clause"
] | null | null | null | siteconfig/commands/eval.py | mikeboers/siteconfig | 7124e941cf5068a70f07d0011902af797b74657e | [
"BSD-3-Clause"
] | null | null | null | siteconfig/commands/eval.py | mikeboers/siteconfig | 7124e941cf5068a70f07d0011902af797b74657e | [
"BSD-3-Clause"
] | null | null | null | import sys
from .main import command, argument
@command(
    argument('expr', nargs='+'),
    name='eval',
    help='lookup a single key',
)
def eval_(args, config):
    """Evaluate a Python expression against the config and print the result.

    Config keys are exposed directly as globals; the whole config object is
    also available under the name ``config``. Returns 1 (error exit) when
    the expression evaluates to None.
    """
    expression = ' '.join(args.expr)
    # eval of a user-supplied expression is intentional: this is the user's
    # own CLI querying their own configuration.
    value = eval(expression, dict(config), {'config': config})
    if value is None:
        return 1
    sys.stdout.write(str(value) + args.endl)
07f7fa818aa0e2b943b87414b4af607d67f6a475 | 8,784 | py | Python | python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework
from paddle.fluid.framework import Program, switch_main_program
import bisect
import numpy as np
# Fix the framework and NumPy RNG seeds so the random tensors and initial
# weights used by the gradient check are reproducible across runs.
fluid.default_startup_program().random_seed = 1
np.random.seed(1)
class TestDyRnnStaticInput(unittest.TestCase):
    """Checks DynamicRNN's static_input: both the per-step values it yields
    and the gradient flowing back into the static input tensor (verified
    against a central-difference numeric gradient).
    """
    def setUp(self):
        """Build a fresh program, input tensors and executor for each test."""
        # Step size for the central-difference numeric gradient.
        self._delta = 0.005
        self._max_sequence_len = 3
        self._program = Program()
        switch_main_program(self._program)
        self.output_dim = 10
        self.place = core.CPUPlace()
        self.prepare_x_tensor()
        self.prepare_static_input_tensor()
        self.exe = fluid.Executor(self.place)
    def prepare_x_tensor(self):
        """Create the LoD tensor fed as the RNN's per-step input."""
        self.x_tensor_dim = 10
        # Three sequences of lengths 2, 1 and 3.
        lod = [[2, 1, 3]]
        shape = [sum(lod[0]), self.x_tensor_dim]
        self.x_tensor_data = np.random.random(shape).astype('float32')
        self.x_tensor = core.LoDTensor()
        self.x_tensor.set_recursive_sequence_lengths(lod)
        self.x_tensor.set(self.x_tensor_data, self.place)
    def prepare_static_input_tensor(self):
        """Create the LoD tensor used as the RNN's static input."""
        self.static_input_tensor_dim = 4
        lod = [[1, 2, 3]]
        shape = [sum(lod[0]), self.static_input_tensor_dim]
        self.static_input_data = np.random.random(shape).astype('float32')
        self.static_input_tensor = core.LoDTensor()
        self.static_input_tensor.set_recursive_sequence_lengths(lod)
        self.static_input_tensor.set(self.static_input_data, self.place)
    def fetch_value(self, var):
        """Run the program feeding both tensors; return (ndarray, lod) of var."""
        fetch_outs = self.exe.run(feed={
            'x_tensor':
            self.x_tensor,
            'static_input_tensor':
            self.static_input_tensor
        },
                                  fetch_list=[var],
                                  return_numpy=False)
        return self._lodtensor_to_ndarray(fetch_outs[0])
    def _lodtensor_to_ndarray(self, lod_tensor):
        """Copy a LoDTensor element-by-element into a float32 ndarray."""
        dims = lod_tensor.shape()
        ndarray = np.zeros(shape=dims).astype('float32')
        for i in range(np.product(dims)):
            ndarray.ravel()[i] = lod_tensor._get_float_element(i)
        return ndarray, lod_tensor.recursive_sequence_lengths()
    def build_graph(self, only_forward=False):
        """Construct the DynamicRNN graph.

        With only_forward=True, capture static_input at every step into a
        tensor array and return per-step read variables; otherwise append a
        mean loss and backward pass and return (static_input_grad, loss).
        """
        x_tensor = fluid.layers.data(name='x_tensor',
                                     shape=[self.x_tensor_dim],
                                     dtype='float32',
                                     lod_level=1)
        x_tensor.stop_gradient = False
        static_input_tensor = fluid.layers.data(
            name='static_input_tensor',
            shape=[self.static_input_tensor_dim],
            dtype='float32',
            lod_level=1)
        static_input_tensor.stop_gradient = False
        if only_forward:
            static_input_out_array = self._program.global_block().create_var(
                name='static_input_out_array',
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype='float32')
            static_input_out_array.stop_gradient = True
        rnn = fluid.layers.DynamicRNN()
        with rnn.block():
            step_x = rnn.step_input(x_tensor)
            step_static_input = rnn.static_input(static_input_tensor)
            if only_forward:
                # Record the static input seen at this step for inspection.
                fluid.layers.array_write(x=step_static_input,
                                         i=rnn.step_idx,
                                         array=static_input_out_array)
            last = fluid.layers.sequence_pool(input=step_static_input,
                                              pool_type='last')
            projected = fluid.layers.fc(input=[step_x, last],
                                        size=self.output_dim)
            rnn.output(projected)
        if only_forward:
            static_input_step_outs = []
            step_idx = fluid.layers.fill_constant(shape=[1],
                                                  dtype='int64',
                                                  value=0)
            step_idx.stop_gradient = True
            # Read back one array element per possible time step.
            for i in range(self._max_sequence_len):
                step_out = fluid.layers.array_read(static_input_out_array,
                                                   step_idx)
                step_out.stop_gradient = True
                static_input_step_outs.append(step_out)
                fluid.layers.increment(x=step_idx, value=1.0, in_place=True)
        if only_forward:
            return static_input_step_outs
        last = fluid.layers.sequence_pool(input=rnn(), pool_type='last')
        loss = fluid.layers.mean(last)
        append_backward(loss)
        static_input_grad = self._program.global_block().var(
            framework.grad_var_name('static_input_tensor'))
        return static_input_grad, loss
    def get_expected_static_step_outs(self):
        """Reproduce in NumPy what static_input should yield per step.

        DynamicRNN sorts sequences by descending length and drops finished
        sequences at each step, so the expected output at step i contains
        the static slices of sequences with length > i, in sorted order.
        """
        x_lod = self.x_tensor.recursive_sequence_lengths()
        x_seq_len = x_lod[0]
        x_seq_len_sorted = sorted(x_seq_len)
        x_sorted_indices = np.argsort(x_seq_len)[::-1]
        static_lod = self.static_input_tensor.recursive_sequence_lengths()
        static_sliced = []
        cur_offset = 0
        # Slice the flat static data back into per-sequence chunks.
        for i in range(len(static_lod[0])):
            static_sliced.append(
                self.static_input_data[cur_offset:(cur_offset +
                                                   static_lod[0][i])])
            cur_offset += static_lod[0][i]
        static_seq_len = static_lod[0]
        static_reordered = []
        # Reorder the static chunks by descending x-sequence length.
        for i in range(len(x_sorted_indices)):
            static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist())
        static_seq_len_reordered = [
            static_seq_len[x_sorted_indices[i]]
            for i in range(len(x_sorted_indices))
        ]
        static_step_outs = []
        static_step_lods = []
        for i in range(self._max_sequence_len):
            # Number of sequences still active at step i.
            end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1)
            lod = []
            total_len = 0
            for i in range(end):
                lod.append(static_seq_len_reordered[i])
                total_len += lod[-1]
            static_step_lods.append([lod])
            end = total_len
            static_step_outs.append(
                np.array(static_reordered[:end]).astype('float32'))
        return static_step_outs, static_step_lods
    def test_step_out(self):
        """Per-step static_input values and LoDs match the NumPy reference."""
        static_step_outs = self.build_graph(only_forward=True)
        self.exe.run(framework.default_startup_program())
        expected_outs, expected_lods = self.get_expected_static_step_outs()
        for i in range(self._max_sequence_len):
            step_out, lod = self.fetch_value(static_step_outs[i])
            self.assertTrue(np.allclose(step_out, expected_outs[i]))
            self.assertTrue(np.allclose(lod, expected_lods[i]))
    def test_network_gradient(self):
        """Analytic static-input gradient matches a central-difference check."""
        static_input_grad, loss = self.build_graph()
        self.exe.run(framework.default_startup_program())
        actual_gradients, actual_lod = self.fetch_value(static_input_grad)
        static_input_shape = self.static_input_tensor.shape()
        numeric_gradients = np.zeros(shape=static_input_shape).astype('float32')
        # calculate numeric gradients
        tensor_size = np.product(static_input_shape)
        for i in range(tensor_size):
            # Perturb one element up and down; restore it afterwards.
            origin = self.static_input_tensor._get_float_element(i)
            x_pos = origin + self._delta
            self.static_input_tensor._set_float_element(i, x_pos)
            y_pos = self.fetch_value(loss)[0][0]
            x_neg = origin - self._delta
            self.static_input_tensor._set_float_element(i, x_neg)
            y_neg = self.fetch_value(loss)[0][0]
            self.static_input_tensor._set_float_element(i, origin)
            numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2
        self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001))
        self.assertTrue(
            np.allclose(actual_lod,
                        self.static_input_tensor.recursive_sequence_lengths()))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 40.666667 | 80 | 0.619536 |
8bd6c65962d800081a042034bbddab478e8c9844 | 826 | py | Python | dynamic_programming/howSum.py | Yasir323/Data-Structures-and-Algorithms-in-Python | b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0 | [
"MIT"
] | null | null | null | dynamic_programming/howSum.py | Yasir323/Data-Structures-and-Algorithms-in-Python | b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0 | [
"MIT"
] | null | null | null | dynamic_programming/howSum.py | Yasir323/Data-Structures-and-Algorithms-in-Python | b721d0ca0218b9665d4f6ca0bbfd4417244bcdf0 | [
"MIT"
] | null | null | null | # Program to return any combination that
# adds up to a number, 'targetSum'
"""
m = target, determines the height of the tree
n = array length, determines complexity
This has a O(n^m * m) time complexity and O(m)
space complexity when solving without memoization.
Memoized Solution:
Time = O(m*n*m)
Space = O(m*m)
"""
import functools
@functools.lru_cache()
def how_sum(target, numbers):
    """Return any combination of values from *numbers* (with repetition)
    summing to *target*, as a list, or -1 if no combination exists.

    *numbers* must be a tuple so the (target, numbers) pair is hashable
    for lru_cache memoization.
    """
    if target == 0:
        return []
    if target < 0:
        return -1
    for candidate in numbers:
        partial = how_sum(target - candidate, numbers)
        if partial != -1:
            # Found a combination for the remainder; extend it with this value.
            return partial + [candidate]
    return -1
print(how_sum(7, (5, 3, 4, 7)))
print(how_sum(7, (2, 4)))
print(how_sum(8, (2, 3, 5)))
print(how_sum(300, (7, 14)))
| 22.324324 | 54 | 0.642857 |
8774c15028247ce0cb6e72c42c6ea7bd0f9d4e07 | 7,231 | py | Python | sdk/tables/azure-data-tables/azure/data/tables/aio/_base_client_async.py | pelanzag/azure-sdk-for-python | d97bd98f09d2d8b7517d84a97c363241bf56afd0 | [
"MIT"
] | null | null | null | sdk/tables/azure-data-tables/azure/data/tables/aio/_base_client_async.py | pelanzag/azure-sdk-for-python | d97bd98f09d2d8b7517d84a97c363241bf56afd0 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/tables/azure-data-tables/azure/data/tables/aio/_base_client_async.py | pelanzag/azure-sdk-for-python | d97bd98f09d2d8b7517d84a97c363241bf56afd0 | [
"MIT"
] | 1 | 2019-04-05T18:17:43.000Z | 2019-04-05T18:17:43.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import Any, List
from uuid import uuid4
from azure.core.credentials import AzureSasCredential
from azure.core.exceptions import ResourceNotFoundError, ClientAuthenticationError
from azure.core.pipeline.policies import (
ContentDecodePolicy,
AsyncBearerTokenCredentialPolicy,
AsyncRedirectPolicy,
DistributedTracingPolicy,
HttpLoggingPolicy,
UserAgentPolicy,
ProxyPolicy,
AzureSasCredentialPolicy,
RequestIdPolicy,
CustomHookPolicy,
NetworkTraceLoggingPolicy
)
from azure.core.pipeline.transport import (
AsyncHttpTransport,
HttpRequest,
)
from .._generated.aio import AzureTable
from .._base_client import AccountHostsMixin, get_api_version, extract_batch_part_metadata
from .._authentication import SharedKeyCredentialPolicy
from .._constants import STORAGE_OAUTH_SCOPE
from .._error import RequestTooLargeError
from .._models import BatchErrorException
from .._policies import StorageHosts, StorageHeadersPolicy
from .._sdk_moniker import SDK_MONIKER
from ._policies_async import AsyncTablesRetryPolicy
class AsyncTablesBaseClient(AccountHostsMixin):
    """Async base client for Table service/table clients.

    Wires credentials into the policy pipeline and owns the generated
    AzureTable client, including multipart batch submission.
    """
    def __init__(
        self,
        account_url,  # type: str
        credential=None,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        super(AsyncTablesBaseClient, self).__init__(account_url, credential=credential, **kwargs)
        self._client = AzureTable(
            self.url,
            policies=kwargs.pop('policies', self._policies),
            **kwargs
        )
        self._client._config.version = get_api_version(kwargs, self._client._config.version)  # pylint: disable=protected-access
    async def __aenter__(self):
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *args):
        await self._client.__aexit__(*args)
    async def close(self) -> None:
        """This method is to close the sockets opened by the client.
        It need not be used when using with a context manager.
        """
        await self._client.close()
    def _configure_credential(self, credential):
        # type: (Any) -> None
        """Translate the supplied credential into the matching pipeline policy.

        Supports token credentials (get_token), shared-key policies and SAS
        credentials; raises TypeError for anything else non-None.
        """
        if hasattr(credential, "get_token"):
            self._credential_policy = AsyncBearerTokenCredentialPolicy(
                credential, STORAGE_OAUTH_SCOPE
            )
        elif isinstance(credential, SharedKeyCredentialPolicy):
            self._credential_policy = credential
        elif isinstance(credential, AzureSasCredential):
            self._credential_policy = AzureSasCredentialPolicy(credential)
        elif credential is not None:
            raise TypeError("Unsupported credential: {}".format(credential))
    def _configure_policies(self, **kwargs):
        """Return the ordered HTTP pipeline policy list for this client."""
        return [
            RequestIdPolicy(**kwargs),
            StorageHeadersPolicy(**kwargs),
            UserAgentPolicy(sdk_moniker=SDK_MONIKER, **kwargs),
            ProxyPolicy(**kwargs),
            self._credential_policy,
            ContentDecodePolicy(response_encoding="utf-8"),
            AsyncRedirectPolicy(**kwargs),
            StorageHosts(**kwargs),
            AsyncTablesRetryPolicy(**kwargs),
            CustomHookPolicy(**kwargs),
            NetworkTraceLoggingPolicy(**kwargs),
            DistributedTracingPolicy(**kwargs),
            HttpLoggingPolicy(**kwargs),
        ]
    async def _batch_send(
        self,
        entities,  # type: List[TableEntity]
        *reqs: "HttpRequest",
        **kwargs
    ):
        """Given a series of request, do a Storage batch call.

        Wraps *reqs* in an OData changeset inside a multipart/mixed batch
        request, maps HTTP-level and per-part failures to exceptions, and
        returns [(entity, part_metadata), ...] on success.
        """
        # Pop it here, so requests doesn't feel bad about additional kwarg
        policies = [StorageHeadersPolicy()]
        changeset = HttpRequest("POST", None)
        changeset.set_multipart_mixed(
            *reqs, policies=policies, boundary="changeset_{}".format(uuid4())
        )
        request = self._client._client.post(  # pylint: disable=protected-access
            url="https://{}/$batch".format(self._primary_hostname),
            headers={
                "x-ms-version": self.api_version,
                "DataServiceVersion": "3.0",
                "MaxDataServiceVersion": "3.0;NetFx",
                "Content-Type": "application/json",
                "Accept": "application/json"
            },
        )
        request.set_multipart_mixed(
            changeset,
            policies=policies,
            enforce_https=False,
            boundary="batch_{}".format(uuid4()),
        )
        pipeline_response = await self._client._client._pipeline.run(request, **kwargs)  # pylint: disable=protected-access
        response = pipeline_response.http_response
        # Top-level failures: the whole batch was rejected.
        if response.status_code == 403:
            raise ClientAuthenticationError(
                message="There was an error authenticating with the service",
                response=response,
            )
        if response.status_code == 404:
            raise ResourceNotFoundError(
                message="The resource could not be found", response=response
            )
        if response.status_code == 413:
            raise RequestTooLargeError(
                message="The request was too large", response=response
            )
        if response.status_code != 202:
            raise BatchErrorException(
                message="There is a failure in the batch operation.",
                response=response,
                parts=None,
            )
        # The batch was accepted; each sub-request carries its own status.
        parts_iter = response.parts()
        parts = []
        async for p in parts_iter:
            parts.append(p)
        if any(p for p in parts if not 200 <= p.status_code < 300):
            if any(p for p in parts if p.status_code == 404):
                raise ResourceNotFoundError(
                    message="The resource could not be found", response=response
                )
            if any(p for p in parts if p.status_code == 413):
                raise RequestTooLargeError(
                    message="The request was too large", response=response
                )
            raise BatchErrorException(
                message="There is a failure in the batch operation.",
                response=response,
                parts=parts,
            )
        return list(zip(entities, (extract_batch_part_metadata(p) for p in parts)))
class AsyncTransportWrapper(AsyncHttpTransport):
    """Wrapper class that ensures that an inner client created
    by a `get_client` method does not close the outer transport for the parent
    when used in a context manager.

    :param async_transport: The shared outer transport that requests are
        delegated to. Its lifetime is owned by the parent client.
    """

    def __init__(self, async_transport):
        self._transport = async_transport

    async def send(self, request, **kwargs):
        """Delegate the request to the wrapped transport."""
        return await self._transport.send(request, **kwargs)

    async def open(self):
        # Deliberate no-op: the shared outer transport manages its own lifetime.
        pass

    async def close(self):
        # Deliberate no-op: closing here would tear down the shared transport.
        pass

    async def __aenter__(self):
        # Fix: return self so ``async with wrapper as t`` binds a usable
        # transport (the original returned None). Opening is still skipped
        # on purpose — see ``open``.
        return self

    async def __aexit__(self, *args):  # pylint: disable=arguments-differ
        # Deliberate no-op: never close the shared outer transport on exit.
        pass
| 35.975124 | 128 | 0.619555 |
df0ac10ed8311fa2b85843d5365bc686690ed491 | 22,591 | py | Python | msticpy/sectools/iocextract.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | [
"MIT"
] | 4 | 2021-02-02T19:49:54.000Z | 2021-12-09T04:04:13.000Z | msticpy/sectools/iocextract.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | [
"MIT"
] | null | null | null | msticpy/sectools/iocextract.py | Noezor/msticpy | f0d6d0d0bbaeba1ca060787b9929350804fa6dc5 | [
"MIT"
] | 2 | 2021-12-06T21:43:14.000Z | 2022-03-11T21:45:03.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Module for IoCExtract class.
Uses a set of builtin regular expressions to look for Indicator of
Compromise (IoC) patterns. Input can be a single string or a pandas
dataframe with one or more columns specified as input.
The following types are built-in:
- IPv4 and IPv6
- URL
- DNS domain
- Hashes (MD5, SHA1, SHA256)
- Windows file paths
- Linux file paths (this is kind of noisy because a legal linux file
path can have almost any character) You can modify or add to the
regular expressions used at runtime.
"""
import re
from collections import defaultdict, namedtuple
from enum import Enum
from typing import Any, Dict, List, Set, Tuple, Union
from urllib.parse import unquote
import pandas as pd
from .._version import VERSION
from ..common.utility import export
from .domain_utils import DomainValidator
__version__ = VERSION
__author__ = "Ian Hellen"
def _compile_regex(regex):
return re.compile(regex, re.I | re.X | re.M)
IoCPattern = namedtuple("IoCPattern", ["ioc_type", "comp_regex", "priority", "group"])
@export
class IoCType(Enum):
    """Enumeration of IoC Types."""

    unknown = "unknown"
    ipv4 = "ipv4"
    ipv6 = "ipv6"
    dns = "dns"
    url = "url"
    md5_hash = "md5_hash"
    sha1_hash = "sha1_hash"
    sha256_hash = "sha256_hash"
    file_hash = "file_hash"
    email = "email"
    windows_path = "windows_path"
    linux_path = "linux_path"
    hostname = "hostname"

    @classmethod
    def parse(cls, value: str) -> "IoCType":
        """
        Return the IoCType member named by `value`.

        Parameters
        ----------
        value : str
            Enumeration name (matched case-insensitively).

        Returns
        -------
        IoCType
            The matching member, or ``IoCType.unknown`` when `value`
            does not name a known type.
        """
        normalized = value.lower()
        try:
            return cls(normalized)
        except ValueError:
            return cls.unknown
@export
class IoCExtract:
    """
    IoC Extractor - looks for common IoC patterns in input strings.

    The extract() method takes either a string or a pandas DataFrame
    as input. When using the string option as an input extract will
    return a dictionary of results. When using a DataFrame the results
    will be returned as a new DataFrame with the following columns:
    IoCType: the mnemonic used to distinguish different IoC Types
    Observable: the actual value of the observable
    SourceIndex: the index of the row in the input DataFrame from
    which the source for the IoC observable was extracted.

    The class has a number of built-in IoC regex definitions.
    These can be retrieved using the ioc_types attribute.

    Addition IoC definitions can be added using the add_ioc_type
    method.

    Note: due to some ambiguity in the regular expression patterns
    for different types and observable may be returned assigned to
    multiple observable types. E.g. 192.168.0.1 is a also a legal file
    name in both Linux and Windows. Linux file names have a particularly
    large scope in terms of legal characters so it will be quite common
    to see other IoC observables (or parts of them) returned as a
    possible linux path.
    """

    # Captures a dotted-quad; does not range-check octets (999.999.999.999
    # would also match).
    IPV4_REGEX = r"(?P<ipaddress>(?:[0-9]{1,3}\.){3}[0-9]{1,3})"
    # Full (uncompressed) 8-group IPv6 form only.
    IPV6_REGEX = r"(?<![:.\w])(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}(?![:.\w])"
    # Matched labels are additionally TLD-validated in _scan_for_iocs.
    DNS_REGEX = r"((?=[a-z0-9-]{1,63}\.)[a-z0-9]+(-[a-z0-9]+)*\.){1,126}[a-z]{2,63}"
    # dns_regex =
    # '\\b((?=[a-z0-9-]{1,63}\\.)[a-z0-9]+(-[a-z0-9]+)*\\.){2,}[a-z]{2,63}\\b'

    # Verbose-mode regexes: literal whitespace below is ignored (re.X).
    URL_REGEX = r"""
        (?P<protocol>(https?|ftp|telnet|ldap|file)://)
        (?P<userinfo>([a-z0-9-._~!$&\'()*+,;=:]|%[0-9A-F]{2})*@)?
        (?P<host>([a-z0-9-._~!$&\'()*+,;=]|%[0-9A-F]{2})*)
        (:(?P<port>\d*))?
        (/(?P<path>([^?\#"<>\s]|%[0-9A-F]{2})*/?))?
        (\?(?P<query>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?
        (\#(?P<fragment>([a-z0-9-._~!$&'()*+,;=:/?@]|%[0-9A-F]{2})*))?"""

    WINPATH_REGEX = r"""
        (?P<root>[a-z]:|\\\\[a-z0-9_.$-]+||[.]+)
        (?P<folder>\\(?:[^\/:*?"\'<>|\r\n]+\\)*)
        (?P<file>[^\\/*?""<>|\r\n ]+)"""
    # Linux simplified - this ignores some legal linux paths avoid matching too much
    # This also matches URLs but these should be thrown out by priority
    # weighting since URL has a higher priority
    LXPATH_REGEX = r"""(?P<root>/+||[.]+)
        (?P<folder>/(?:[^\\/:*?<>|\r\n]+/)*)
        (?P<file>[^/\0<>|\r\n ]+)"""

    # Hash regexes anchor on non-hex neighbours so longer hex runs are
    # not reported as shorter hash types.
    MD5_REGEX = r"(?:^|[^A-Fa-f0-9])(?P<hash>[A-Fa-f0-9]{32})(?:$|[^A-Fa-f0-9])"
    SHA1_REGEX = r"(?:^|[^A-Fa-f0-9])(?P<hash>[A-Fa-f0-9]{40})(?:$|[^A-Fa-f0-9])"
    SHA256_REGEX = r"(?:^|[^A-Fa-f0-9])(?P<hash>[A-Fa-f0-9]{64})(?:$|[^A-Fa-f0-9])"

    # NOTE(review): this registry is a *class-level* dict mutated by
    # add_ioc_type (called from __init__), so registrations made through
    # one instance are visible to every instance — confirm this sharing
    # is intended.
    _content_regex: Dict[str, IoCPattern] = {}
def __init__(self):
    """Initialize new instance of IoCExtract and register built-in patterns.

    Note: registration order matters — on overlapping matches with equal
    priority, _add_highest_pri_match keeps the first-registered type.
    """
    # IP Addresses
    self.add_ioc_type(IoCType.ipv4.name, self.IPV4_REGEX, 0, "ipaddress")
    self.add_ioc_type(IoCType.ipv6.name, self.IPV6_REGEX, 0)
    # Dns Domains
    # This also matches IP addresses but IPs have higher
    # priority both matching on the same substring will defer
    # to the IP regex
    self.add_ioc_type(IoCType.dns.name, self.DNS_REGEX, 1)
    # Http requests
    self.add_ioc_type(IoCType.url.name, self.URL_REGEX, 0)
    # File paths
    # Windows
    self.add_ioc_type(IoCType.windows_path.name, self.WINPATH_REGEX, 2)
    self.add_ioc_type(IoCType.linux_path.name, self.LXPATH_REGEX, 2)
    # MD5, SHA1, SHA256 hashes
    self.add_ioc_type(IoCType.md5_hash.name, self.MD5_REGEX, 1, "hash")
    self.add_ioc_type(IoCType.sha1_hash.name, self.SHA1_REGEX, 1, "hash")
    self.add_ioc_type(IoCType.sha256_hash.name, self.SHA256_REGEX, 1, "hash")
    # Validator used to confirm TLDs of dns matches.
    self.dom_val = DomainValidator()
# Public members
def add_ioc_type(
    self, ioc_type: str, ioc_regex: str, priority: int = 0, group: str = None
):
    """
    Add an IoC type and regular expression to use to the built-in set.

    Parameters
    ----------
    ioc_type : str
        A unique name for the IoC type
    ioc_regex : str
        A regular expression used to search for the type
    priority : int, optional
        Priority of the regex match vs. other ioc_patterns. 0 is
        the highest priority (the default is 0).
    group : str, optional
        The regex group to match (the default is None,
        which will match on the whole expression)

    Raises
    ------
    Exception
        If `ioc_type` or `ioc_regex` is None, empty or whitespace-only.

    Notes
    -----
    Pattern priorities.
        If two IocType patterns match on the same substring, the matched
        substring is assigned to the pattern/IocType with the highest
        priority. E.g. `foo.bar.com` will match types: `dns`, `windows_path`
        and `linux_path` but since `dns` has a higher priority, the expression
        is assigned to the `dns` matches.
    """
    # Fix: ``str.strip()`` returns "" (never None), so the original
    # ``... .strip() is None`` test could never fire and blank values
    # slipped through. Test emptiness explicitly instead.
    if ioc_type is None or not ioc_type.strip():
        raise Exception("No value supplied for ioc_type parameter")
    if ioc_regex is None or not ioc_regex.strip():
        raise Exception("No value supplied for ioc_regex parameter")
    self._content_regex[ioc_type] = IoCPattern(
        ioc_type=ioc_type,
        comp_regex=_compile_regex(regex=ioc_regex),
        priority=priority,
        group=group,
    )
@property
def ioc_types(self) -> dict:
    """
    Mapping of registered IoC type names to their pattern definitions.

    Returns
    -------
    dict
        dict of IoC Type names and their ``IoCPattern`` records.
    """
    return self._content_regex
def extract(
    self,
    src: str = None,
    data: pd.DataFrame = None,
    columns: List[str] = None,
    **kwargs,
) -> Union[Dict[str, Set[str]], pd.DataFrame]:
    """
    Extract IoCs from either a string or pandas DataFrame.

    Parameters
    ----------
    src : str, optional
        source string in which to look for IoC patterns
        (the default is None)
    data : pd.DataFrame, optional
        input DataFrame from which to read source strings
        (the default is None)
    columns : list, optional
        The list of columns to use as source strings,
        if the `data` parameter is used. (the default is None)

    Other Parameters
    ----------------
    ioc_types : list, optional
        Restrict matching to just specified types.
        (default is all types)
    include_paths : bool, optional
        Whether to include path matches (which can be noisy)
        (the default is false - excludes 'windows_path'
        and 'linux_path'). If `ioc_types` is specified
        this parameter is ignored.

    Returns
    -------
    Any
        dict of found observables (if input is a string) or
        DataFrame of observables

    Raises
    ------
    Exception
        If neither `src` nor both `data` and `columns` are supplied
        (missing-column errors are raised by `extract_df`).

    Notes
    -----
    Extract takes either a string or a pandas DataFrame as input.
    When using the string option as an input extract will
    return a dictionary of results.
    When using a DataFrame the results will be returned as a new
    DataFrame with the following columns:
    - IoCType: the mnemonic used to distinguish different IoC Types
    - Observable: the actual value of the observable
    - SourceIndex: the index of the row in the input DataFrame from
      which the source for the IoC observable was extracted.

    IoCType Pattern selection
    The default list is: ['ipv4', 'ipv6', 'dns', 'url',
    'md5_hash', 'sha1_hash', 'sha256_hash'] plus any
    user-defined types.
    'windows_path', 'linux_path' are excluded unless `include_paths`
    is True or explicitly included in `ioc_paths`.
    """
    if src and src.strip():
        # String input: scan directly. include_paths is not applied on
        # this path, matching the original behavior.
        return self._scan_for_iocs(src=src, ioc_types=kwargs.get("ioc_types", None))
    if data is None:
        raise Exception("No source data was supplied to extract")
    if columns is None:
        raise Exception("No values were supplied for the columns parameter")
    # The DataFrame path previously duplicated extract_df line-for-line;
    # delegate so the logic lives in one place.
    return self.extract_df(data=data, columns=columns, **kwargs)
# pylint: disable=too-many-arguments
def _search_in_row(
    self,
    datarow: pd.Series,
    idx: Any,
    columns: List[str],
    result_columns: List[str],
    ioc_types_to_use: List[str],
) -> List[pd.Series]:
    """Collect IoC result rows found in the given columns of one input row."""
    found_rows: List[pd.Series] = []
    for column in columns:
        matches = self._scan_for_iocs(datarow[column], ioc_types_to_use)
        for ioc_type, observables in matches.items():
            found_rows.extend(
                pd.Series(data=[ioc_type, observable, idx], index=result_columns)
                for observable in observables
            )
    return found_rows
def extract_df(
    self, data: pd.DataFrame, columns: List[str], **kwargs
) -> pd.DataFrame:
    """
    Extract IoCs from a pandas DataFrame.

    Parameters
    ----------
    data : pd.DataFrame
        input DataFrame from which to read source strings
    columns : list
        The list of columns to use as source strings,

    Other Parameters
    ----------------
    ioc_types : list, optional
        Restrict matching to just specified types.
        (default is all types)
    include_paths : bool, optional
        Whether to include path matches (which can be noisy)
        (the default is false - excludes 'windows_path'
        and 'linux_path'). If `ioc_types` is specified
        this parameter is ignored.

    Returns
    -------
    pd.DataFrame
        DataFrame of observables with columns:

        - IoCType: the mnemonic used to distinguish different IoC Types
        - Observable: the actual value of the observable
        - SourceIndex: the index of the row in the input DataFrame from
          which the source for the IoC observable was extracted.

    Notes
    -----
    The default pattern list is: ['ipv4', 'ipv6', 'dns', 'url',
    'md5_hash', 'sha1_hash', 'sha256_hash'] plus any
    user-defined types.
    'windows_path', 'linux_path' are excluded unless `include_paths`
    is True or explicitly included in `ioc_paths`.
    """
    types_to_scan = self._get_ioc_types_to_use(
        kwargs.get("ioc_types", None), kwargs.get("include_paths", False)
    )
    absent = [col for col in set(columns) if col not in data.columns]
    if absent:
        raise Exception(
            "Source column(s) {} not found in supplied DataFrame".format(
                ", ".join(absent)
            )
        )
    result_columns = ["IoCType", "Observable", "SourceIndex"]
    result_rows: List[pd.Series] = []
    for row_index, row in data.iterrows():
        result_rows.extend(
            self._search_in_row(row, row_index, columns, result_columns, types_to_scan)
        )
    return pd.DataFrame(data=result_rows, columns=result_columns)
def _get_ioc_types_to_use(
    self, ioc_types: List[str], include_paths: bool
) -> List[str]:
    """Return the list of IoC type names to scan for.

    A user-supplied `ioc_types` list is used verbatim (deduplicated);
    otherwise all registered types are used minus the noisy path
    matchers (per the docstrings of extract/extract_df).
    """
    # Use only requested IoC Type patterns
    if ioc_types:
        ioc_types_to_use = list(set(ioc_types))
    else:
        ioc_types_to_use = list(set(self._content_regex.keys()))
        # don't include linux paths unless explicitly included
        # (the linux path regex matches almost any string, incl. URLs)
        ioc_types_to_use.remove(IoCType.linux_path.name)
        if not include_paths:
            # windows path matching is less noisy
            ioc_types_to_use.remove(IoCType.windows_path.name)
    return ioc_types_to_use
def validate(self, input_str: str, ioc_type: str) -> bool:
    """
    Check that `input_str` matches the regex for the specified `ioc_type`.

    Parameters
    ----------
    input_str : str
        the string to test
    ioc_type : str
        the regex pattern to use

    Returns
    -------
    bool
        True if match.

    Raises
    ------
    KeyError
        If `ioc_type` (after hash/hostname aliasing) is not a
        registered type.
    """
    # Map aggregate/alias types onto a concrete registered pattern.
    if ioc_type == IoCType.file_hash.name:
        val_type = self.file_hash_type(input_str).name
    elif ioc_type == IoCType.hostname.name:
        val_type = "dns"
    else:
        val_type = ioc_type
    if val_type not in self._content_regex:
        raise KeyError(
            "Unknown type {}. Valid types are: {}".format(
                ioc_type, list(self._content_regex.keys())
            )
        )
    rgx = self._content_regex[val_type]
    pattern_match = rgx.comp_regex.fullmatch(input_str)
    if val_type == "dns":
        # dns matches must also carry a recognized TLD. Fix: return a
        # bool — the original returned the raw Match object here,
        # contradicting the documented ``-> bool`` contract (truthiness
        # is unchanged).
        return bool(self.dom_val.validate_tld(input_str) and pattern_match)
    return pattern_match is not None
@staticmethod
def file_hash_type(file_hash: str) -> IoCType:
    """
    Return the specific hash IoCType implied by the hash string length.

    Parameters
    ----------
    file_hash : str
        File hash string

    Returns
    -------
    IoCType
        md5 (32 hex chars), sha1 (40), sha256 (64) or unknown.
    """
    digest_len = len(file_hash.strip())
    if digest_len == 32:
        return IoCType.md5_hash
    if digest_len == 40:
        return IoCType.sha1_hash
    if digest_len == 64:
        return IoCType.sha256_hash
    return IoCType.unknown
def get_ioc_type(self, observable: str) -> str:
    """
    Return the name of the first IoC type whose pattern fully matches.

    Parameters
    ----------
    observable : str
        The IoC Observable to check

    Returns
    -------
    str
        The IoC type enumeration (unknown, if no match)
    """
    results = self._scan_for_iocs(src=observable)
    if not results:
        # Fall back to the (noisy) linux-path pattern, which is
        # excluded from the default scan set.
        results = self._scan_for_iocs(
            src=observable, ioc_types=[IoCType.linux_path.name]
        )
    # Only accept a type whose match covers the entire input string —
    # _scan_for_iocs also reports matching substrings.
    return next(
        (
            ioc_type
            for ioc_type, match_set in results.items()
            if observable in match_set
        ),
        IoCType.unknown.name,
    )
# Private methods
def _scan_for_iocs(
    self, src: str, ioc_types: List[str] = None
) -> Dict[str, Set[str]]:
    """Return IoCs found in the string.

    Runs every registered pattern (optionally restricted to
    `ioc_types`) over `src`, resolves overlapping matches by pattern
    priority, and returns a mapping of IoC type name -> set of matched
    substrings.
    """
    ioc_results: Dict[str, Set] = defaultdict(set)
    # matched substring -> (winning ioc_type, that type's priority)
    iocs_found: Dict[str, Tuple[str, int]] = {}
    # pylint: disable=too-many-nested-blocks
    for (ioc_type, rgx_def) in self._content_regex.items():
        if ioc_types and ioc_type not in ioc_types:
            continue
        match_pos = 0
        for rgx_match in rgx_def.comp_regex.finditer(src, match_pos):
            if rgx_match is None:
                break
            # If the rgx_def names a group to match on, use that
            match_str = (
                rgx_match.groupdict()[rgx_def.group]
                if rgx_def.group
                else rgx_match.group()
            )
            # dns candidates must additionally have a recognized TLD
            if ioc_type == "dns" and not self.dom_val.validate_tld(match_str):
                continue
            self._add_highest_pri_match(iocs_found, match_str, rgx_def)
            if ioc_type == "url":
                # percent-decode the URL and rescan it for embedded IoCs
                self._check_decode_url(match_str, rgx_def, match_pos, iocs_found)
            # NOTE(review): finditer above was already started at offset 0,
            # so updating match_pos here only changes the start offset
            # passed to _check_decode_url — confirm this is intended.
            match_pos = rgx_match.end()
    # Regroup the per-substring winners by their IoC type.
    for ioc, ioc_result in iocs_found.items():
        ioc_results[ioc_result[0]].add(ioc)
    return ioc_results
def _check_decode_url(self, match_str, rgx_def, match_pos, iocs_found):
    """Get any other IoCs from decoded URL.

    Percent-decodes `match_str` and rescans it with the URL pattern,
    recording both the decoded URL and its host (as a dns candidate).
    """
    decoded_url = unquote(match_str)
    # NOTE(review): match_pos is an offset into the *original* source
    # string but is applied to the decoded URL here — confirm intended.
    for url_match in rgx_def.comp_regex.finditer(decoded_url, match_pos):
        if url_match is not None:
            self._add_highest_pri_match(iocs_found, url_match.group(), rgx_def)
            # the host part of the decoded URL is itself a dns candidate
            self._add_highest_pri_match(
                iocs_found,
                url_match.groupdict()["host"],
                self._content_regex["dns"],
            )
@staticmethod
def _add_highest_pri_match(
    iocs_found: dict, current_match: str, current_def: IoCPattern
):
    """Record `current_match` unless a higher-priority type owns it.

    Lower numeric priority wins; on a tie the previously recorded
    (earlier-registered) type is kept.
    """
    existing = iocs_found.get(current_match)
    if existing is not None and current_def.priority >= existing[1]:
        # an equally or more specific type already claimed this substring
        return
    iocs_found[current_match] = (current_def.ioc_type, current_def.priority)
# pylint: disable=too-few-public-methods
@pd.api.extensions.register_dataframe_accessor("mp_ioc")
class IoCExtractAccessor:
    """Pandas accessor (``df.mp_ioc``) wrapping an IoCExtract instance."""

    def __init__(self, pandas_obj):
        """Instantiate pandas extension class for `pandas_obj`."""
        self._df = pandas_obj
        self._ioc = IoCExtract()

    def extract(self, columns, **kwargs):
        """
        Extract IoCs from the wrapped pandas DataFrame.

        Parameters
        ----------
        columns : list
            The list of columns to use as source strings.

        Other Parameters
        ----------------
        ioc_types : list, optional
            Restrict matching to just specified types
            (default is all types).
        include_paths : bool, optional
            Whether to include path matches (which can be noisy).
            Defaults to False, which excludes 'windows_path' and
            'linux_path'; ignored when `ioc_types` is specified.

        Returns
        -------
        pd.DataFrame
            DataFrame of observables with columns:

            - IoCType: the mnemonic used to distinguish IoC Types
            - Observable: the actual value of the observable
            - SourceIndex: the index of the source row in the input
              DataFrame.

        See Also
        --------
        IoCExtract.extract_df
        """
        return self._ioc.extract_df(data=self._df, columns=columns, **kwargs)
| 35.024806 | 86 | 0.583507 |
e79051e6470f5f25afc6348c56f1f8aa978d9552 | 401 | py | Python | nirvaas_main/asgi.py | Ishikashah2510/nirvaas_main | 5eaf92756d06261a7f555b10aad864a34c9e761b | [
"MIT"
] | null | null | null | nirvaas_main/asgi.py | Ishikashah2510/nirvaas_main | 5eaf92756d06261a7f555b10aad864a34c9e761b | [
"MIT"
] | null | null | null | nirvaas_main/asgi.py | Ishikashah2510/nirvaas_main | 5eaf92756d06261a7f555b10aad864a34c9e761b | [
"MIT"
] | 3 | 2020-12-30T11:35:22.000Z | 2021-01-07T13:10:26.000Z | """
ASGI config for nirvaas_main project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nirvaas_main.settings')

# Module-level ASGI callable used by ASGI servers (e.g. daphne, uvicorn).
application = get_asgi_application()
| 23.588235 | 78 | 0.790524 |
079a02b6832a0ee0d45b040c7a939873dca7eb05 | 405 | py | Python | website/migrations/0005_alter_contact_subject.py | ghassemiali/ghassemi7 | 2ee577a57fdd40df4e14214c16da266351cd7272 | [
"MIT"
] | null | null | null | website/migrations/0005_alter_contact_subject.py | ghassemiali/ghassemi7 | 2ee577a57fdd40df4e14214c16da266351cd7272 | [
"MIT"
] | null | null | null | website/migrations/0005_alter_contact_subject.py | ghassemiali/ghassemi7 | 2ee577a57fdd40df4e14214c16da266351cd7272 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.10 on 2022-01-18 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0004_alter_contact_subject'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='subject',
field=models.CharField(blank=True, max_length=255),
),
]
| 21.315789 | 63 | 0.609877 |
fb2be243bc7e08c47d100b417c21a1d719a160be | 5,700 | py | Python | scipy/linalg/_matfuncs_sqrtm.py | isjoung/scipy | 876a966a2b2016df9f7343f562ec70efa04a37f1 | [
"BSD-3-Clause"
] | null | null | null | scipy/linalg/_matfuncs_sqrtm.py | isjoung/scipy | 876a966a2b2016df9f7343f562ec70efa04a37f1 | [
"BSD-3-Clause"
] | null | null | null | scipy/linalg/_matfuncs_sqrtm.py | isjoung/scipy | 876a966a2b2016df9f7343f562ec70efa04a37f1 | [
"BSD-3-Clause"
] | 1 | 2017-03-02T23:53:50.000Z | 2017-03-02T23:53:50.000Z | """
Matrix square root for general matrices and for upper triangular matrices.
This module exists to avoid cyclic imports.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['sqrtm']
import numpy as np
# Local imports
from .misc import norm
from .lapack import ztrsyl, dtrsyl
from .decomp_schur import schur, rsf2csf
class SqrtmError(np.linalg.LinAlgError):
    """Raised when the triangular square-root recurrence breaks down."""
    pass
def _sqrtm_triu(T, blocksize=64):
"""
Matrix square root of an upper triangular matrix.
This is a helper function for `sqrtm` and `logm`.
Parameters
----------
T : (N, N) array_like upper triangular
Matrix whose square root to evaluate
blocksize : int, optional
If the blocksize is not degenerate with respect to the
size of the input array, then use a blocked algorithm. (Default: 64)
Returns
-------
sqrtm : (N, N) ndarray
Value of the sqrt function at `T`
References
----------
.. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
"Blocked Schur Algorithms for Computing the Matrix Square Root,
Lecture Notes in Computer Science, 7782. pp. 171-182.
"""
T_diag = np.diag(T)
keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
if not keep_it_real:
T_diag = T_diag.astype(complex)
R = np.diag(np.sqrt(T_diag))
# Compute the number of blocks to use; use at least one block.
n, n = T.shape
nblocks = max(n // blocksize, 1)
# Compute the smaller of the two sizes of blocks that
# we will actually use, and compute the number of large blocks.
bsmall, nlarge = divmod(n, nblocks)
blarge = bsmall + 1
nsmall = nblocks - nlarge
if nsmall * bsmall + nlarge * blarge != n:
raise Exception('internal inconsistency')
# Define the index range covered by each block.
start_stop_pairs = []
start = 0
for count, size in ((nsmall, bsmall), (nlarge, blarge)):
for i in range(count):
start_stop_pairs.append((start, start + size))
start += size
# Within-block interactions.
for start, stop in start_stop_pairs:
for j in range(start, stop):
for i in range(j-1, start-1, -1):
s = 0
if j - i > 1:
s = R[i, i+1:j].dot(R[i+1:j, j])
denom = R[i, i] + R[j, j]
if not denom:
raise SqrtmError('failed to find the matrix square root')
R[i,j] = (T[i,j] - s) / denom
# Between-block interactions.
for j in range(nblocks):
jstart, jstop = start_stop_pairs[j]
for i in range(j-1, -1, -1):
istart, istop = start_stop_pairs[i]
S = T[istart:istop, jstart:jstop]
if j - i > 1:
S = S - R[istart:istop, istop:jstart].dot(
R[istop:jstart, jstart:jstop])
# Invoke LAPACK.
# For more details, see the solve_sylvester implemention
# and the fortran dtrsyl and ztrsyl docs.
Rii = R[istart:istop, istart:istop]
Rjj = R[jstart:jstop, jstart:jstop]
if keep_it_real:
x, scale, info = dtrsyl(Rii, Rjj, S)
else:
x, scale, info = ztrsyl(Rii, Rjj, S)
R[istart:istop, jstart:jstop] = x * scale
# Return the matrix square root.
return R
def sqrtm(A, disp=True, blocksize=64):
    """
    Matrix square root.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose square root to evaluate
    disp : bool, optional
        Print warning if error in the result is estimated large
        instead of returning estimated error. (Default: True)
    blocksize : integer, optional
        If the blocksize is not degenerate with respect to the
        size of the input array, then use a blocked algorithm. (Default: 64)

    Returns
    -------
    sqrtm : (N, N) ndarray
        Value of the sqrt function at `A` (NaN-filled if the
        computation failed and ``disp`` is False)
    errest : float
        (if disp == False)
        Frobenius norm of the estimated error, ||err||_F / ||A||_F

    References
    ----------
    .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013)
           "Blocked Schur Algorithms for Computing the Matrix Square Root,
           Lecture Notes in Computer Science, 7782. pp. 171-182.

    Examples
    --------
    >>> a = np.array([[1.0, 3.0], [1.0, 4.0]])
    >>> r = sqrtm(a)
    >>> r
    array([[ 0.75592895,  1.13389342],
           [ 0.37796447,  1.88982237]])

    >>> r.dot(r)
    array([[ 1.,  3.],
           [ 1.,  4.]])

    """
    A = np.asarray(A)
    if len(A.shape) != 2:
        raise ValueError("Non-matrix input to matrix function.")
    if blocksize < 1:
        raise ValueError("The blocksize should be at least 1.")
    keep_it_real = np.isrealobj(A)
    if keep_it_real:
        # Real Schur form may contain 2x2 blocks (complex eigenvalue
        # pairs); if T is not strictly triangular, convert to the
        # complex Schur form, which is.
        T, Z = schur(A)
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T,Z)
    else:
        T, Z = schur(A, output='complex')
    failflag = False
    try:
        # sqrt of the triangular factor, then undo the similarity
        # transform: A = Z T Z^H  =>  sqrt(A) = Z sqrt(T) Z^H.
        R = _sqrtm_triu(T, blocksize=blocksize)
        ZH = np.conjugate(Z).T
        X = Z.dot(R).dot(ZH)
    except SqrtmError as e:
        # Recurrence broke down: report a NaN-filled result instead of
        # propagating the internal error.
        failflag = True
        X = np.empty_like(A)
        X.fill(np.nan)

    if disp:
        nzeig = np.any(np.diag(T) == 0)
        if nzeig:
            print("Matrix is singular and may not have a square root.")
        elif failflag:
            print("Failed to find a square root.")
        return X
    else:
        try:
            # NOTE(review): the residual norm is squared but the
            # denominator is not — looks asymmetric for a relative
            # error estimate; confirm against upstream before changing.
            arg2 = norm(X.dot(X) - A,'fro')**2 / norm(A,'fro')
        except ValueError:
            # NaNs in matrix
            arg2 = np.inf

        return X, arg2
| 29.533679 | 77 | 0.566667 |
41b6d95b2a825a9987fdb8d300b37aa8aa653b28 | 1,121 | py | Python | units/file.py | huangjunxiong11/detailMatting | d6364f056ccd0511c527794758e5baf73f4fd176 | [
"MIT"
] | 1 | 2020-11-04T02:32:55.000Z | 2020-11-04T02:32:55.000Z | units/file.py | huangjunxiong11/detailMatting | d6364f056ccd0511c527794758e5baf73f4fd176 | [
"MIT"
] | 1 | 2020-04-26T16:47:28.000Z | 2020-04-29T09:28:36.000Z | units/file.py | huangjunxiong11/detailMatting | d6364f056ccd0511c527794758e5baf73f4fd176 | [
"MIT"
] | null | null | null | import glob
import os
import time
def read_file(path):
    """Map each of today's class directories to its video files.

    Looks under ``<path>/<YYYY-MM-DD>`` (today's date) for class
    sub-directories and collects the ``.mp4``/``.avi``/``.mov`` files
    found in each one.

    Parameters
    ----------
    path : str
        Root input directory, e.g. ``'../data/invideo'``.

    Returns
    -------
    dict
        Mapping of class directory path -> list of video file paths.
    """
    today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    dirtoday = os.path.join(path, today)
    # (Fixes: the enumerate index was unused, ``dir`` shadowed the
    # builtin, and the three glob calls are now a single loop.)
    videos_by_class = {}
    for class_name in os.listdir(dirtoday):
        class_dir = os.path.join(dirtoday, class_name)
        videos = []
        # Only these three container formats are collected.
        for pattern in ('*.mp4', '*.avi', '*.mov'):
            videos += glob.glob(os.path.join(class_dir, pattern))
        videos_by_class[class_dir] = videos
    return videos_by_class
def get_dir(mp4):
    """Build the relative output directory for a video file.

    Combines *today's* date (not the date embedded in the input path),
    the video's class directory name and the video's base name, e.g.
    ``'../data/invideo/2020-04-27/shu/5-3.mp4'`` -> ``'<today>/shu/5-3'``.

    Parameters
    ----------
    mp4 : str
        Path to a video file.

    Returns
    -------
    str
        Relative path ``<today>/<class>/<basename-without-extension>``.
    """
    today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    parent_dir, filename = os.path.split(mp4)
    # os.path.basename is portable (was ``parent_dir.split('/')[-1]``,
    # which hard-coded the POSIX separator).
    class_name = os.path.basename(parent_dir)
    # splitext keeps dots inside the name ('a.b.mp4' -> 'a.b'), unlike
    # the original ``split('.')[0]`` which truncated at the first dot.
    stem = os.path.splitext(filename)[0]
    return os.path.join(today, class_name, stem)
# get_dir("../data/invideo/2020-04-27/shu/5-3.mp4")
# csv = read_file('../data/invideo/')
| 26.069767 | 66 | 0.582516 |
a0f461e931caae98b10433e248b5cb040a7072fe | 7,354 | py | Python | deepobs/tensorflow/datasets/fmnist.py | abahde/DeepOBS | 7ba549fe2ed77d6458a20ae9e8971df95830d821 | [
"MIT"
] | 7 | 2019-09-06T04:51:14.000Z | 2020-05-12T09:05:47.000Z | deepobs/tensorflow/datasets/fmnist.py | abahde/DeepOBS | 7ba549fe2ed77d6458a20ae9e8971df95830d821 | [
"MIT"
] | 16 | 2019-09-06T10:58:31.000Z | 2020-07-08T09:22:06.000Z | deepobs/tensorflow/datasets/fmnist.py | abahde/DeepOBS | 7ba549fe2ed77d6458a20ae9e8971df95830d821 | [
"MIT"
] | 5 | 2019-07-24T14:20:15.000Z | 2020-10-14T13:14:08.000Z | # -*- coding: utf-8 -*-
"""Fashion-MNIST DeepOBS dataset."""
from __future__ import print_function
import os
import gzip
import numpy as np
import tensorflow as tf
from . import dataset
from deepobs import config
class fmnist(dataset.DataSet):
    """DeepOBS data set class for the `Fashion-MNIST (FMNIST)\
    <https://github.com/zalandoresearch/fashion-mnist>`_ data set.

    Args:
      batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
          is not a divider of the dataset size (``60 000`` for train, ``10 000``
          for test) the remainder is dropped in each epoch (after shuffling).
      train_eval_size (int): Size of the train eval data set.
          Defaults to ``10 000`` the size of the test set.

    Attributes:
      batch: A tuple ``(x, y)`` of tensors, yielding batches of MNIST images
          (``x`` with shape ``(batch_size, 28, 28, 1)``) and corresponding one-hot
          label vectors (``y`` with shape ``(batch_size, 10)``). Executing these
          tensors raises a ``tf.errors.OutOfRangeError`` after one epoch.
      train_init_op: A tensorflow operation initializing the dataset for the
          training phase.
      train_eval_init_op: A tensorflow operation initializing the testproblem for
          evaluating on training data.
      test_init_op: A tensorflow operation initializing the testproblem for
          evaluating on test data.
      phase: A string-value tf.Variable that is set to ``train``, ``train_eval``
          or ``test``, depending on the current phase. This can be used by testproblems
          to adapt their behavior to this phase.
    """

    def __init__(self, batch_size, train_eval_size=10000):
        """Creates a new Fashion-MNIST instance.

        Args:
          batch_size (int): The mini-batch size to use. Note that, if ``batch_size``
              is not a divider of the dataset size (``60 000`` for train, ``10 000``
              for test) the remainder is dropped in each epoch (after shuffling).
          train_eval_size (int): Size of the train eval data set.
              Defaults to ``10 000`` the size of the test set.
        """
        self._name = "fmnist"
        # Stored before super().__init__, which drives dataset creation
        # via the _make_*_dataset hooks below.
        self._train_eval_size = train_eval_size
        super(fmnist, self).__init__(batch_size)
def _make_dataset(self, images_file, labels_file, shuffle=True):
    """Creates a Fashion-MNIST data set (helper used by ``.make_*_dataset`` below).

    Args:
      images_file (str): Path to the images in compressed ``.gz`` files.
      labels_file (str): Path to the labels in compressed ``.gz`` files.
      shuffle (bool):  Switch to turn on or off shuffling of the data set.
          Defaults to ``True``.

    Returns:
      A tf.data.Dataset yielding batches of Fashion-MNIST data.
    """
    X, y = self._read_mnist_data(images_file, labels_file)

    with tf.name_scope("fmnist"):
        # Build the input pipeline on the CPU so the accelerator is
        # free for compute.
        with tf.device('/cpu:0'):
            data = tf.data.Dataset.from_tensor_slices((X, y))
            if shuffle:
                data = data.shuffle(buffer_size=20000)
            # drop_remainder=True: a partial final batch is discarded.
            data = data.batch(self._batch_size, drop_remainder=True)
            data = data.prefetch(buffer_size=4)
            return data
def _make_train_dataset(self):
    """Creates the Fashion-MNIST training dataset.

    Returns:
      A tf.data.Dataset instance with batches of training data.
    """
    fmnist_dir = os.path.join(config.get_data_dir(), "fmnist")
    return self._make_dataset(
        os.path.join(fmnist_dir, "train-images-idx3-ubyte.gz"),
        os.path.join(fmnist_dir, "train-labels-idx1-ubyte.gz"),
        shuffle=True,
    )
def _make_train_eval_dataset(self):
    """Creates the Fashion-MNIST train eval dataset.

    Returns:
      A tf.data.Dataset instance with batches of training eval data.
    """
    # Reuse the (shuffled) training pipeline, truncated to
    # train_eval_size examples worth of whole batches.
    return self._train_dataset.take(
        self._train_eval_size // self._batch_size)
def _make_test_dataset(self):
    """Creates the Fashion-MNIST test dataset.

    Returns:
      A tf.data.Dataset instance with batches of test data.
    """
    fmnist_dir = os.path.join(config.get_data_dir(), "fmnist")
    # No shuffling for evaluation data.
    return self._make_dataset(
        os.path.join(fmnist_dir, "t10k-images-idx3-ubyte.gz"),
        os.path.join(fmnist_dir, "t10k-labels-idx1-ubyte.gz"),
        shuffle=False,
    )
# HELPER FUNCTIONS
def _read_mnist_data(self, images_file, labels_file):
    """Read the Fashion-MNIST images and labels from the downloaded files.

    Args:
      images_file (str): Path to the images in compressed ``.gz`` files.
      labels_file (str): Path to the labels in compressed ``.gz`` files.

    Returns:
      tupel: Tupel consisting of all the images (`X`, float32 scaled to
          [0, 1], shape (num_images, rows, cols, 1)) and the labels
          (`y`, int32 one-hot vectors).

    Raises:
      ValueError: If either file does not start with the expected IDX
          magic number (2051 for images, 2049 for labels).
    """
    # Load images from images_file
    with tf.gfile.Open(images_file, 'rb') as img_file:
        print('Extracting %s' % img_file.name)
        with gzip.GzipFile(fileobj=img_file) as bytestream:
            # IDX header: magic, image count, then row/column sizes.
            magic = self._read32(bytestream)
            if magic != 2051:
                raise ValueError(
                    'Invalid magic number %d in Fashion-MNIST image file: %s'
                    % (magic, img_file.name))
            num_images = self._read32(bytestream)
            rows = self._read32(bytestream)
            cols = self._read32(bytestream)
            buf = bytestream.read(rows * cols * num_images)
            data = np.frombuffer(buf, dtype=np.uint8)
            X = data.reshape(num_images, rows, cols, 1)
            # Normalize pixel bytes to [0, 1] floats.
            X = X.astype(np.float32) / 255.0

    # Load labels from labels file
    with tf.gfile.Open(labels_file, 'rb') as f:
        print('Extracting %s' % f.name)
        with gzip.GzipFile(fileobj=f) as bytestream:
            # IDX header: magic, then item count.
            magic = self._read32(bytestream)
            if magic != 2049:
                raise ValueError(
                    'Invalid magic number %d in Fashion-MNIST label file: %s'
                    % (magic, f.name))
            num_items = self._read32(bytestream)
            buf = bytestream.read(num_items)
            y = np.frombuffer(buf, dtype=np.uint8)
            y = self._dense_to_one_hot(y, 10)
            y = y.astype(np.int32)

    return X, y
def _read32(self, bytestream):
"""Helper function to read a bytestream.
Args:
bytestream (bytestream): Input bytestream.
Returns:
np.array: Bytestream as a np array.
"""
dtype = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dtype)[0]
def _dense_to_one_hot(self, labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
| 40.855556 | 86 | 0.606881 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.