code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module is for webkit-layout-tests-related operations."""
import re
from libs import test_name_util
from libs.test_results.base_test_results import BaseTestResults
from libs.test_results.classified_test_results import ClassifiedTestResults
# PASSING_STATUSES, FAILING_STATUSES and SKIPPING_STATUSES are copied from
# https://chromium.googlesource.com/chromium/tools/build/+/80940a89cc82f08cca98eb220d9c4b39a6000451/scripts/slave/recipe_modules/test_utils/util.py
PASSING_STATUSES = (
    # PASS - The test ran as expected.
    'PASS',
    # REBASELINE, NEEDSREBASELINE, NEEDSMANUALREBASELINE - Layout test
    # specific. Considers all *BASELINE results non-failures.
    'REBASELINE',
    'NEEDSREBASELINE',
    'NEEDSMANUALREBASELINE',
)
FAILING_STATUSES = (
    # FAIL - The test did not run as expected.
    'FAIL',
    # CRASH - The test runner crashed during the test.
    'CRASH',
    # TIMEOUT - The test hung (did not complete) and was aborted.
    'TIMEOUT',
    # MISSING - Layout test specific. The test completed but we could not
    # find an expected baseline to compare against.
    'MISSING',
    # LEAK - Layout test specific. Memory leaks were detected during the
    # test execution.
    'LEAK',
    # TEXT, AUDIO, IMAGE, IMAGE+TEXT - Layout test specific, deprecated.
    # The test is expected to produce a failure for only some parts.
    # Normally you will see "FAIL" instead.
    'TEXT',
    'AUDIO',
    'IMAGE',
    'IMAGE+TEXT',
)
SKIPPING_STATUSES = (
    # SKIP - The test was not run.
    'SKIP',
    'WONTFIX')
# These statuses should not appear in actual results, rather they should only
# appear in expects.
NON_TEST_OUTCOME_EXPECTATIONS = ('REBASELINE', 'SKIP', 'SLOW', 'WONTFIX')
# Checkout-relative directory that webkit layout test names are resolved
# against (see WebkitLayoutTestResults.GetTestLocation).
_BASE_FILE_PATH = 'third_party/blink/web_tests'
# Matches 'virtual/<layer>/<rest>' test names; group 1 captures the part
# after the virtual layer.
_VIRTUAL_TEST_NAME_PATTERN = re.compile(r'^virtual/[^/]+/(.*)$')
class WebkitLayoutTestResults(BaseTestResults):
  """Parses, merges and classifies webkit_layout_test results logs.

  All dict iteration uses .items()/.values() and next(iter(...)) rather than
  the Python-2-only .iteritems()/.itervalues()/.keys()[0], which behave
  identically on Python 2 but also work on Python 3.
  """

  def __init__(self, raw_test_results_json, partial_result=False):
    super(WebkitLayoutTestResults, self).__init__(raw_test_results_json,
                                                  partial_result)
    # Keep a flattened copy ('tests' as a one-level dict keyed by full test
    # name) so tests can be looked up directly by name.
    self.test_results_json = WebkitLayoutTestResults.FlattenTestResults(
        raw_test_results_json)

  def DoesTestExist(self, test_name):
    """Checks if can find the test name in test_results if result is valid.

    Returns:
      True if test_results_json is valid and the test exists in
      test_results_json, False otherwise.
    """
    return bool(
        self.test_results_json and
        (self.test_results_json.get('tests') or {}).get(test_name))

  @property
  def contains_all_tests(self):
    """
    True if the test result is merged results for all shards; False if it's a
    partial result.
    """
    return not self.partial_result

  def IsTestEnabled(self, test_name):
    """Returns True if the test is enabled, False otherwise.

    A test can be skipped by setting the expected result to SKIP or WONTFIX.
    But the actual result for a skipped test will only be SKIP but not WONTFIX.
    """
    if not self.DoesTestExist(test_name):
      return False
    test_result = self.test_results_json['tests'][test_name]
    return not any(s in test_result['expected'] for s in SKIPPING_STATUSES)

  def GetFailedTestsInformation(self):
    """Parses the json data to get all reliable failures' information.

    Currently this method will only get:
      - failed tests in a test step on waterfall from output.json, not include
        flakes (tests that eventually passed during retry).
    TODO(crbug/836994): parse other test results to get failed tests info.

    Returns:
      failed_test_log: Logs for failed tests, currently empty string.
      reliable_failed_tests: reliable failed tests, and the base name for each
        test - For webkit_layout_test base name should be the same as test
        name.
    """
    if not self.test_results_json or not self.test_results_json.get('tests'):
      return {}, {}
    failed_test_log = {}
    reliable_failed_tests = {}
    for test_name, test_result in self.test_results_json['tests'].items():
      if test_result.get('actual'):  # pragma: no branch.
        actuals = test_result['actual'].split(' ')
        expects = test_result['expected'].split(' ')
        if all(
            result in FAILING_STATUSES and
            not self.ResultWasExpected(result, expects)
            for result in set(actuals)):  # pragma: no branch.
          # A reliable failure is found when all test results are failing
          # statuses.
          # For the case where test failed with different statuses, we still
          # treat it as a reliable failure to be consistent with other tools.
          reliable_failed_tests[test_name] = test_name
          failed_test_log[test_name] = ''
    return failed_test_log, reliable_failed_tests

  def IsTestResultUseful(self):
    """Checks if the log contains useful information.

    The log is useful when it has per-type failure counts and every per-test
    entry is a dict carrying both 'actual' and 'expected' fields.
    """
    return bool(
        self.test_results_json and
        self.test_results_json.get('num_failures_by_type') and
        self.test_results_json.get('tests') and all(
            isinstance(i, dict) and i.get('actual') and i.get('expected')
            for i in self.test_results_json['tests'].values()))

  def GetTestLocation(self, test_name):
    """Gets test location for a specific test.

    Test file path is constructed from test_name based on some heuristic rule:
    1. For test_name in the format like 'virtual/a/bb/ccc.html', file path
       should be: 'third_party/blink/web_tests/bb/ccc.html'
    2. For other test names, file path should like
       'third_party/blink/web_tests/%s' % test_name

    # TODO(crbug/806002): Handle below cases.
    There are other cases which has NOT been covered:
    1. Baseline files: for example, for a test a/bb/ccc.html, it's
       possible to find a file like
       'third_party/blink/web_tests/a/bb/ccc_expected.txt'. Such files should
       also be considered to add to test locations, but not covered right now.
    2. Derived tests: for example, for a file named external/wpt/foo.any.js,
       there will be two tests generated from it, external/wpt/foo.window.html
       and external/wpt/foo.worker.html.

    There will be no line number info for webkit_layout_tests because typically
    a file is a test.

    Note: Since the test location is gotten from heuristic, it will not be as
    reliable as gtest (which is from test results log): file might not exist.

    Returns:
      (dict, str): A dict containing test location info and error string if any.
    """
    if not self.DoesTestExist(test_name):
      return None, 'test_location not found for %s.' % test_name
    test_name = test_name_util.RemoveSuffixFromWebkitLayoutTestName(
        test_name_util.RemoveVirtualLayersFromWebkitLayoutTestName(test_name))
    return {
        'line': None,
        'file': '%s/%s' % (_BASE_FILE_PATH, test_name),
    }, None

  def GetClassifiedTestResults(self):
    """Parses webkit_layout_test results, counts and classifies test results by:
    * status_group: passes/failures/skips/unknowns,
    * status: actual result status.

    Also counts number of expected and unexpected results for each test:
    if the status is included in expects or can be considered as expected, it
    is expected; otherwise it's unexpected.

    Returns:
      (ClassifiedTestResults) An object with information for each test:
      * total_run: total number of runs,
      * num_expected_results: total number of runs with expected results,
      * num_unexpected_results: total number of runs with unexpected results,
      * results: classified test results in 4 groups: passes, failures, skips
        and unknowns. There's another 'notruns' group for gtests, but not
        meaningful for webkit_layout_test, so it will always be empty here.
    """
    if not self.IsTestResultUseful():
      return {}
    test_results = ClassifiedTestResults()
    for test_name, test_result in self.test_results_json['tests'].items():
      actuals = test_result['actual'].split(' ')
      expects = test_result['expected'].split(' ')
      test_results[test_name].total_run = len(actuals)
      for actual in actuals:
        if self.ResultWasExpected(actual, expects):
          test_results[test_name].num_expected_results += 1
        else:
          test_results[test_name].num_unexpected_results += 1
        if actual in PASSING_STATUSES:
          test_results[test_name].results.passes[actual] += 1
        elif actual in FAILING_STATUSES:
          test_results[test_name].results.failures[actual] += 1
        elif actual in SKIPPING_STATUSES:
          test_results[test_name].results.skips[actual] += 1
        else:
          test_results[test_name].results.unknowns[actual] += 1
    return test_results

  @staticmethod
  def IsTestResultsInExpectedFormat(test_results_json):
    """Checks if the log can be parsed by this parser.

    Args:
      test_results_json (dict): It should be in one of below formats:
        {
          "tests": {
            "mojom_tests": {
              "parse": {
                "ast_unittest": {
                  "ASTTest": {
                    "testNodeBase": {
                      "expected": "PASS",
                      "actual": "PASS",
                      "artifacts": {
                        "screenshot": ["screenshots/page.png"],
                      }
                    }
                  }
                }
              }
            }
          },
          "interrupted": false,
          "path_delimiter": ".",
          "version": 3,
          "seconds_since_epoch": 1406662283.764424,
          "num_failures_by_type": {
            "FAIL": 0,
            "PASS": 1
          },
          "artifact_types": {
            "screenshot": "image/png"
          }
        }
        Or
        {
          "tests": {
            "mojom_tests/parse/ast_unittest/ASTTest/testNodeBase": {
              "expected": "PASS",
              "actual": "PASS",
              "artifacts": {
                "screenshot": ["screenshots/page.png"],
              }
            }
          },
          "interrupted": false,
          "path_delimiter": ".",
          "version": 3,
          "seconds_since_epoch": 1406662283.764424,
          "num_failures_by_type": {
            "FAIL": 0,
            "PASS": 1
          },
          "artifact_types": {
            "screenshot": "image/png"
          }
        }
    """
    if (not isinstance(test_results_json, dict) or
        not isinstance(test_results_json.get('tests'), dict)):
      return False
    flattened = WebkitLayoutTestResults.FlattenTestResults(test_results_json)
    return all(
        isinstance(i, dict) and i.get('actual') and i.get('expected')
        for i in flattened['tests'].values())

  @staticmethod
  def _GetPathDelimiter(test_results_json):
    """Gets path delimiter, default to '/'."""
    return test_results_json.get('path_delimiter') or '/'

  @staticmethod
  def FlattenTestResults(test_results_json):
    """Flatten test_results_json['tests'] from a trie to a one level dict
    and generate new format test_results_json."""
    if not test_results_json or not test_results_json.get('tests'):
      return test_results_json
    # next(iter(...)) works on Python 2 and 3 alike; dict.keys()[0] raises a
    # TypeError on Python 3 where keys() returns a view, not a list.
    sample_key = next(iter(test_results_json['tests']))
    path_delimiter = WebkitLayoutTestResults._GetPathDelimiter(
        test_results_json)
    if path_delimiter in sample_key:
      # This should not happen in raw data, assuming the test results log is
      # already flattened.
      return test_results_json

    # Checks if the sub_test_results_json is a leaf node.
    # Checks if can find actual and expected keys in dict since they are
    # required fields in per-test results.
    def is_a_leaf(sub_test_results_json):
      return (sub_test_results_json.get('actual') and
              sub_test_results_json.get('expected'))

    flattened = {}

    def flatten(tests, parent_key=''):
      for k, v in tests.items():
        new_key = parent_key + path_delimiter + k if parent_key else k
        if isinstance(v, dict):
          if not is_a_leaf(v):
            flatten(v, new_key)
          else:
            flattened[new_key] = v

    new_results = {}
    for k, v in test_results_json.items():
      if k == 'tests':
        flatten(v)
        new_results[k] = flattened
      else:
        new_results[k] = v
    return new_results

  @staticmethod
  def GetMergedTestResults(shard_results):
    """Merges the shards into one and returns the flatten version.

    Args:
      shard_results (list): A list of dicts with individual shard results.

    Returns:
      A dict with
      - all tests in shards
      - constants across all shards
      - accumulated values for some keys
    """
    if len(shard_results) == 1:
      return WebkitLayoutTestResults.FlattenTestResults(shard_results[0])

    def MergeAddable(key, merged_value, shard_value):
      """Recursively adds shard_value (int or dict of addables) into
      merged_value, raising on type mismatches or non-addable values."""
      if (merged_value and not isinstance(merged_value, type(shard_value))):
        raise Exception('Different value types for key %s when merging '
                        'json test results.' % key)
      if isinstance(shard_value, int):
        merged_value = shard_value + (merged_value or 0)
      elif isinstance(shard_value, dict):
        merged_value = merged_value or {}
        for sub_key, sub_value in shard_value.items():
          merged_value[sub_key] = MergeAddable(
              sub_key, merged_value.get(sub_key), sub_value)
      else:
        raise Exception('Value for key %s is not addable.' % key)
      return merged_value

    merged_results = {}

    def MergeShards(shard_result):
      matching = [
          'builder_name', 'build_number', 'chromium_revision', 'path_delimiter'
      ]
      addable = [
          'fixable', 'num_flaky', 'num_passes', 'num_regressions', 'skipped',
          'skips', 'num_failures_by_type'
      ]
      for key, value in shard_result.items():
        if key == 'interrupted':
          # If any shard is interrupted, mark the whole thing as interrupted.
          merged_results[key] = value or merged_results.get(key, False)
        elif key in matching:
          # These keys are constants which should be the same across all shards.
          if key in merged_results and merged_results[key] != value:
            raise Exception('Different values for key %s when merging '
                            'json test results: %s vs %s.' %
                            (key, merged_results.get(key), value))
          merged_results[key] = value
        elif key in addable:
          # These keys are accumulated sums we want to add together.
          merged_results[key] = MergeAddable(key, merged_results.get(key),
                                             value)
        elif key == 'tests':
          merged_results[key] = merged_results.get(key) or {}
          merged_results[key].update(value)
        # Keys outside the known categories are intentionally dropped.

    for shard_result in shard_results:
      MergeShards(WebkitLayoutTestResults.FlattenTestResults(shard_result))
    return merged_results

  @staticmethod
  def ResultWasExpected(result, expected_results):
    # pylint: disable=line-too-long
    """Returns whether the result can be treated as an expected result.

    Reference: https://chromium.googlesource.com/chromium/src/+/519d9521d16d9d3af3036daf4d1d5f4398f4396a/third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py#970

    Args:
      result: actual result of a test execution
      expected_results: list of results listed in test_expectations
    """
    # When expectations hold only non-outcome statuses (e.g. SKIP, SLOW),
    # fall back to expecting PASS.
    if not set(expected_results) - set(NON_TEST_OUTCOME_EXPECTATIONS):
      expected_results = set(['PASS'])
    if result in expected_results:
      return True
    if result in ('PASS', 'TEXT', 'IMAGE', 'IMAGE+TEXT', 'AUDIO',
                  'MISSING') and 'NEEDSMANUALREBASELINE' in expected_results:
      return True
    if result in ('TEXT', 'IMAGE', 'IMAGE+TEXT',
                  'AUDIO') and 'FAIL' in expected_results:
      return True
    if result == 'MISSING' and 'REBASELINE' in expected_results:
      return True
    if result == 'SKIP':
      return True
    return False
| [
"libs.test_name_util.RemoveVirtualLayersFromWebkitLayoutTestName",
"libs.test_results.classified_test_results.ClassifiedTestResults",
"re.compile"
] | [((1972, 2006), 're.compile', 're.compile', (['"""^virtual/[^/]+/(.*)$"""'], {}), "('^virtual/[^/]+/(.*)$')\n", (1982, 2006), False, 'import re\n'), ((8064, 8087), 'libs.test_results.classified_test_results.ClassifiedTestResults', 'ClassifiedTestResults', ([], {}), '()\n', (8085, 8087), False, 'from libs.test_results.classified_test_results import ClassifiedTestResults\n'), ((6888, 6957), 'libs.test_name_util.RemoveVirtualLayersFromWebkitLayoutTestName', 'test_name_util.RemoveVirtualLayersFromWebkitLayoutTestName', (['test_name'], {}), '(test_name)\n', (6946, 6957), False, 'from libs import test_name_util\n')] |
import os
import zipfile
import hashlib
import logging
import json
from django.conf import settings
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import (AllowAny, IsAuthenticated, )
from .serializers import TestCaseSerializer
from .utils import (rand_str, filter_name_list)
from .conf import ErrorMsg
# Module-level logging setup executed at import time: grab the root logger
# (empty name) and mirror INFO-and-above records into a dedicated log file.
logger=logging.getLogger("")
logger.setLevel(level = logging.INFO)
# NOTE(review): the relative path assumes recvCases/log/ exists in the
# working directory — FileHandler raises at import time otherwise; confirm.
log_file=logging.FileHandler("recvCases/log/log.txt")
log_file.setLevel(logging.INFO)
logger.addHandler(log_file)
class UploadCases(APIView):
    """API endpoint that accepts a zip archive of test cases for a problem.

    The archive is expected to contain alternating ``N.in`` / ``N.out`` files;
    they are extracted to ``settings.TEST_CASE_DIR/<problem_id>`` together
    with an ``info`` metadata file consumed by the judge server.
    """
    permission_classes = [AllowAny]

    def post(self, request):
        """Validates the request, saves the archive and returns a summary.

        Returns:
            Response whose payload carries ``code`` (0 on success, -1 on
            error), ``msg`` and, on success, per-case metadata in ``data``.
        """
        resp_data = {'code': 0, 'msg': 'success', 'data': {}}
        req_serializer = TestCaseSerializer(data=request.data)
        if not req_serializer.is_valid():
            resp_data['code'] = -1
            resp_data['msg'] = 'Request data error'
            return Response(data=resp_data)
        req_data = request.data
        file = req_data['file']
        # Persist the uploaded stream to a uniquely named temp file so the
        # zipfile module can read it from disk.
        zip_file = f"/tmp/{rand_str()}.zip"
        with open(zip_file, "wb") as f:
            for chunk in file:
                f.write(chunk)
        info, ret_str = self.process_zip(zip_file, req_data['problem_id'])
        os.remove(zip_file)
        if ret_str != "OK":
            resp_data['msg'] = ErrorMsg[ret_str]
            resp_data['code'] = -1
            return Response(data=resp_data)
        resp_data['data'] = info  # info is a dict
        resp_data['data']['problem_id'] = req_data['problem_id']
        return Response(data=resp_data)

    def process_zip(self, uploaded_zip_file, problem_id, dir=""):
        """Extracts, normalizes and indexes the test cases in the archive.

        Args:
            uploaded_zip_file: path of the zip file on local disk.
            problem_id: id of the problem; also used as the destination
                directory name under settings.TEST_CASE_DIR.
            dir: optional sub-directory inside the archive holding the cases.

        Returns:
            (info, status): ``info`` maps 1-based case indices to metadata
            dicts; ``status`` is "OK" on success or an ErrorMsg key otherwise.
        """
        try:
            zip_file = zipfile.ZipFile(uploaded_zip_file, "r")
        except zipfile.BadZipFile:
            logger.info(f'{problem_id}: The uploaded test case zip file is bad.')
            return {}, "BadZipFile"
        name_list = zip_file.namelist()
        test_case_list = filter_name_list(name_list, dir=dir)
        if not test_case_list:
            logger.info(f'{problem_id}: The uploaded test case zip file is empty.')
            return {}, "EmptyZipFile"
        test_case_id = problem_id
        test_case_dir = os.path.join(settings.TEST_CASE_DIR, test_case_id)
        # exist_ok=True replaces the previous blanket try/except-pass so real
        # failures (e.g. permission errors) are no longer silently swallowed.
        os.makedirs(test_case_dir, exist_ok=True)
        os.chmod(test_case_dir, 0o710)
        size_cache = {}
        md5_cache = {}
        # Normalize line endings of the .in/.out files in the archive
        # and write them to disk.
        for item in test_case_list:
            with open(os.path.join(test_case_dir, item), "wb") as f:
                content = zip_file.read(f"{dir}{item}").replace(b"\r\n", b"\n")
                size_cache[item] = len(content)
                if item.endswith(".out"):
                    md5_cache[item] = hashlib.md5(content.rstrip()).hexdigest()
                f.write(content)
        # The spj field is kept for judge-server compatibility;
        # uploading spj test cases is not supported.
        test_case_info = {"spj": False, "test_cases": {}}
        info = {}
        # ["1.in", "1.out", "2.in", "2.out"] => [("1.in", "1.out"), ("2.in", "2.out")]
        test_case_list = zip(*[test_case_list[i::2] for i in range(2)])
        # One iteration per (input, output) pair below.
        for index, item in enumerate(test_case_list):
            data = {"stripped_output_md5": md5_cache[item[1]],
                    "input_size": size_cache[item[0]],
                    "output_size": size_cache[item[1]],
                    "input_name": item[0],
                    "output_name": item[1]}
            info[str(index + 1)] = data
            test_case_info["test_cases"][str(index + 1)] = data
        # Write the "info" metadata file describing all test cases.
        with open(os.path.join(test_case_dir, "info"), "w", encoding="utf-8") as f:
            f.write(json.dumps(test_case_info, indent=4))
        # Restrict permissions of the extracted in/out/info files.
        for item in os.listdir(test_case_dir):
            os.chmod(os.path.join(test_case_dir, item), 0o640)
        logger.info(f'{problem_id}: Test cases upload success.')
        return info, "OK"
"logging.getLogger",
"os.listdir",
"zipfile.ZipFile",
"os.makedirs",
"json.dumps",
"os.path.join",
"os.chmod",
"rest_framework.response.Response",
"logging.FileHandler",
"os.remove"
] | [((418, 439), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (435, 439), False, 'import logging\n'), ((487, 531), 'logging.FileHandler', 'logging.FileHandler', (['"""recvCases/log/log.txt"""'], {}), "('recvCases/log/log.txt')\n", (506, 531), False, 'import logging\n'), ((1178, 1197), 'os.remove', 'os.remove', (['zip_file'], {}), '(zip_file)\n', (1187, 1197), False, 'import os\n'), ((1440, 1464), 'rest_framework.response.Response', 'Response', ([], {'data': 'resp_data'}), '(data=resp_data)\n', (1448, 1464), False, 'from rest_framework.response import Response\n'), ((1989, 2039), 'os.path.join', 'os.path.join', (['settings.TEST_CASE_DIR', 'test_case_id'], {}), '(settings.TEST_CASE_DIR, test_case_id)\n', (2001, 2039), False, 'import os\n'), ((2112, 2140), 'os.chmod', 'os.chmod', (['test_case_dir', '(456)'], {}), '(test_case_dir, 456)\n', (2120, 2140), False, 'import os\n'), ((3318, 3343), 'os.listdir', 'os.listdir', (['test_case_dir'], {}), '(test_case_dir)\n', (3328, 3343), False, 'import os\n'), ((915, 939), 'rest_framework.response.Response', 'Response', ([], {'data': 'resp_data'}), '(data=resp_data)\n', (923, 939), False, 'from rest_framework.response import Response\n'), ((1297, 1321), 'rest_framework.response.Response', 'Response', ([], {'data': 'resp_data'}), '(data=resp_data)\n', (1305, 1321), False, 'from rest_framework.response import Response\n'), ((1553, 1592), 'zipfile.ZipFile', 'zipfile.ZipFile', (['uploaded_zip_file', '"""r"""'], {}), "(uploaded_zip_file, 'r')\n", (1568, 1592), False, 'import zipfile\n'), ((2050, 2076), 'os.makedirs', 'os.makedirs', (['test_case_dir'], {}), '(test_case_dir)\n', (2061, 2076), False, 'import os\n'), ((3165, 3200), 'os.path.join', 'os.path.join', (['test_case_dir', '"""info"""'], {}), "(test_case_dir, 'info')\n", (3177, 3200), False, 'import os\n'), ((3242, 3278), 'json.dumps', 'json.dumps', (['test_case_info'], {'indent': '(4)'}), '(test_case_info, indent=4)\n', (3252, 3278), False, 'import 
json\n'), ((3357, 3390), 'os.path.join', 'os.path.join', (['test_case_dir', 'item'], {}), '(test_case_dir, item)\n', (3369, 3390), False, 'import os\n'), ((2258, 2291), 'os.path.join', 'os.path.join', (['test_case_dir', 'item'], {}), '(test_case_dir, item)\n', (2270, 2291), False, 'import os\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Card Data Class.
The Model Card (MC) is the document designed for transparent reporting of AI
model provenance, usage, and ethics-informed evaluation. The model card can be
presented by different formats (e.g. HTML, PDF, Markdown). The properties of
the Model Card (MC) are defined by a json schema. The ModelCard class in the
ModelCardsToolkit serves as an API to read and write MC properties by the users.
"""
import abc
import json as json_lib
from typing import Any, Dict, List, Optional, Text
import dataclasses
from model_card_toolkit.proto import model_card_pb2
from model_card_toolkit.utils import validation
from google.protobuf import descriptor
from google.protobuf import message
# JSON key under which the model card schema version is stored.
_SCHEMA_VERSION_STRING = "schema_version"
# TODO(b/181702622): Think about a smart and clean way to control the required
# field.
class BaseModelCardField(abc.ABC):
  """Model card field base class.

  This is an abstract class. All the model card fields should inherit this class
  and override the _proto_type property to the corresponding proto type. This
  abstract class provides methods `copy_from_proto`, `merge_from_proto` and
  `to_proto` to convert the class from and to proto. The child class does not
  need to override this unless it needs some special process.
  """

  @property
  @abc.abstractmethod
  def _proto_type(self):
    """The proto type. Child class should overwrite this."""

  def to_proto(self) -> message.Message:
    """Convert this class object to the proto.

    Raises:
      ValueError: if an instance attribute has no matching proto field.
    """
    proto = self._proto_type()
    for field_name, field_value in self.__dict__.items():
      if not hasattr(proto, field_name):
        raise ValueError("%s has no such field named '%s'." %
                         (type(proto), field_name))
      # Note: skips every falsy value (None, '', [], 0, False), not only
      # unset fields.
      if not field_value:
        continue
      field_descriptor = proto.DESCRIPTOR.fields_by_name[field_name]
      # Process Message type.
      if field_descriptor.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          for nested_message in field_value:
            getattr(proto, field_name).add().CopyFrom(nested_message.to_proto())  # pylint: disable=protected-access
        else:
          getattr(proto, field_name).CopyFrom(field_value.to_proto())  # pylint: disable=protected-access
      # Process Non-Message type
      else:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          getattr(proto, field_name).extend(field_value)
        else:
          setattr(proto, field_name, field_value)
    return proto

  def _from_proto(self, proto: message.Message) -> "BaseModelCardField":
    """Convert proto to this class object.

    Raises:
      TypeError: if proto is not an instance of this field's _proto_type.
      ValueError: if the proto carries a field this class does not declare.
    """
    if not isinstance(proto, self._proto_type):
      raise TypeError("%s is expected. However %s is provided." %
                      (self._proto_type, type(proto)))
    for field_descriptor in proto.DESCRIPTOR.fields:
      field_name = field_descriptor.name
      if not hasattr(self, field_name):
        raise ValueError("%s has no such field named '%s.'" %
                         (self, field_name))
      # Process Message type.
      if field_descriptor.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          # Clean the list first.
          setattr(self, field_name, [])
          for p in getattr(proto, field_name):
            # To get the type hint of a list is not easy.
            # Instantiate the element type recorded in the List[...] annotation.
            field = self.__annotations__[field_name].__args__[0]()  # pytype: disable=attribute-error
            field._from_proto(p)  # pylint: disable=protected-access
            getattr(self, field_name).append(field)
        elif proto.HasField(field_name):
          getattr(self, field_name)._from_proto(getattr(proto, field_name))  # pylint: disable=protected-access
      # Process Non-Message type
      else:
        if field_descriptor.label == descriptor.FieldDescriptor.LABEL_REPEATED:
          # Slice copy so this object does not alias the proto's container.
          setattr(self, field_name, getattr(proto, field_name)[:])
        elif proto.HasField(field_name):
          setattr(self, field_name, getattr(proto, field_name))
    return self

  def merge_from_proto(self, proto: message.Message) -> "BaseModelCardField":
    """Merges the contents of the model card proto into current object."""
    current = self.to_proto()
    current.MergeFrom(proto)
    self.clear()
    return self._from_proto(current)

  def copy_from_proto(self, proto: message.Message) -> "BaseModelCardField":
    """Copies the contents of the model card proto into current object."""
    self.clear()
    return self._from_proto(proto)

  def to_json(self) -> Text:
    """Convert this class object to json."""
    return json_lib.dumps(self.to_dict(), indent=2)

  def to_dict(self) -> Dict[Text, Any]:
    """Convert your model card to a python dictionary."""
    # ignore None properties recursively to allow missing values.
    # Note: the `if v` check also drops other falsy values ('', [], 0, False).
    ignore_none = lambda properties: {k: v for k, v in properties if v}
    return dataclasses.asdict(self, dict_factory=ignore_none)

  def clear(self):
    """Clear the subfields of this BaseModelCardField."""
    for field_name, field_value in self.__dict__.items():
      if isinstance(field_value, BaseModelCardField):
        field_value.clear()
      elif isinstance(field_value, list):
        setattr(self, field_name, [])
      else:
        setattr(self, field_name, None)

  @classmethod
  def _get_type(cls, obj: Any):
    """Returns type(obj); indirection used in `_proto_type` InitVar
    annotations (see PerformanceMetric)."""
    return type(obj)
@dataclasses.dataclass
class Owner(BaseModelCardField):
  """The information about owners of a model.

  Attributes:
    name: The name of the model owner.
    contact: The contact information for the model owner or owners. These could
      be individual email addresses, a team mailing list expressly, or a
      monitored feedback form.
  """
  name: Optional[Text] = None
  contact: Optional[Text] = None

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Owner)] = model_card_pb2.Owner
@dataclasses.dataclass
class Version(BaseModelCardField):
  """The information about versions of a model.

  If there are multiple versions of the model, or there may be in the future,
  it’s useful for your audience to know which version of the model is
  discussed
  in the Model Card. If there are previous versions of this model, briefly
  describe how this version is different. If no more than one version of the
  model will be released, this field may be omitted.

  Attributes:
    name: The name of the version.
    date: The date this version was released.
    diff: The changes from the previous version.
  """
  name: Optional[Text] = None
  date: Optional[Text] = None
  diff: Optional[Text] = None

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Version)] = model_card_pb2.Version
@dataclasses.dataclass
class License(BaseModelCardField):
  """The license information for a model.

  Attributes:
    identifier: A standard SPDX license identifier (https://spdx.org/licenses/),
      or "proprietary" for an unlicensed Module.
    custom_text: The text of a custom license.
  """
  identifier: Optional[Text] = None
  custom_text: Optional[Text] = None

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.License)] = model_card_pb2.License
@dataclasses.dataclass
class Reference(BaseModelCardField):
  """Reference for a model.

  Attributes:
    reference: A reference to a resource.
  """
  reference: Optional[Text] = None

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Reference)] = model_card_pb2.Reference
@dataclasses.dataclass
class Citation(BaseModelCardField):
  """A citation for a model.

  Attributes:
    style: The citation style, such as MLA, APA, Chicago, or IEEE.
    citation: the citation.
  """
  style: Optional[Text] = None
  citation: Optional[Text] = None

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Citation)] = model_card_pb2.Citation
@dataclasses.dataclass
class ModelDetails(BaseModelCardField):
  """This section provides a general, high-level description of the model.

  Attributes:
    name: The name of the model.
    overview: A description of the model card.
    documentation: A more thorough description of the model and its usage.
    owners: The individuals or teams who own the model.
    version: The version of the model.
    licenses: The license information for the model. If the model is licensed
      for use by others, include the license type. If the model is not licensed
      for future use, you may state that here as well.
    references: Provide any additional links the reader may need. You can link
      to foundational research, technical documentation, or other materials that
      may be useful to your audience.
    citations: How should the model be cited? If the model is based on published
      academic research, cite the research.
  """
  name: Optional[Text] = None
  overview: Optional[Text] = None
  documentation: Optional[Text] = None
  # default_factory avoids the shared-mutable-default pitfall for list fields.
  owners: List[Owner] = dataclasses.field(default_factory=list)
  version: Optional[Version] = dataclasses.field(default_factory=Version)
  licenses: List[License] = dataclasses.field(default_factory=list)
  references: List[Reference] = dataclasses.field(default_factory=list)
  citations: List[Citation] = dataclasses.field(default_factory=list)

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.ModelDetails)] = model_card_pb2.ModelDetails
@dataclasses.dataclass
class Graphic(BaseModelCardField):
  """A named inline plot.

  Attributes:
    name: The name of the graphic.
    image: The image string encoded as a base64 string.
  """
  name: Optional[Text] = None
  image: Optional[Text] = None

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Graphic)] = model_card_pb2.Graphic
@dataclasses.dataclass
class GraphicsCollection(BaseModelCardField):
  """A collection of graphics.

  Each ```graphic``` in the ```collection``` field has both a ```name``` and
  an ```image```. For instance, you might want to display a graph showing the
  number of examples belonging to each class in your training dataset:

  ```python
  model_card.model_parameters.data.train.graphics.collection = [
    {'name': 'Training Set Size', 'image': training_set_size_barchart},
  ]
  ```

  Then, provide a description of the graph:

  ```python
  model_card.model_parameters.data.train.graphics.description = (
    'This graph displays the number of examples belonging to each class ',
    'in the training dataset. ')
  ```

  Attributes:
    description: The description of graphics.
    collection: A collection of graphics.
  """
  description: Optional[Text] = None
  collection: List[Graphic] = dataclasses.field(default_factory=list)

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.GraphicsCollection)] = model_card_pb2.GraphicsCollection
@dataclasses.dataclass
class SensitiveData(BaseModelCardField):
  """Sensitive data, such as PII (personally-identifiable information).

  Attributes:
    sensitive_data: A description of any sensitive data that may be present in a
      dataset. Be sure to note PII information such as names, addresses, phone
      numbers, etc. Preferably, such info should be scrubbed from a dataset if
      possible. Note that even non-identifying information, such as zip code,
      age, race, and gender, can be used to identify individuals when
      aggregated. Please describe any such fields here.
  """
  sensitive_data: List[Text] = dataclasses.field(default_factory=list)

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.SensitiveData)] = model_card_pb2.SensitiveData
@dataclasses.dataclass
class Dataset(BaseModelCardField):
  """Provide some information about a dataset used to generate a model.

  Attributes:
    name: The name of the dataset.
    description: The description of dataset.
    link: A link to the dataset.
    sensitive: Does this dataset contain human or other sensitive data?
    graphics: Visualizations of the dataset.
  """
  name: Optional[Text] = None
  description: Optional[Text] = None
  link: Optional[Text] = None
  sensitive: Optional[SensitiveData] = dataclasses.field(
      default_factory=SensitiveData)
  graphics: GraphicsCollection = dataclasses.field(
      default_factory=GraphicsCollection)

  # Proto message backing this field; consumed by BaseModelCardField's
  # to_proto/_from_proto conversions.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Dataset)] = model_card_pb2.Dataset
@dataclasses.dataclass
class ModelParameters(BaseModelCardField):
  """Parameters for construction of the model.

  Attributes:
    model_architecture: specifies the architecture of your model.
    data: specifies the datasets used to train and evaluate your model.
    input_format: describes the data format for inputs to your model.
    output_format: describes the data format for outputs from your model.
  """
  model_architecture: Optional[Text] = None
  data: List[Dataset] = dataclasses.field(default_factory=list)
  input_format: Optional[Text] = None
  output_format: Optional[Text] = None
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.ModelParameters)] = model_card_pb2.ModelParameters
@dataclasses.dataclass
class PerformanceMetric(BaseModelCardField):
  """The details of the performance metric.

  Attributes:
    type: What performance metric are you reporting on?
    value: What is the value of this performance metric?
    slice: What slice of your data was this metric computed on?
  """
  # TODO(b/179415408): add fields (name, value, confidence_interval, threshold,
  # slice) after gathering requirements (potential clients: Jigsaw)
  # The following fields are EXPERIMENTAL and introduced for migration purpose.
  type: Optional[Text] = None
  value: Optional[Text] = None
  slice: Optional[Text] = None
  # NOTE(review): uses BaseModelCardField._get_type while the sibling classes
  # use bare type(...) — confirm whether the two are interchangeable.
  _proto_type: dataclasses.InitVar[BaseModelCardField._get_type(
      model_card_pb2.PerformanceMetric)] = model_card_pb2.PerformanceMetric
@dataclasses.dataclass
class QuantitativeAnalysis(BaseModelCardField):
  """The quantitative analysis of a model.

  Identify relevant performance metrics and display values. Let’s say you’re
  interested in displaying the accuracy and false positive rate (FPR) of a
  cat vs. dog classification model. Assuming you have already computed both
  metrics, both overall and per-class, you can specify metrics like so:

  ```python
  model_card.quantitative_analysis.performance_metrics = [
    {'type': 'accuracy', 'value': computed_accuracy},
    {'type': 'accuracy', 'value': cat_accuracy, 'slice': 'cat'},
    {'type': 'accuracy', 'value': dog_accuracy, 'slice': 'dog'},
    {'type': 'fpr', 'value': computed_fpr},
    {'type': 'fpr', 'value': cat_fpr, 'slice': 'cat'},
    {'type': 'fpr', 'value': dog_fpr, 'slice': 'dog'},
  ]
  ```

  Attributes:
    performance_metrics: The performance metrics being reported.
    graphics: A collection of visualizations of model performance.
  """
  performance_metrics: List[PerformanceMetric] = dataclasses.field(
      default_factory=list)
  graphics: GraphicsCollection = dataclasses.field(
      default_factory=GraphicsCollection)
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(model_card_pb2.QuantitativeAnalysis
                                       )] = model_card_pb2.QuantitativeAnalysis
@dataclasses.dataclass
class User(BaseModelCardField):
  """A type of user for a model.

  Attributes:
    description: A description of a user.
  """
  description: Optional[Text] = None
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.User)] = model_card_pb2.User
@dataclasses.dataclass
class UseCase(BaseModelCardField):
  """A type of use case for a model.

  Attributes:
    description: A description of a use case.
  """
  description: Optional[Text] = None
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.UseCase)] = model_card_pb2.UseCase
@dataclasses.dataclass
class Limitation(BaseModelCardField):
  """A limitation a model.

  Attributes:
    description: A description of the limitation.
  """
  description: Optional[Text] = None
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Limitation)] = model_card_pb2.Limitation
@dataclasses.dataclass
class Tradeoff(BaseModelCardField):
  """A tradeoff for a model.

  Attributes:
    description: A description of the tradeoff.
  """
  description: Optional[Text] = None
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Tradeoff)] = model_card_pb2.Tradeoff
@dataclasses.dataclass
class Risk(BaseModelCardField):
  """Information about risks involved when using the model.

  Attributes:
    name: The name of the risk.
    mitigation_strategy: A mitigation strategy that you've implemented, or one
      that you suggest to users.
  """
  name: Optional[Text] = None
  mitigation_strategy: Optional[Text] = None
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Risk)] = model_card_pb2.Risk
@dataclasses.dataclass
class Considerations(BaseModelCardField):
  """Considerations related to model construction, training, and application.

  The considerations section includes qualitative information about your model,
  including some analysis of its risks and limitations. As such, this section
  usually requires careful consideration, and conversations with many relevant
  stakeholders, including other model developers, dataset producers, and
  downstream users likely to interact with your model, or be affected by its
  outputs.

  Attributes:
    users: Who are the intended users of the model? This may include
      researchers, developers, and/or clients. You might also include
      information about the downstream users you expect to interact with your
      model.
    use_cases: What are the intended use cases of the model? What use cases are
      out-of-scope?
    limitations: What are the known limitations of the model? This may include
      technical limitations, or conditions that may degrade model performance.
    tradeoffs: What are the known accuracy/performance tradeoffs for the model?
    ethical_considerations: What are the ethical risks involved in application
      of this model? For each risk, you may also provide a mitigation strategy
      that you've implemented, or one that you suggest to users.
  """
  users: List[User] = dataclasses.field(default_factory=list)
  use_cases: List[UseCase] = dataclasses.field(default_factory=list)
  limitations: List[Limitation] = dataclasses.field(default_factory=list)
  tradeoffs: List[Tradeoff] = dataclasses.field(default_factory=list)
  ethical_considerations: List[Risk] = dataclasses.field(default_factory=list)
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.Considerations)] = model_card_pb2.Considerations
@dataclasses.dataclass
class ModelCard(BaseModelCardField):
  """Fields used to generate the Model Card.

  Attributes:
    model_details: Descriptive metadata for the model.
    model_parameters: Technical metadata for the model.
    quantitative_analysis: Quantitative analysis of model performance.
    considerations: Any considerations related to model construction, training,
      and application.
  """
  model_details: ModelDetails = dataclasses.field(default_factory=ModelDetails)
  model_parameters: ModelParameters = dataclasses.field(
      default_factory=ModelParameters)
  quantitative_analysis: QuantitativeAnalysis = dataclasses.field(
      default_factory=QuantitativeAnalysis)
  considerations: Considerations = dataclasses.field(
      default_factory=Considerations)
  # Proto message counterpart; InitVar keeps it out of the dataclass fields.
  _proto_type: dataclasses.InitVar[type(
      model_card_pb2.ModelCard)] = model_card_pb2.ModelCard

  def to_json(self) -> Text:
    """Write ModelCard to JSON."""
    model_card_dict = self.to_dict()
    # Stamp the serialized card with the latest supported schema version.
    model_card_dict[
        _SCHEMA_VERSION_STRING] = validation.get_latest_schema_version()
    return json_lib.dumps(model_card_dict, indent=2)

  def _from_json(self, json_dict: Dict[Text, Any]) -> "ModelCard":
    """Read ModelCard from JSON.

    If ModelCard fields have already been set, this function will overwrite any
    existing values.

    WARNING: This method's interface may change in the future, do not use for
    critical workflows.

    Args:
      json_dict: A JSON dict from which to populate fields in the model card
        schema.

    Returns:
      self

    Raises:
      JSONDecodeError: If `json_dict` is not a valid JSON string.
      ValidationError: If `json_dict` does not follow the model card JSON
        schema.
      ValueError: If `json_dict` contains a value not in the class or schema
        definition.
    """

    def _populate_from_json(json_dict: Dict[Text, Any],
                            field: BaseModelCardField) -> BaseModelCardField:
      # Recursively copy each JSON key into the matching attribute of `field`.
      for subfield_key in json_dict:
        if subfield_key.startswith(_SCHEMA_VERSION_STRING):
          # The schema-version stamp is metadata, not a card field.
          continue
        elif not hasattr(field, subfield_key):
          raise ValueError(
              "BaseModelCardField %s has no such field named '%s.'" %
              (field, subfield_key))
        elif isinstance(json_dict[subfield_key], dict):
          # Nested object: recurse into the corresponding sub-field.
          subfield_value = _populate_from_json(
              json_dict[subfield_key], getattr(field, subfield_key))
        elif isinstance(json_dict[subfield_key], list):
          subfield_value = []
          for item in json_dict[subfield_key]:
            if isinstance(item, dict):
              # Instantiate the list's element type from the field annotation,
              # e.g. List[Dataset] -> Dataset().
              new_object = field.__annotations__[subfield_key].__args__[0]() # pytype: disable=attribute-error
              subfield_value.append(_populate_from_json(item, new_object))
            else: # if primitive
              subfield_value.append(item)
        else:
          subfield_value = json_dict[subfield_key]
        setattr(field, subfield_key, subfield_value)
      return field

    validation.validate_json_schema(json_dict)
    # Reset all fields before overwriting with the JSON contents.
    self.clear()
    _populate_from_json(json_dict, self)
    return self
| [
"dataclasses.asdict",
"json.dumps",
"model_card_toolkit.utils.validation.validate_json_schema",
"dataclasses.field",
"model_card_toolkit.utils.validation.get_latest_schema_version"
] | [((9595, 9634), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (9612, 9634), False, 'import dataclasses\n'), ((9666, 9708), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Version'}), '(default_factory=Version)\n', (9683, 9708), False, 'import dataclasses\n'), ((9737, 9776), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (9754, 9776), False, 'import dataclasses\n'), ((9809, 9848), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (9826, 9848), False, 'import dataclasses\n'), ((9879, 9918), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (9896, 9918), False, 'import dataclasses\n'), ((11289, 11328), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (11306, 11328), False, 'import dataclasses\n'), ((12082, 12121), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (12099, 12121), False, 'import dataclasses\n'), ((12751, 12799), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'SensitiveData'}), '(default_factory=SensitiveData)\n', (12768, 12799), False, 'import dataclasses\n'), ((12840, 12893), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'GraphicsCollection'}), '(default_factory=GraphicsCollection)\n', (12857, 12893), False, 'import dataclasses\n'), ((13485, 13524), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (13502, 13524), False, 'import dataclasses\n'), ((15529, 15568), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (15546, 15568), False, 'import dataclasses\n'), ((15609, 15662), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 
'GraphicsCollection'}), '(default_factory=GraphicsCollection)\n', (15626, 15662), False, 'import dataclasses\n'), ((18833, 18872), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (18850, 18872), False, 'import dataclasses\n'), ((18902, 18941), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (18919, 18941), False, 'import dataclasses\n'), ((18976, 19015), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (18993, 19015), False, 'import dataclasses\n'), ((19046, 19085), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (19063, 19085), False, 'import dataclasses\n'), ((19125, 19164), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (19142, 19164), False, 'import dataclasses\n'), ((19722, 19769), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'ModelDetails'}), '(default_factory=ModelDetails)\n', (19739, 19769), False, 'import dataclasses\n'), ((19808, 19858), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'ModelParameters'}), '(default_factory=ModelParameters)\n', (19825, 19858), False, 'import dataclasses\n'), ((19914, 19969), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'QuantitativeAnalysis'}), '(default_factory=QuantitativeAnalysis)\n', (19931, 19969), False, 'import dataclasses\n'), ((20012, 20061), 'dataclasses.field', 'dataclasses.field', ([], {'default_factory': 'Considerations'}), '(default_factory=Considerations)\n', (20029, 20061), False, 'import dataclasses\n'), ((5602, 5652), 'dataclasses.asdict', 'dataclasses.asdict', (['self'], {'dict_factory': 'ignore_none'}), '(self, dict_factory=ignore_none)\n', (5620, 5652), False, 'import dataclasses\n'), ((20328, 20366), 
'model_card_toolkit.utils.validation.get_latest_schema_version', 'validation.get_latest_schema_version', ([], {}), '()\n', (20364, 20366), False, 'from model_card_toolkit.utils import validation\n'), ((20378, 20419), 'json.dumps', 'json_lib.dumps', (['model_card_dict'], {'indent': '(2)'}), '(model_card_dict, indent=2)\n', (20392, 20419), True, 'import json as json_lib\n'), ((22312, 22354), 'model_card_toolkit.utils.validation.validate_json_schema', 'validation.validate_json_schema', (['json_dict'], {}), '(json_dict)\n', (22343, 22354), False, 'from model_card_toolkit.utils import validation\n')] |
#!/usr/bin/env python
from __future__ import division, print_function
import numpy as np
import rospy
from rospkg.rospack import RosPack
from copy import deepcopy
from tf2_ros import TransformListener, Buffer
from bopt_grasp_quality.srv import bopt, boptResponse
from bayesian_optimization import Random_Explorer
from bayesian_optimization.opt_nodes import RS_Node
# from math import nan
from geometry_msgs.msg import PoseStamped, Pose, Transform
def TF2Pose(TF_msg):
    """Convert a TransformStamped message into an equivalent PoseStamped.

    The header is copied verbatim; translation maps to position and
    rotation maps to orientation.
    """
    pose_msg = PoseStamped()
    pose_msg.header = TF_msg.header
    trans = TF_msg.transform.translation
    rot = TF_msg.transform.rotation
    pose_msg.pose.position.x = trans.x
    pose_msg.pose.position.y = trans.y
    pose_msg.pose.position.z = trans.z
    pose_msg.pose.orientation.x = rot.x
    pose_msg.pose.orientation.y = rot.y
    pose_msg.pose.orientation.z = rot.z
    pose_msg.pose.orientation.w = rot.w
    return pose_msg
if __name__ == "__main__":
    # Node exposing a random-search commander service for exploring
    # end-effector poses around the arm's current pose.
    rospy.init_node('ros_bo')
    # lb_y = rospy.get_param('~lb_x', -.2)
    # ub_y = rospy.get_param('~ub_x', .2)
    # Per-axis search bounds, read as offsets relative to the initial position.
    lb_x = [float(xx) for xx in rospy.get_param('~lb_x', [-.2, 0., -.2])]
    ub_x = [float(xx) for xx in rospy.get_param('~ub_x', [.2, 0., .2])]
    ee_link = rospy.get_param('~ee_link', 'hand_root')
    base_link = rospy.get_param('~base_link', 'world')
    service_name = rospy.get_param('~commander_service', 'bayes_optimization')
    n_iter = rospy.get_param('~search_iters', 20)
    resolution = rospy.get_param('~resolution', .001)
    # TF machinery to look up the current end-effector pose.
    tf_buffer = Buffer(rospy.Duration(50))
    tf_listener = TransformListener(tf_buffer)
    rospy.loginfo(rospy.get_name().split('/')[1] + ': Initialization....')
    rospy.loginfo(rospy.get_name().split('/')[1] + ': Getting current pose....')
    rospy.sleep(0.5)
    try:
        # Query base_link -> ee_link; fall back to the identity pose on failure.
        ARM_TF = tf_buffer.lookup_transform(base_link, ee_link, rospy.Time().now(), rospy.Duration(0.1))
        current_pose = TF2Pose(ARM_TF)
    except Exception as e:
        rospy.logerr('error in finding the arm...')
        rospy.logerr('Starting at (0, 0, 0), (0, 0, 0, 1)')
        current_pose = PoseStamped()
        current_pose.pose.orientation.w = 1.
    pose = [
        [current_pose.pose.position.x, current_pose.pose.position.y, current_pose.pose.position.z],
        [current_pose.pose.orientation.x, current_pose.pose.orientation.y, current_pose.pose.orientation.z, current_pose.pose.orientation.w]]
    rospy.loginfo(
        rospy.get_name().split('/')[1] + ': starting at: ({:.3f}, {:.3f}, {:.3f})-({:.3f}, {:.3f}, {:.3f}, {:.3f})'.format(*pose[0] + pose[1])
    )
    n = len(lb_x)
    init_pos = np.array([
        current_pose.pose.position.x,
        current_pose.pose.position.y,
        current_pose.pose.position.z])
    assert(len(lb_x) == len(ub_x))
    params = {
        Random_Explorer.PARAMS.iters :n_iter,
        Random_Explorer.PARAMS.init_pos : init_pos,
        Random_Explorer.PARAMS.sampling : [resolution] * n}
    # lb = current_pose.pose.position.y + lb_x * np.ones((n,))
    # ub = current_pose.pose.position.y + ub_x * np.ones((n,))
    # Absolute bounds: current position plus the configured offsets.
    # NOTE(review): the -1e-10 nudge presumably keeps the start point strictly
    # inside the lower bound — confirm against RS_Node's bound handling.
    lb = init_pos[np.arange(len(lb_x))] + lb_x - 1e-10
    ub = init_pos[np.arange(len(ub_x))] + ub_x
    # Constructs the node; this serves `service_name` until shutdown.
    RS_Node(n, params, lb=lb, ub=ub, init_pose=current_pose.pose, service_name=service_name)
| [
"rospy.logerr",
"tf2_ros.TransformListener",
"rospy.init_node",
"rospy.get_param",
"numpy.array",
"rospy.Time",
"geometry_msgs.msg.PoseStamped",
"rospy.get_name",
"rospy.Duration",
"rospy.sleep",
"bayesian_optimization.opt_nodes.RS_Node"
] | [((486, 499), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (497, 499), False, 'from geometry_msgs.msg import PoseStamped, Pose, Transform\n'), ((1025, 1050), 'rospy.init_node', 'rospy.init_node', (['"""ros_bo"""'], {}), "('ros_bo')\n", (1040, 1050), False, 'import rospy\n'), ((1297, 1337), 'rospy.get_param', 'rospy.get_param', (['"""~ee_link"""', '"""hand_root"""'], {}), "('~ee_link', 'hand_root')\n", (1312, 1337), False, 'import rospy\n'), ((1354, 1392), 'rospy.get_param', 'rospy.get_param', (['"""~base_link"""', '"""world"""'], {}), "('~base_link', 'world')\n", (1369, 1392), False, 'import rospy\n'), ((1412, 1471), 'rospy.get_param', 'rospy.get_param', (['"""~commander_service"""', '"""bayes_optimization"""'], {}), "('~commander_service', 'bayes_optimization')\n", (1427, 1471), False, 'import rospy\n'), ((1485, 1521), 'rospy.get_param', 'rospy.get_param', (['"""~search_iters"""', '(20)'], {}), "('~search_iters', 20)\n", (1500, 1521), False, 'import rospy\n'), ((1539, 1576), 'rospy.get_param', 'rospy.get_param', (['"""~resolution"""', '(0.001)'], {}), "('~resolution', 0.001)\n", (1554, 1576), False, 'import rospy\n'), ((1638, 1666), 'tf2_ros.TransformListener', 'TransformListener', (['tf_buffer'], {}), '(tf_buffer)\n', (1655, 1666), False, 'from tf2_ros import TransformListener, Buffer\n'), ((1827, 1843), 'rospy.sleep', 'rospy.sleep', (['(0.5)'], {}), '(0.5)\n', (1838, 1843), False, 'import rospy\n'), ((2689, 2793), 'numpy.array', 'np.array', (['[current_pose.pose.position.x, current_pose.pose.position.y, current_pose.\n pose.position.z]'], {}), '([current_pose.pose.position.x, current_pose.pose.position.y,\n current_pose.pose.position.z])\n', (2697, 2793), True, 'import numpy as np\n'), ((3267, 3360), 'bayesian_optimization.opt_nodes.RS_Node', 'RS_Node', (['n', 'params'], {'lb': 'lb', 'ub': 'ub', 'init_pose': 'current_pose.pose', 'service_name': 'service_name'}), '(n, params, lb=lb, ub=ub, init_pose=current_pose.pose, service_name=\n 
service_name)\n', (3274, 3360), False, 'from bayesian_optimization.opt_nodes import RS_Node\n'), ((1600, 1618), 'rospy.Duration', 'rospy.Duration', (['(50)'], {}), '(50)\n', (1614, 1618), False, 'import rospy\n'), ((1169, 1212), 'rospy.get_param', 'rospy.get_param', (['"""~lb_x"""', '[-0.2, 0.0, -0.2]'], {}), "('~lb_x', [-0.2, 0.0, -0.2])\n", (1184, 1212), False, 'import rospy\n'), ((1243, 1284), 'rospy.get_param', 'rospy.get_param', (['"""~ub_x"""', '[0.2, 0.0, 0.2]'], {}), "('~ub_x', [0.2, 0.0, 0.2])\n", (1258, 1284), False, 'import rospy\n'), ((1937, 1956), 'rospy.Duration', 'rospy.Duration', (['(0.1)'], {}), '(0.1)\n', (1951, 1956), False, 'import rospy\n'), ((2032, 2075), 'rospy.logerr', 'rospy.logerr', (['"""error in finding the arm..."""'], {}), "('error in finding the arm...')\n", (2044, 2075), False, 'import rospy\n'), ((2084, 2135), 'rospy.logerr', 'rospy.logerr', (['"""Starting at (0, 0, 0), (0, 0, 0, 1)"""'], {}), "('Starting at (0, 0, 0), (0, 0, 0, 1)')\n", (2096, 2135), False, 'import rospy\n'), ((2160, 2173), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2171, 2173), False, 'from geometry_msgs.msg import PoseStamped, Pose, Transform\n'), ((1917, 1929), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (1927, 1929), False, 'import rospy\n'), ((1685, 1701), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1699, 1701), False, 'import rospy\n'), ((1760, 1776), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1774, 1776), False, 'import rospy\n'), ((2510, 2526), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (2524, 2526), False, 'import rospy\n')] |
#!/usr/bin/env python
from time import sleep
from rediscache import rediscache
import time, redis
@rediscache(1, 2)
def getTestValue():
    """Return the constant pair (5, 'toto'); caching is handled by rediscache."""
    value = (5, 'toto')
    return value
if __name__ == '__main__':
    # Bugfix: this guard previously called `myfunction()`, which is not
    # defined anywhere in the script and raised NameError. The only entry
    # point defined above is `getTestValue`.
    getTestValue()
| [
"rediscache.rediscache"
] | [((103, 119), 'rediscache.rediscache', 'rediscache', (['(1)', '(2)'], {}), '(1, 2)\n', (113, 119), False, 'from rediscache import rediscache\n')] |
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
from .models import Event
UserModel = get_user_model()
class TestHappeningsGeneralViews(TestCase):
    """Smoke tests for the general event views (index, region, detail, CRUD, iCal)."""

    # Sample events and users loaded before every test.
    fixtures = ['events.json', 'users.json']

    def setUp(self):
        # The first fixture event/user are reused across the tests below.
        self.event = Event.objects.get(id=1)
        self.user = UserModel.objects.all()[0]

    def test_index(self):
        """
        Test index
        """
        resp = self.client.get(reverse('events_index'))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('object_list' in resp.context)

    def test_events_by_region(self):
        """
        Test events_by_region
        """
        resp = self.client.get(reverse('events_by_region', args=['Pacific']))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('object_list' in resp.context)
        self.assertTrue('region' in resp.context)

    def test_event_detail(self):
        """
        Test for valid event detail.
        """
        resp = self.client.get(reverse('event_detail', args=[self.event.slug]))
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('object' in resp.context)
        self.assertTrue('key' in resp.context)
        self.assertEquals(self.event.id, resp.context['object'].id)
        if self.event.ended:
            # Past events should not render a schedule link.
            # NOTE(review): comparing a str against resp.content suggests
            # Python 2; under Python 3 resp.content is bytes — confirm.
            self.assertFalse('schedule/">Schedule</a>' in resp.content)

    def test_event_creation(self):
        """
        Test for valid event creation.
        """
        # NOTE(review): '<PASSWORD>' looks like an anonymized placeholder;
        # the real fixture password is required for login to succeed.
        self.client.login(username=self.user.username, password='<PASSWORD>')
        response = self.client.get(reverse('add_event'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('form' in response.context)
        new_event = {
            "featured": True,
            "has_playlist": False,
            "submitted_by": 1,
            "add_date": "2013-08-05",
            "slug": "new-test-event",
            "start_date": "2013-08-10",
            "approved": True,
            "info": "This is a new test event.",
            "name": "<NAME>",
            "region": "Pacific",
        }
        # NOTE(review): `new_event` is built but never sent — the POST below
        # carries no data, so event creation is not actually exercised.
        response = self.client.post(reverse('add_event'))
        self.assertEqual(response.status_code, 200)

    def test_event_editing(self):
        """
        Test for valid event editing.
        """
        response = self.client.get(reverse('edit-event', args=[self.event.slug]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue('object' in response.context)
        self.assertTrue('form' in response.context)

    def test_ical_creation(self):
        # The iCal export should be a downloadable text/calendar attachment
        # whose first line opens the calendar and whose 10th line is the summary.
        response = self.client.get(reverse('event_ical', args=[self.event.slug]))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response['Content-Type'].startswith('text/calendar'))
        self.assertEquals(response['Filename'], 'filename.ics')
        self.assertEquals(response['Content-Disposition'], 'attachment; filename=filename.ics')
        response_list = response.content.split('\r\n')
        self.assertEquals(response_list[0], 'BEGIN:VCALENDAR')
        self.assertEquals(response_list[9], 'SUMMARY:Test Event')
| [
"django.contrib.auth.get_user_model",
"django.core.urlresolvers.reverse"
] | [((165, 181), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (179, 181), False, 'from django.contrib.auth import get_user_model\n'), ((488, 511), 'django.core.urlresolvers.reverse', 'reverse', (['"""events_index"""'], {}), "('events_index')\n", (495, 511), False, 'from django.core.urlresolvers import reverse\n'), ((739, 784), 'django.core.urlresolvers.reverse', 'reverse', (['"""events_by_region"""'], {'args': "['Pacific']"}), "('events_by_region', args=['Pacific'])\n", (746, 784), False, 'from django.core.urlresolvers import reverse\n'), ((1065, 1112), 'django.core.urlresolvers.reverse', 'reverse', (['"""event_detail"""'], {'args': '[self.event.slug]'}), "('event_detail', args=[self.event.slug])\n", (1072, 1112), False, 'from django.core.urlresolvers import reverse\n'), ((1641, 1661), 'django.core.urlresolvers.reverse', 'reverse', (['"""add_event"""'], {}), "('add_event')\n", (1648, 1661), False, 'from django.core.urlresolvers import reverse\n'), ((2189, 2209), 'django.core.urlresolvers.reverse', 'reverse', (['"""add_event"""'], {}), "('add_event')\n", (2196, 2209), False, 'from django.core.urlresolvers import reverse\n'), ((2395, 2440), 'django.core.urlresolvers.reverse', 'reverse', (['"""edit-event"""'], {'args': '[self.event.slug]'}), "('edit-event', args=[self.event.slug])\n", (2402, 2440), False, 'from django.core.urlresolvers import reverse\n'), ((2670, 2715), 'django.core.urlresolvers.reverse', 'reverse', (['"""event_ical"""'], {'args': '[self.event.slug]'}), "('event_ical', args=[self.event.slug])\n", (2677, 2715), False, 'from django.core.urlresolvers import reverse\n')] |
# -*- coding: UTF-8 -*-
#
# generated by wxGlade 0.9.3 on Wed Sep 11 13:50:00 2019
#
import wx
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyDialog(wx.Dialog):
    """wxGlade-generated dialog: an experiment list with Copy and Close buttons.

    Code between the `begin wxGlade` / `end wxGlade` markers is regenerated by
    wxGlade; avoid hand-editing inside those sections.
    """

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialog.__init__
        kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.SetSize((473, 300))
        self.ListExperiments = wx.ListBox(self, wx.ID_ANY, choices=[], style=0)
        self.ButtonCopy = wx.Button(self, wx.ID_ANY, "Copy List to Clipboard")
        self.ButtonClose = wx.Button(self, wx.ID_CLOSE, "")

        self.__set_properties()
        self.__do_layout()

        self.Bind(wx.EVT_BUTTON, self.on_copy, self.ButtonCopy)
        self.Bind(wx.EVT_BUTTON, self.on_close, self.ButtonClose)
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: MyDialog.__set_properties
        self.SetTitle("dialog_1")
        self.SetSize((473, 300))
        self.ButtonClose.SetDefault()
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: MyDialog.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
        sizer_1.Add(self.ListExperiments, 1, wx.ALL | wx.EXPAND, 10)
        sizer_2.Add(self.ButtonCopy, 0, 0, 0)
        sizer_2.Add((20, 20), 1, 0, 0)
        sizer_2.Add(self.ButtonClose, 0, 0, 0)
        sizer_1.Add(sizer_2, 0, wx.ALL | wx.EXPAND, 10)
        self.SetSizer(sizer_1)
        self.Layout()
        # end wxGlade

    def on_copy(self, event):  # wxGlade: MyDialog.<event_handler>
        # Generated stub; replace with real clipboard logic.
        print("Event handler 'on_copy' not implemented!")
        event.Skip()

    def on_close(self, event):  # wxGlade: MyDialog.<event_handler>
        # Generated stub; replace with real close handling.
        print("Event handler 'on_close' not implemented!")
        event.Skip()
# end of class MyDialog
| [
"wx.ListBox",
"wx.BoxSizer",
"wx.Button",
"wx.Dialog.__init__"
] | [((373, 412), 'wx.Dialog.__init__', 'wx.Dialog.__init__', (['self', '*args'], {}), '(self, *args, **kwds)\n', (391, 412), False, 'import wx\n'), ((477, 525), 'wx.ListBox', 'wx.ListBox', (['self', 'wx.ID_ANY'], {'choices': '[]', 'style': '(0)'}), '(self, wx.ID_ANY, choices=[], style=0)\n', (487, 525), False, 'import wx\n'), ((552, 604), 'wx.Button', 'wx.Button', (['self', 'wx.ID_ANY', '"""Copy List to Clipboard"""'], {}), "(self, wx.ID_ANY, 'Copy List to Clipboard')\n", (561, 604), False, 'import wx\n'), ((632, 664), 'wx.Button', 'wx.Button', (['self', 'wx.ID_CLOSE', '""""""'], {}), "(self, wx.ID_CLOSE, '')\n", (641, 664), False, 'import wx\n'), ((1181, 1205), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1192, 1205), False, 'import wx\n'), ((1224, 1250), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (1235, 1250), False, 'import wx\n')] |
#!/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as nl
import scipy.linalg as sl
import scipy.stats as ss
import time
aca = np.ascontiguousarray
def nul(n):
    """Return an (n, n) float array filled with zeros."""
    return np.full((n, n), 0.0)
def iuc(x, y):
    """
    Element-wise test whether the generalized EV pairs (x, y) lie inside the
    unit circle, i.e. |x / y| < 1. Pairs with y == 0 (including (0, 0)) are
    reported as not inside. Here for legacy reasons.
    """
    inside = np.zeros_like(x, dtype=bool)  # y == 0 entries stay False
    finite = y != 0
    inside[finite] = np.abs(x[finite] / y[finite]) < 1.0
    return inside
def ouc(x, y):
    """
    Element-wise test whether the generalized EV pairs (x, y) lie outside the
    unit circle, i.e. |x / y| > 1. Pairs with y == 0 (including (0, 0)) count
    as outside. Here for legacy reasons.
    """
    # stolen from scipy and inverted
    outside = np.ones_like(x, dtype=bool)  # y == 0 entries default to True
    finite = y != 0
    outside[finite] = np.abs(x[finite] / y[finite]) > 1.0
    return outside
def klein(A, B=None, nstates=None, verbose=False, force=False):
    """
    Klein's method

    Solves the linear RE system pencil (A, B) via an ordered QZ decomposition.

    Parameters
    ----------
    A : array
        Lead matrix of the system.
    B : array, optional
        Lag matrix; defaults to the identity.
    nstates : int, optional
        Expected number of states; inferred from the eigenvalue split if omitted.
    verbose : bool or int, optional
        Verbosity; values > 1 also print the generalized eigenvalues.
    force : bool, optional
        Continue (with a warning) when the Blanchard-Kahn count check fails.

    Returns
    -------
    omg, lam : arrays
        The two solution matrices built from the stable blocks of the QZ form.
    """
    st = time.time()
    if B is None:
        B = np.eye(A.shape[0])
    # ordered QZ: eigenvalues outside the unit circle are sorted to the front
    SS, TT, alp, bet, Q, Z = sl.ordqz(A, B, sort="ouc")
    if np.any(np.isclose(alp, bet)):
        mess = " Warning: unit root detected!"
    else:
        mess = ""
    # check for precision
    if not fast0(Q @ SS @ Z.T - A, 2):
        raise ValueError("Numerical errors in QZ")
    if verbose > 1:
        # reconstruct the generalized eigenvalues alp/bet (inf where bet == 0)
        out = np.empty_like(alp)
        nonzero = bet != 0
        out[~nonzero] = np.inf * np.abs(alp[~nonzero])
        out[nonzero] = alp[nonzero] / bet[nonzero]
        print(
            "[RE solver:]".ljust(15, " ") + " Generalized EVs:\n", np.sort(np.abs(out))
        )
    # check for Blanchard-Kahn
    out = ouc(alp, bet)
    if not nstates:
        nstates = sum(out)
    else:
        if not nstates == sum(out):
            # NOTE(review): the message says "inside the unit circle" while
            # `sum(out)` counts eigenvalues *outside* — wording may be off.
            mess = (
                "B-K condition not satisfied: %s states but %s Evs inside the unit circle."
                % (nstates, sum(out))
                + mess
            )
            if not force:
                raise ValueError(mess)
            elif verbose:
                print(mess)
    # partition the QZ factors at the state count
    S11 = SS[:nstates, :nstates]
    T11 = TT[:nstates, :nstates]
    Z11 = Z[:nstates, :nstates]
    Z21 = Z[nstates:, :nstates]
    # changed from sl to nl because of stability:
    omg = Z21 @ nl.inv(Z11)
    lam = Z11 @ nl.inv(S11) @ T11 @ nl.inv(Z11)
    if verbose:
        print(
            "[RE solver:]".ljust(15, " ")
            + " Done in %s. Determinant of `Z11` is %1.2e. There are %s EVs o.u.c. (of %s)."
            % (np.round((time.time() - st), 5), nl.det(Z11), sum(out), len(out))
            + mess
        )
    return omg, lam
# def re_bk(A, B=None, d_endo=None, verbose=False, force=False):
# """
# Klein's method
# """
# # TODO: rename this
# print('[RE solver:]'.ljust(15, ' ') +
# ' `re_bk` is depreciated. Use `klein` instead.')
# if B is None:
# B = np.eye(A.shape[0])
# MM, PP, alp, bet, Q, Z = sl.ordqz(A, B, sort='iuc')
# if not fast0(Q @ MM @ Z.T - A, 2):
# raise ValueError('Numerical errors in QZ')
# if verbose > 1:
# print('[RE solver:]'.ljust(15, ' ') +
# ' Pairs of `alp` and `bet`:\n', np.vstack((alp, bet)).T)
# out = ouc(alp, bet)
# if not d_endo:
# d_endo = sum(out)
# else:
# if sum(out) > d_endo:
# mess = 'B-K condition not satisfied: %s EVs outside the unit circle for %s forward looking variables.' % (
# sum(out), d_endo)
# elif sum(out) < d_endo:
# mess = 'B-K condition not satisfied: %s EVs outside the unit circle for %s forward looking variables.' % (
# sum(out), d_endo)
# else:
# mess = ''
# if mess and not force:
# raise ValueError(mess)
# elif mess and verbose:
# print(mess)
# Z21 = Z.T[-d_endo:, :d_endo]
# Z22 = Z.T[-d_endo:, d_endo:]
# if verbose:
# print('[RE solver:]'.ljust(
# 15, ' ')+' Determinant of `Z21` is %1.2e. There are %s EVs o.u.c.' % (nl.det(Z21), sum(out)))
# return -nl.inv(Z21) @ Z22
def lti(AA, BB, CC, dimp, dimq, tol=1e-6, check=False, verbose=False):
    """Standard linear time iteration.

    Iterates g <- -(BB + AA g)^{-1} CC until the update is below `tol`,
    then splits the fixed point into the (dimp x dimq) block `omg` and the
    (dimq x dimq) block `lam`.
    """
    if check:
        pass  # placeholder kept from the original; no pre-check implemented

    g_cur = np.eye(dimq + dimp)
    delta = tol + 1
    n_steps = 0
    while delta > tol:
        g_prev = g_cur
        g_cur = -nl.solve(BB + AA @ g_prev, CC)
        delta = np.abs(g_prev - g_cur).max()
        n_steps += 1
    if verbose:
        print(n_steps)

    return g_cur[dimq:, :dimq], g_cur[:dimq, :dimq]
def speed_kills(A, B, dimp, dimq, selector=None, tol=1e-6, check=False,
                verbose=False, max_iter=10000):
    """Improved linear time iteration.

    Triangularizes the pencil (A, B) via a QR decomposition and block
    elimination, then iterates on the reduced (dimq x dimq) problem.

    Parameters
    ----------
    A, B : arrays
        System matrices of size (dimq+dimp, dimq+dimp).
    dimp, dimq : int
        Dimensions of the two blocks.
    selector : array-like, optional
        Boolean/index mask restricting the convergence check to a subset of g.
    tol : float, optional
        Convergence tolerance on the iteration update.
    check : bool, optional
        Unused; kept for interface compatibility with `lti`.
    verbose : bool, optional
        Print the number of iterations on exit.
    max_iter : int, optional
        Iteration cap (new parameter, default 10000; backward compatible).

    Returns
    -------
    g, h : arrays
        The converged iterate and the implied companion matrix.

    Raises
    ------
    Exception
        If the iteration does not converge within `max_iter` steps.
    """
    # Triangularize: QR-decompose A and normalize the lower block rows.
    q, A = nl.qr(A)
    B = q.T @ B

    B11i = nl.inv(B[dimq:, dimq:])
    A[dimq:] = B11i @ A[dimq:]
    B[dimq:] = B11i @ B[dimq:]

    # Eliminate the upper-right block of B.
    A[:dimq] -= B[:dimq, dimq:] @ A[dimq:]
    B[:dimq, :dimq] -= B[:dimq, dimq:] @ B[dimq:, :dimq]
    B[:dimq, dimq:] = 0
    B[dimq:, dimq:] = np.eye(dimp)

    A1 = A[:dimq, :dimq]
    A3 = A[dimq:, dimq:]
    A2 = A[:dimq, dimq:]
    B1 = B[:dimq, :dimq]
    B2 = B[dimq:, :dimq]

    g = -B2
    norm = tol + 1
    icnt = 0
    while norm > tol:
        # Bugfix: the original referenced an undefined global `max_iter`
        # *after* the loop, so every call raised NameError; the bound is now
        # a keyword parameter and checked inside the loop.
        if icnt >= max_iter:
            raise Exception("iteration did not converge")
        gn = g
        g = A3 @ g @ nl.solve(A1 + A2 @ g, B1) - B2
        if selector is not None:
            norm = np.max(np.abs(gn - g)[selector])
        else:
            norm = np.max(np.abs(gn - g))
        icnt += 1
    if verbose:
        print(icnt)

    return g, -nl.inv(A[:dimq, :dimq] + A2 @ g) @ B1
def fast0(A, mode=-1, tol=1e-08):
    """Elementwise near-zero test with optional reduction.

    mode -1: full boolean mask; 0/1: all-near-zero along that axis;
    any other mode: single boolean for the whole array.
    """
    near_zero = abs(A) < tol
    if mode == -1:
        return near_zero
    if mode == 0:
        return near_zero.all(axis=0)
    if mode == 1:
        return near_zero.all(axis=1)
    return near_zero.all()
def map2arr(iterator, return_np_array=True, check_nones=True):
    """Cast the result of ``map`` into a tuple of stacked results.

    By default returns numpy arrays. If the mapped function yields tuples,
    one stacked array per tuple slot is returned; otherwise a single stacked
    object. Be warned: this cannot distinguish a function whose single
    result *is* a tuple.

    Parameters
    ----------
    iterator : iter
        the iterator returned by ``map``
    return_np_array : bool
        convert the collected lists to numpy arrays (default True)
    check_nones : bool
        silently skip ``None`` entries (default True)

    Returns
    -------
    numpy array (optional: list)
    """
    collected = ()
    state = 0  # 0: nothing seen yet, 1: tuple results, 2: scalar results
    for item in iterator:
        if check_nones and item is None:
            continue
        if state == 0:
            if isinstance(item, tuple):
                collected = tuple([part] for part in item)
                state = 1
            else:
                collected = [item]
                state = 2
        elif state == 1:
            for slot, part in enumerate(item):
                collected[slot].append(part)
        else:
            collected.append(item)
    if return_np_array:
        if state == 1:
            return tuple(np.array(column) for column in collected)
        return np.array(collected)
    return collected
def napper(cond, interval=0.1):
    """Block until ``cond()`` is truthy, printing the elapsed time.

    Polls ``cond`` every ``interval`` seconds, overwriting a "Zzzz..."
    status line in place, and prints a final line on wake-up.

    Fix: the original only bound ``elt`` inside the loop, so a condition
    that was true on the very first check raised NameError in the final
    print. ``elt`` is now initialized before the loop.
    """
    import time
    start_time = time.time()
    elt = 0.0
    while not cond():
        elt = round(time.time() - start_time, 3)
        print("Zzzz... " + str(elt) + "s", end="\r", flush=True)
        time.sleep(interval)
    print("Zzzz... " + str(elt) + "s.")
def timeprint(s, round_to=5, full=False):
    """Format a duration given in seconds as a human-readable string.

    Under a minute: rounded seconds ("30s" / "30 seconds"); under an hour:
    minutes and seconds ("1m30s"); otherwise hours, minutes, seconds.
    """
    if s < 60:
        secs = str(np.round(s, round_to))
        return secs + " seconds" if full else secs + "s"
    minutes, seconds = divmod(s, 60)
    if minutes < 60:
        if full:
            return f"{int(minutes)} minutes, {int(seconds)} seconds"
        return f"{int(minutes)}m{int(seconds)}s"
    hours, minutes = divmod(minutes, 60)
    if full:
        return f"{int(hours)} hours, {int(minutes)} minutes, {int(seconds)} seconds"
    return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
def shuffle(a, axis=-1):
    """Shuffle blocks of ``a`` along a single axis.

    The array is viewed as rows of length ``a.shape[axis]`` and the rows
    are permuted as units. NOTE(review): when ``reshape`` returns a view
    (contiguous input), the input array is shuffled in place as well —
    confirm callers rely on / tolerate this side effect.
    """
    original_shape = a.shape
    flat_view = a.reshape(-1, original_shape[axis])
    np.random.shuffle(flat_view)
    return flat_view.reshape(original_shape)
def print_dict(d):
    """Print every key/value pair of ``d``, one "key: value" per line.

    Always returns 0.
    """
    for key in d.keys():
        print(str(key) + ":", d[key])
    return 0
def sabs(x, eps=1e-10):
    """Smooth absolute value: sqrt(x**2 + eps), differentiable at 0.

    At x == 0 the value is sqrt(eps) rather than 0.
    """
    return np.sqrt(x * x + eps)
# aliases
# Backwards-compatible name: despite "list", it returns numpy arrays by default.
map2list = map2arr
# Shorthand for locating insertion indices in a sorted array.
indof = np.searchsorted
| [
"numpy.abs",
"numpy.eye",
"numpy.linalg.solve",
"numpy.linalg.qr",
"numpy.sqrt",
"numpy.isclose",
"time.sleep",
"numpy.linalg.det",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.empty_like",
"scipy.linalg.ordqz",
"time.time",
"numpy.round",
"numpy.random.shuffle"
] | [((200, 216), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (208, 216), True, 'import numpy as np\n'), ((354, 382), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (367, 382), True, 'import numpy as np\n'), ((709, 737), 'numpy.empty_like', 'np.empty_like', (['x'], {'dtype': 'bool'}), '(x, dtype=bool)\n', (722, 737), True, 'import numpy as np\n'), ((999, 1010), 'time.time', 'time.time', ([], {}), '()\n', (1008, 1010), False, 'import time\n'), ((1090, 1116), 'scipy.linalg.ordqz', 'sl.ordqz', (['A', 'B'], {'sort': '"""ouc"""'}), "(A, B, sort='ouc')\n", (1098, 1116), True, 'import scipy.linalg as sl\n'), ((4041, 4060), 'numpy.eye', 'np.eye', (['(dimq + dimp)'], {}), '(dimq + dimp)\n', (4047, 4060), True, 'import numpy as np\n'), ((4481, 4489), 'numpy.linalg.qr', 'nl.qr', (['A'], {}), '(A)\n', (4486, 4489), True, 'import numpy.linalg as nl\n'), ((4518, 4541), 'numpy.linalg.inv', 'nl.inv', (['B[dimq:, dimq:]'], {}), '(B[dimq:, dimq:])\n', (4524, 4541), True, 'import numpy.linalg as nl\n'), ((4753, 4765), 'numpy.eye', 'np.eye', (['dimp'], {}), '(dimp)\n', (4759, 4765), True, 'import numpy as np\n'), ((6922, 6933), 'time.time', 'time.time', ([], {}), '()\n', (6931, 6933), False, 'import time\n'), ((7785, 7807), 'numpy.random.shuffle', 'np.random.shuffle', (['res'], {}), '(res)\n', (7802, 7807), True, 'import numpy as np\n'), ((8014, 8035), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + eps)'], {}), '(x ** 2 + eps)\n', (8021, 8035), True, 'import numpy as np\n'), ((1041, 1059), 'numpy.eye', 'np.eye', (['A.shape[0]'], {}), '(A.shape[0])\n', (1047, 1059), True, 'import numpy as np\n'), ((1132, 1152), 'numpy.isclose', 'np.isclose', (['alp', 'bet'], {}), '(alp, bet)\n', (1142, 1152), True, 'import numpy as np\n'), ((1382, 1400), 'numpy.empty_like', 'np.empty_like', (['alp'], {}), '(alp)\n', (1395, 1400), True, 'import numpy as np\n'), ((2305, 2316), 'numpy.linalg.inv', 'nl.inv', (['Z11'], {}), '(Z11)\n', (2311, 2316), True, 'import 
numpy.linalg as nl\n'), ((2353, 2364), 'numpy.linalg.inv', 'nl.inv', (['Z11'], {}), '(Z11)\n', (2359, 2364), True, 'import numpy.linalg as nl\n'), ((7080, 7100), 'time.sleep', 'time.sleep', (['interval'], {}), '(interval)\n', (7090, 7100), False, 'import time\n'), ((1461, 1482), 'numpy.abs', 'np.abs', (['alp[~nonzero]'], {}), '(alp[~nonzero])\n', (1467, 1482), True, 'import numpy as np\n'), ((4145, 4170), 'numpy.linalg.solve', 'nl.solve', (['(BB + AA @ g)', 'CC'], {}), '(BB + AA @ g, CC)\n', (4153, 4170), True, 'import numpy.linalg as nl\n'), ((4193, 4207), 'numpy.abs', 'np.abs', (['(gn - g)'], {}), '(gn - g)\n', (4199, 4207), True, 'import numpy as np\n'), ((6823, 6836), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6831, 6836), True, 'import numpy as np\n'), ((1625, 1636), 'numpy.abs', 'np.abs', (['out'], {}), '(out)\n', (1631, 1636), True, 'import numpy as np\n'), ((2333, 2344), 'numpy.linalg.inv', 'nl.inv', (['S11'], {}), '(S11)\n', (2339, 2344), True, 'import numpy.linalg as nl\n'), ((5010, 5035), 'numpy.linalg.solve', 'nl.solve', (['(A1 + A2 @ g)', 'B1'], {}), '(A1 + A2 @ g, B1)\n', (5018, 5035), True, 'import numpy.linalg as nl\n'), ((5166, 5180), 'numpy.abs', 'np.abs', (['(gn - g)'], {}), '(gn - g)\n', (5172, 5180), True, 'import numpy as np\n'), ((5333, 5365), 'numpy.linalg.inv', 'nl.inv', (['(A[:dimq, :dimq] + A2 @ g)'], {}), '(A[:dimq, :dimq] + A2 @ g)\n', (5339, 5365), True, 'import numpy.linalg as nl\n'), ((6978, 6989), 'time.time', 'time.time', ([], {}), '()\n', (6987, 6989), False, 'import time\n'), ((7297, 7318), 'numpy.round', 'np.round', (['s', 'round_to'], {}), '(s, round_to)\n', (7305, 7318), True, 'import numpy as np\n'), ((5100, 5114), 'numpy.abs', 'np.abs', (['(gn - g)'], {}), '(gn - g)\n', (5106, 5114), True, 'import numpy as np\n'), ((6759, 6773), 'numpy.array', 'np.array', (['tupo'], {}), '(tupo)\n', (6767, 6773), True, 'import numpy as np\n'), ((7242, 7263), 'numpy.round', 'np.round', (['s', 'round_to'], {}), '(s, round_to)\n', 
(7250, 7263), True, 'import numpy as np\n'), ((2580, 2591), 'numpy.linalg.det', 'nl.det', (['Z11'], {}), '(Z11)\n', (2586, 2591), True, 'import numpy.linalg as nl\n'), ((2557, 2568), 'time.time', 'time.time', ([], {}), '()\n', (2566, 2568), False, 'import time\n')] |
import collections
import logging
from event_model import DocumentRouter, RunRouter
import numpy
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import matplotlib
from qtpy.QtWidgets import ( # noqa
QLabel,
QWidget,
QVBoxLayout,
)
from traitlets.traitlets import Bool, List, Set
from traitlets.config import Configurable
from .hints import hinted_fields, guess_dimensions # noqa
from .image import LatestFrameImageManager
from ..utils import load_config
matplotlib.use('Qt5Agg') # must set before importing matplotlib.pyplot
import matplotlib.pyplot as plt # noqa
log = logging.getLogger('bluesky_browser')
class LinePlotManager(Configurable):
    """
    Manage the line plots for one FigureManager.

    Acts as a RunRouter factory: ``__call__`` records the start document
    and registers :meth:`subfactory`, which builds one Line callback per
    (x_key, scalar field) pair.
    """
    omit_single_point_plot = Bool(True, config=True)

    def __init__(self, fig_manager, dimensions):
        self.update_config(load_config())
        self.fig_manager = fig_manager
        self.start_doc = None
        self.dimensions = dimensions
        self.dim_streams = set(stream for _, stream in self.dimensions)
        if len(self.dim_streams) > 1:
            raise NotImplementedError

    def __call__(self, name, start_doc):
        self.start_doc = start_doc
        return [], [self.subfactory]

    def subfactory(self, name, descriptor_doc):
        """Build Line callbacks for every plottable scalar in this stream."""
        if self.omit_single_point_plot and self.start_doc.get('num_points') == 1:
            return []
        if len(self.dimensions) > 1:
            return []  # This is a job for Grid.
        fields = set(hinted_fields(descriptor_doc))
        # Filter out the fields with a data type or shape that we cannot
        # represent in a line plot.
        for field in list(fields):
            dtype = descriptor_doc['data_keys'][field]['dtype']
            if dtype not in ('number', 'integer'):
                fields.discard(field)
            ndim = len(descriptor_doc['data_keys'][field]['shape'] or [])
            if ndim != 0:
                fields.discard(field)
        callbacks = []
        dim_stream, = self.dim_streams  # TODO Handle multiple dim_streams.
        if descriptor_doc.get('name') == dim_stream:
            dimension, = self.dimensions
            x_keys, stream_name = dimension
            fields -= set(x_keys)
            assert stream_name == dim_stream  # TODO Handle multiple dim_streams.
            for x_key in x_keys:
                figure_label = f'Scalars v {x_key}'
                fig = self.fig_manager.get_figure(
                    ('line', x_key, tuple(fields)), figure_label, len(fields), sharex=True)
                for y_key, ax in zip(fields, fig.axes):
                    log.debug('plot %s against %s', y_key, x_key)
                    ylabel = y_key
                    y_units = descriptor_doc['data_keys'][y_key].get('units')
                    if y_units:
                        ylabel += f' [{y_units}]'
                    # BUG FIX: the original called set_ylabel(y_key) before
                    # appending the units, so the units never appeared.
                    ax.set_ylabel(ylabel)
                    # Set xlabel only on lowest axes, outside for loop below.

                    def func(event_page, y_key=y_key):
                        """
                        Extract x points and y points to plot out of an EventPage.

                        This will be passed to LineWithPeaks.
                        """
                        y_data = event_page['data'][y_key]
                        if x_key == 'time':
                            t0 = self.start_doc['time']
                            x_data = numpy.asarray(event_page['time']) - t0
                        elif x_key == 'seq_num':
                            x_data = event_page['seq_num']
                        else:
                            x_data = event_page['data'][x_key]
                        return x_data, y_data

                    line = Line(func, ax=ax)
                    callbacks.append(line)
                if fields:
                    # Set the xlabel on the bottom-most axis.
                    if x_key == 'time':
                        xlabel = x_key
                        x_units = 's'
                    elif x_key == 'seq_num':
                        xlabel = 'sequence number'
                        x_units = None
                    else:
                        xlabel = x_key
                        x_units = descriptor_doc['data_keys'][x_key].get('units')
                    if x_units:
                        xlabel += f' [{x_units}]'
                    # BUG FIX: the original called set_xlabel(x_key), discarding
                    # the pretty name and units built just above.
                    ax.set_xlabel(xlabel)
                    fig.tight_layout()
        # TODO Plot other streams against time.
        for callback in callbacks:
            callback('start', self.start_doc)
            callback('descriptor', descriptor_doc)
        return callbacks
class Line(DocumentRouter):
    """
    Draw a matplotlib Line artist and update it for each Event.

    Parameters
    ----------
    func : callable
        This must accept an EventPage and return two lists of floats
        (x points and y points). The two lists must contain an equal number of
        items, but that number is arbitrary. That is, a given document may add
        one new point to the plot, no new points, or multiple new points.
    label_template : string
        This string will be formatted with the RunStart document. Any missing
        values will be filled with '?'. If the keyword argument 'label' is
        given, this argument will be ignored.
    ax : matplotlib Axes, optional
        If None, a new Figure and Axes are created.
    **kwargs
        Passed through to :meth:`Axes.plot` to style Line object.
    """

    def __init__(self, func, *, label_template='{scan_id} [{uid:.8}]', ax=None, **kwargs):
        self.func = func
        if ax is None:
            import matplotlib.pyplot as plt
            _, ax = plt.subplots()
        self.ax = ax
        self.line, = ax.plot([], [], **kwargs)
        self.x_data = []
        self.y_data = []
        self.label_template = label_template
        self.label = kwargs.get('label')

    def start(self, doc):
        if self.label is None:
            # Missing start-doc keys render as '?' rather than raising.
            d = collections.defaultdict(lambda: '?')
            d.update(**doc)
            label = self.label_template.format_map(d)
        else:
            label = self.label
        if label:
            self.line.set_label(label)
            self.ax.legend(loc='best')

    def event_page(self, doc):
        x, y = self.func(doc)
        self._update(x, y)

    def _update(self, x, y):
        """
        Takes in new x and y points and redraws plot if they are not empty.
        """
        if not len(x) == len(y):
            # BUG FIX: the original message was a plain (non-f) string, so
            # "{len(x)}" / "{len(y)}" were printed literally.
            raise ValueError("User function is expected to provide the same "
                             f"number of x and y points. Got {len(x)} x points "
                             f"and {len(y)} y points.")
        if not len(x):
            # No new data. Short-circuit. len() avoids the ambiguous truth
            # value of a multi-element numpy array (``not x`` raised there).
            return
        self.x_data.extend(x)
        self.y_data.extend(y)
        self.line.set_data(self.x_data, self.y_data)
        self.ax.relim(visible_only=True)
        self.ax.autoscale_view(tight=True)
        self.ax.figure.canvas.draw_idle()
class Grid(DocumentRouter):
    """
    Draw a matplotlib AxesImage artist and update it for each Event.

    The purpose of this callback is to create (on initialization) a
    matplotlib grid image and then update it with new data for every `event`.

    NOTE: Some important parameters are fed in through **kwargs like `extent`
    which defines the axes min and max and `origin` which defines if the grid
    co-ordinates start in the bottom left or top left of the plot. For more
    info see https://matplotlib.org/tutorials/intermediate/imshow_extent.html
    or https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.imshow.html#matplotlib.axes.Axes.imshow

    Parameters
    ----------
    func : callable
        This must accept a BulkEvent and return three lists of floats (x
        grid co-ordinates, y grid co-ordinates and grid position intensity
        values). The three lists must contain an equal number of items, but
        that number is arbitrary. That is, a given document may add one new
        point, no new points or multiple new points to the plot.
    shape : tuple
        The (row, col) shape of the grid.
    ax : matplotlib Axes, optional.
        if ``None``, a new Figure and Axes are created.
    **kwargs
        Passed through to :meth:`Axes.imshow` to style the AxesImage object.
    """

    def __init__(self, func, shape, *, ax=None, **kwargs):
        self.func = func
        self.shape = shape
        if ax is None:
            _, ax = plt.subplots()
        self.ax = ax
        self.grid_data = numpy.full(self.shape, numpy.nan)
        # BUG FIX: Axes.imshow returns a single AxesImage (not a sequence),
        # so the original tuple-unpacking ``self.image, = ...`` raised
        # TypeError at construction time.
        self.image = ax.imshow(self.grid_data, **kwargs)

    def event_page(self, doc):
        '''
        Takes in a bulk_events document and updates grid_data with the values
        returned from self.func(doc)

        Parameters
        ----------
        doc : dict
            The bulk event dictionary that contains the 'data' and 'timestamps'
            associated with the bulk event.
        '''
        x_coords, y_coords, I_vals = self.func(doc)
        self._update(x_coords, y_coords, I_vals)

    def _update(self, x_coords, y_coords, I_vals):
        '''
        Updates self.grid_data with the values from the lists x_coords,
        y_coords, I_vals.

        Parameters
        ----------
        x_coords, y_coords, I_vals : Lists
            These are lists of x co-ordinate, y co-ordinate and intensity
            values arising from the event. The length of all three lists must
            be the same.
        '''
        if not len(x_coords) == len(y_coords) == len(I_vals):
            raise ValueError("User function is expected to provide the same "
                             "number of x, y and I points. Got {0} x points, "
                             "{1} y points and {2} I values."
                             "".format(len(x_coords), len(y_coords),
                                       len(I_vals)))
        if not x_coords:
            # No new data, Short-circuit.
            return
        # Update grid_data and the plot.
        self.grid_data[x_coords, y_coords] = I_vals
        self.image.set_array(self.grid_data)
class FigureManager(Configurable):
    """
    For a given Viewer, encapsulate the matplotlib Figures and associated tabs.
    """
    # Plot-manager factories instantiated once per run; each is called with
    # (figure_manager, dimensions).
    factories = List([
        LinePlotManager,
        LatestFrameImageManager],
        config=True)
    enabled = Bool(True, config=True)
    exclude_streams = Set([], config=True)

    def __init__(self, add_tab):
        self.update_config(load_config())
        # Callback supplied by the hosting viewer: add_tab(widget, label).
        self.add_tab = add_tab
        self._figures = {}  # cache: arbitrary hashable key -> Figure

    def get_figure(self, key, label, *args, **kwargs):
        # Return the cached Figure for ``key``, creating it on first use
        # (extra args are forwarded to plt.subplots via _add_figure).
        try:
            return self._figures[key]
        except KeyError:
            return self._add_figure(key, label, *args, **kwargs)

    def _add_figure(self, key, label, *args, **kwargs):
        # Build a Figure inside a new Qt tab (label + canvas + toolbar),
        # register the tab with the viewer, and cache the Figure.
        tab = QWidget()
        fig, _ = plt.subplots(*args, **kwargs)
        canvas = FigureCanvas(fig)
        canvas.setMinimumWidth(640)
        canvas.setParent(tab)
        toolbar = NavigationToolbar(canvas, tab)
        tab_label = QLabel(label)
        tab_label.setMaximumHeight(20)
        layout = QVBoxLayout()
        layout.addWidget(tab_label)
        layout.addWidget(canvas)
        layout.addWidget(toolbar)
        tab.setLayout(layout)
        self.add_tab(tab, label)
        self._figures[key] = fig
        return fig

    def __call__(self, name, start_doc):
        # Entry point per run: fan documents out to a RunRouter built from
        # all configured factories. Returns ([callbacks], [subfactories]).
        if not self.enabled:
            return [], []
        dimensions = start_doc.get('hints', {}).get('dimensions', guess_dimensions(start_doc))
        rr = RunRouter(
            [factory(self, dimensions) for factory in self.factories])
        rr('start', start_doc)
        return [rr], []
| [
"logging.getLogger",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"traitlets.traitlets.Set",
"qtpy.QtWidgets.QVBoxLayout",
"matplotlib.use",
"qtpy.QtWidgets.QLabel",
"qtpy.QtWidgets.QWidget",
"numpy.asarray",
"traitlets.traitlets.Bool",
"traitlets.traitlets.List",
"collections.defa... | [((562, 586), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (576, 586), False, 'import matplotlib\n'), ((682, 718), 'logging.getLogger', 'logging.getLogger', (['"""bluesky_browser"""'], {}), "('bluesky_browser')\n", (699, 718), False, 'import logging\n'), ((852, 875), 'traitlets.traitlets.Bool', 'Bool', (['(True)'], {'config': '(True)'}), '(True, config=True)\n', (856, 875), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((10662, 10723), 'traitlets.traitlets.List', 'List', (['[LinePlotManager, LatestFrameImageManager]'], {'config': '(True)'}), '([LinePlotManager, LatestFrameImageManager], config=True)\n', (10666, 10723), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((10763, 10786), 'traitlets.traitlets.Bool', 'Bool', (['(True)'], {'config': '(True)'}), '(True, config=True)\n', (10767, 10786), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((10809, 10829), 'traitlets.traitlets.Set', 'Set', (['[]'], {'config': '(True)'}), '([], config=True)\n', (10812, 10829), False, 'from traitlets.traitlets import Bool, List, Set\n'), ((8707, 8740), 'numpy.full', 'numpy.full', (['self.shape', 'numpy.nan'], {}), '(self.shape, numpy.nan)\n', (8717, 8740), False, 'import numpy\n'), ((11232, 11241), 'qtpy.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (11239, 11241), False, 'from qtpy.QtWidgets import QLabel, QWidget, QVBoxLayout\n'), ((11259, 11288), 'matplotlib.pyplot.subplots', 'plt.subplots', (['*args'], {}), '(*args, **kwargs)\n', (11271, 11288), True, 'import matplotlib.pyplot as plt\n'), ((11306, 11323), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['fig'], {}), '(fig)\n', (11318, 11323), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((11408, 11438), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['canvas', 'tab'], {}), '(canvas, 
tab)\n', (11425, 11438), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((11459, 11472), 'qtpy.QtWidgets.QLabel', 'QLabel', (['label'], {}), '(label)\n', (11465, 11472), False, 'from qtpy.QtWidgets import QLabel, QWidget, QVBoxLayout\n'), ((11530, 11543), 'qtpy.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (11541, 11543), False, 'from qtpy.QtWidgets import QLabel, QWidget, QVBoxLayout\n'), ((5824, 5838), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5836, 5838), True, 'import matplotlib.pyplot as plt\n'), ((6117, 6154), 'collections.defaultdict', 'collections.defaultdict', (["(lambda : '?')"], {}), "(lambda : '?')\n", (6140, 6154), False, 'import collections\n'), ((8646, 8660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8658, 8660), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3569), 'numpy.asarray', 'numpy.asarray', (["event_page['time']"], {}), "(event_page['time'])\n", (3549, 3569), False, 'import numpy\n')] |
import base64
import datetime
import http.client
import json
import sys
from pyblake2 import blake2b
from flask import Flask, request
from flask import render_template
from util.config import host_config
app = Flask(__name__)
AUTH_SIZE = 16
API_KEY = '<KEY>' # use the provided one
SECRET_KEY = '2a4309a8a2c54e539e5cb57e3a4816d9' # use the provided one; keep this one well protected
def generate(msg):
    """Return the hex digest of a keyed BLAKE2b MAC over ``msg``.

    Used as the 'proof' field of outgoing requests; keyed with the
    module-level SECRET_KEY and truncated to AUTH_SIZE bytes.
    """
    mac = blake2b(msg.encode('utf-8'), digest_size=AUTH_SIZE,
                 key=SECRET_KEY.encode('utf-8'))
    return mac.hexdigest()
def send_response(data):
    """POST ``data`` as JSON to the local flair-feedback endpoint.

    Prints the HTTP status and raw body, and returns the raw body bytes.

    Fixes over the original: the HTTP connection is now closed (it leaked),
    and the verbose ``json.JSONEncoder().encode(data)`` is replaced by the
    equivalent ``json.dumps(data)``.
    """
    host = '0.0.0.0'
    port = '12892'
    path = '/api/external/v1/feedback/flair'
    headers = {"Content-type": "application/json", "charset": "utf-8"}
    conn = http.client.HTTPConnection(host, port)
    try:
        conn.request("POST", path, json.dumps(data), headers)
        response = conn.getresponse()
        print(response.status, response.reason)
        tag_result = response.read()
        print(tag_result)
    finally:
        conn.close()
    return tag_result
def encode_request_json(text):
    """Build the JSON payload the flair endpoint expects.

    The text is base64-encoded and accompanied by the API key, fixed
    user/case identifiers, and a keyed-hash proof of the raw text.
    """
    encoded_text = base64.b64encode(text.encode('utf-8')).decode('utf-8')
    payload = {
        'text': encoded_text,
        'api-key': API_KEY,
        'user': '2IFQKT0_1_1',
        'case': '1',
        'proof': generate(text),
    }
    print(payload)
    return payload
import json
def encode_response_json(response):
    """Flatten the tagger's JSON reply into template variables for main.html.

    Expects ``response`` to be a JSON string with "content" and "reasoning"
    lists of rows; only the first row of each is used.
    """
    response = json.loads(response)
    data = {}
    content_len = 0
    reasoning_len = 0
    if len(response["content"]) > 0:
        content_len = len(response["content"][0])
    if len(response["reasoning"]) > 0:
        reasoning_len = len(response["reasoning"][0])
    keys_content = ["index_c", "position_c", "word_c", "token_string_c", "comment_c"]
    # NOTE(review): "token_string_c" appears in BOTH key lists, so a reasoning
    # value overwrites the content value; "token_string_r" was probably
    # intended — confirm against main.html before changing.
    keys_reasoning = ["index_r", "position_r", "word_r", "token_string_c", "comment_r"]
    for i in range(content_len):
        # NOTE(review): " ".join(str(list)) space-separates every CHARACTER of
        # the list's repr; " ".join(map(str, list)) looks intended — confirm
        # before changing, as it alters the rendered output.
        data[keys_content[i]] = " ".join(str(response["content"][0][i])) if type(response["content"][0][i]) is list else response["content"][0][i]
    for i in range(reasoning_len):
        data[keys_reasoning[i]] = " ".join(str(response["reasoning"][0][i])) if type(response["reasoning"][0][i]) is list else response["reasoning"][0][i]
    print(data)
    return data
@app.route('/predict', methods=['GET'])
def api_feedback_with_model():
    """GET /predict?text=... — tag ``text`` via the flair service and
    render the result through main.html."""
    raw_text = request.args.get('text', '')
    payload = encode_request_json(raw_text)
    tagged = send_response(payload)
    template_vars = encode_response_json(tagged)
    return render_template('main.html', **template_vars)
# ### Host Server ####
if __name__ == '__main__':
    # Environment name from argv (e.g. "dev"); defaults to production config.
    env = sys.argv[1] if len(sys.argv) == 2 else 'prod'
    print('{0} App will be served in port:{1}....'.format(datetime.datetime.now(), host_config[env]['port']))
    print('{0} Loading models...'.format(datetime.datetime.now()))
    #preload_models()
    print('{0} Models loaded....'.format(datetime.datetime.now()))
    print('{0} Serving in port:{1}....'.format(datetime.datetime.now(), host_config[env]['port']))
    #app.run(host=host_config[env]['host'], port=host_config[env]['port'], threaded=True)
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug debugger
    # to the network and ignores host_config — confirm this is dev-only.
    app.run(debug=True, host='0.0.0.0')
    print('started')
| [
"flask.render_template",
"flask.request.args.get",
"json.loads",
"flask.Flask",
"datetime.datetime.now",
"json.JSONEncoder"
] | [((213, 228), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (218, 228), False, 'from flask import Flask, request\n'), ((1338, 1358), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (1348, 1358), False, 'import json\n'), ((2230, 2258), 'flask.request.args.get', 'request.args.get', (['"""text"""', '""""""'], {}), "('text', '')\n", (2246, 2258), False, 'from flask import Flask, request\n'), ((2387, 2425), 'flask.render_template', 'render_template', (['"""main.html"""'], {}), "('main.html', **mydata)\n", (2402, 2425), False, 'from flask import render_template\n'), ((2594, 2617), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2615, 2617), False, 'import datetime\n'), ((2688, 2711), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2709, 2711), False, 'import datetime\n'), ((2777, 2800), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2798, 2800), False, 'import datetime\n'), ((2850, 2873), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2871, 2873), False, 'import datetime\n'), ((808, 826), 'json.JSONEncoder', 'json.JSONEncoder', ([], {}), '()\n', (824, 826), False, 'import json\n')] |
import os
import json
from fair_research_login import NativeClient
# Globus Native App registration and the OAuth2 scopes this script requests
# (identity scopes plus full transfer and search access).
CLIENT_ID = 'e54de045-d346-42ef-9fbc-5d466f4a00c6'
APP_NAME = 'My App'
SCOPES = 'openid email profile urn:globus:auth:scope:transfer.api.globus.org:all urn:globus:auth:scope:search.api.globus.org:all'
# NOTE(review): CONFIG_FILE is never referenced below; NativeClient appears
# to manage its own token store — confirm whether this constant is dead.
CONFIG_FILE = 'tokens-data.json'
tokens = None
# try to load tokens from local file (native app config)
client = NativeClient(client_id=CLIENT_ID, app_name=APP_NAME)
try:
    tokens = client.load_tokens(requested_scopes=SCOPES)
except:  # noqa: E722 -- best effort: any load failure falls through to a fresh login
    pass
if not tokens:
    # if no tokens, need to start Native App authentication process to get tokens
    tokens = client.login(requested_scopes=SCOPES,
                          refresh_tokens=False)
try:
    # save the tokens
    client.save_tokens(tokens)
    # create environment variable
    os.environ['GLOBUS_DATA'] = json.dumps(tokens, indent=4, sort_keys=True)
except:  # noqa: E722 -- NOTE(review): also hides json/env errors; consider narrowing
    pass
| [
"json.dumps",
"fair_research_login.NativeClient"
] | [((384, 436), 'fair_research_login.NativeClient', 'NativeClient', ([], {'client_id': 'CLIENT_ID', 'app_name': 'APP_NAME'}), '(client_id=CLIENT_ID, app_name=APP_NAME)\n', (396, 436), False, 'from fair_research_login import NativeClient\n'), ((867, 911), 'json.dumps', 'json.dumps', (['tokens'], {'indent': '(4)', 'sort_keys': '(True)'}), '(tokens, indent=4, sort_keys=True)\n', (877, 911), False, 'import json\n')] |
# Libraries
import ast
import collections
import pandas as pd
# -----------------
# Methods
# -----------------
def invert(d):
    """Swap keys and values of a dict; any non-dict passes through unchanged."""
    if not isinstance(d, dict):
        return d
    return {value: key for key, value in d.items()}
def str2eval(x):
    """Parse a Python-literal string with ast.literal_eval.

    Null-ish inputs (None / NaN, per pandas.isnull) map to None.
    """
    return None if pd.isnull(x) else ast.literal_eval(x)
def sortkeys(d):
    """Return an OrderedDict of ``d`` sorted by key; non-dicts pass through."""
    if not isinstance(d, dict):
        return d
    return collections.OrderedDict(sorted(d.items()))
# Study codes: one workbook per code is processed.
codes = ['06dx', '13dx', '32dx', '42dx', 'md']
path = "../oucru-{0}/resources/outputs/"
path += "templates/ccfgs_{1}_data_fixed.xlsx"
# Loop
for c in codes:
    # Create path
    path_tmp = path.format(c, c)
    # Read excel (sheet_name=None loads ALL sheets into a dict of DataFrames)
    sheets = pd.read_excel(path_tmp, sheet_name=None)
    # Loop: parse each sheet's 'to_replace' column into dicts, then invert
    # the mappings. NOTE(review): attribute assignment (df.to_replace = ...)
    # only updates the column because it already exists — confirm, or prefer
    # df['to_replace'] = ... for robustness.
    for sheet, df in sheets.items():
        df.to_replace = df.to_replace.apply(str2eval)
        df.to_replace = df.to_replace.apply(invert)
        #df.to_replace = df.to_replace.apply(sortkeys)
    # Create fullpath
    fullpath = path_tmp.replace('.xlsx', '_inverted.xlsx')
    # Creating Excel Writer Object from Pandas
    writer = pd.ExcelWriter(fullpath, engine='xlsxwriter')
    # Save each frame
    for sheet, frame in sheets.items():
        frame.to_excel(writer, sheet_name=sheet, index=False)
    # critical last step (NOTE(review): writer.save() is deprecated in newer
    # pandas in favor of writer.close() — confirm the pinned pandas version)
    writer.save()
"ast.literal_eval",
"pandas.isnull",
"pandas.ExcelWriter",
"pandas.read_excel"
] | [((240, 252), 'pandas.isnull', 'pd.isnull', (['x'], {}), '(x)\n', (249, 252), True, 'import pandas as pd\n'), ((285, 304), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (301, 304), False, 'import ast\n'), ((666, 706), 'pandas.read_excel', 'pd.read_excel', (['path_tmp'], {'sheet_name': 'None'}), '(path_tmp, sheet_name=None)\n', (679, 706), True, 'import pandas as pd\n'), ((1060, 1105), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['fullpath'], {'engine': '"""xlsxwriter"""'}), "(fullpath, engine='xlsxwriter')\n", (1074, 1105), True, 'import pandas as pd\n')] |
import argparse
import twitter
import os
import json
from likes import Likes
import sys
import time
class Downloader:
    """CLI front-end for downloading media from a user's liked tweets.

    Fix over the original: a leftover debug ``print(args.loop)`` in
    :meth:`main` was removed.
    """

    def __init__(self):
        # Directory containing this script; config and downloads live here.
        self._current_path = os.path.dirname(os.path.realpath(__file__))

    def downloadLikes(self, api, screen_name, force_redownload):
        """Download (and index) all liked-tweet media for ``screen_name``."""
        liked_tweets = Likes(
            api, screen_name, self._current_path, force_redownload)
        liked_tweets.createTable()
        liked_tweets.download()

    def generateConfig(self):
        """Write a skeleton config.json next to the script and exit."""
        base = {
            "consumer_key": "",
            "consumer_secret": "",
            "access_token_key": "",
            "access_token_secret": "",
        }
        with open(os.path.join(self._current_path, "config.json"), "w", encoding="utf-8") as f:
            json.dump(base, f, ensure_ascii=False, indent=4)
        print("Config generated at config.json")
        sys.exit()

    def main(self):
        """Parse CLI arguments, build the API client and run the download."""
        parser = argparse.ArgumentParser(
            description="Download media from liked tweets of a specified user."
        )
        parser.add_argument(
            "-u", "--user", help="Twitter username, @twitter would just be twitter"
        )
        parser.add_argument(
            "--images",
            help="Download only images, downloads videos and images by default",
            action="store_true",
        )
        parser.add_argument(
            "--videos",
            help="Download only videos, downloads videos and images by default",
            action="store_true",
        )
        parser.add_argument(
            "-g",
            "--generate-config",
            help="Generates skeleton config file(config.json), will overwrite existing config if exists",
            action="store_true",
        )
        parser.add_argument(
            "-c",
            "--config",
            help="JSON file containing API keys. Default(config.json) is used if not specified",
        )
        parser.add_argument(
            "-f", "--force", help="Redownloads all media", action="store_true"
        )
        parser.add_argument(
            "-l", "--loop", help="Run forever", action="store_true"
        )
        args = parser.parse_args()
        if args.generate_config:
            self.generateConfig()
        if not args.user:
            print("No user specified, exiting")
            sys.exit()
        config_name = "config.json"
        if args.config:
            config_name = args.config
        try:
            with open(os.path.join(self._current_path, config_name), "r", encoding="utf-8") as f:
                config = json.load(f)
            api = twitter.Api(
                consumer_key=config["consumer_key"],
                consumer_secret=config["consumer_secret"],
                access_token_key=config["access_token_key"],
                access_token_secret=config["access_token_secret"],
                sleep_on_rate_limit=True,
                tweet_mode="extended",
            )
        except FileNotFoundError:
            # Placeholder handlers: surface a missing/invalid config unchanged.
            raise
        except json.decoder.JSONDecodeError:
            raise
        while True:
            self.downloadLikes(api, args.user, args.force)
            if not args.loop:
                break
            print(
                f"[{time.strftime('%m/%d/%Y %H:%M:%S', time.localtime())}] Running again in 30 minutes")
            time.sleep(30*60)
if __name__ == "__main__":
    # Guard the CLI entry point so importing this module has no side effects.
    downloader = Downloader()
    downloader.main()
| [
"argparse.ArgumentParser",
"likes.Likes",
"os.path.join",
"time.sleep",
"os.path.realpath",
"twitter.Api",
"sys.exit",
"json.load",
"time.localtime",
"json.dump"
] | [((306, 367), 'likes.Likes', 'Likes', (['api', 'screen_name', 'self._current_path', 'force_redownload'], {}), '(api, screen_name, self._current_path, force_redownload)\n', (311, 367), False, 'from likes import Likes\n'), ((862, 872), 'sys.exit', 'sys.exit', ([], {}), '()\n', (870, 872), False, 'import sys\n'), ((911, 1008), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Download media from liked tweets of a specified user."""'}), "(description=\n 'Download media from liked tweets of a specified user.')\n", (934, 1008), False, 'import argparse\n'), ((189, 215), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (205, 215), False, 'import os\n'), ((756, 804), 'json.dump', 'json.dump', (['base', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(base, f, ensure_ascii=False, indent=4)\n', (765, 804), False, 'import json\n'), ((2327, 2337), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2335, 2337), False, 'import sys\n'), ((2605, 2855), 'twitter.Api', 'twitter.Api', ([], {'consumer_key': "config['consumer_key']", 'consumer_secret': "config['consumer_secret']", 'access_token_key': "config['access_token_key']", 'access_token_secret': "config['access_token_secret']", 'sleep_on_rate_limit': '(True)', 'tweet_mode': '"""extended"""'}), "(consumer_key=config['consumer_key'], consumer_secret=config[\n 'consumer_secret'], access_token_key=config['access_token_key'],\n access_token_secret=config['access_token_secret'], sleep_on_rate_limit=\n True, tweet_mode='extended')\n", (2616, 2855), False, 'import twitter\n'), ((3360, 3379), 'time.sleep', 'time.sleep', (['(30 * 60)'], {}), '(30 * 60)\n', (3370, 3379), False, 'import time\n'), ((666, 713), 'os.path.join', 'os.path.join', (['self._current_path', '"""config.json"""'], {}), "(self._current_path, 'config.json')\n", (678, 713), False, 'import os\n'), ((2574, 2586), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2583, 2586), False, 'import json\n'), ((2473, 2518), 
'os.path.join', 'os.path.join', (['self._current_path', 'config_name'], {}), '(self._current_path, config_name)\n', (2485, 2518), False, 'import os\n'), ((3298, 3314), 'time.localtime', 'time.localtime', ([], {}), '()\n', (3312, 3314), False, 'import time\n')] |
import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
# Input helpers: ni -> one int, na -> list of ints, ns -> stripped line.
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
# Walk on an infinite binary tree starting at node X: 'U' moves to the
# parent (X // 2), 'L' to the left child (2X), 'R' to the right child (2X+1).
N, X = na()
S = ns()
# First pass (scan S right-to-left): every 'U' cancels the nearest earlier
# 'L'/'R', since going down then back up is a no-op. Surviving moves are
# accumulated reversed; leftover 'U's must come first in the reduced string.
up = 0
tmp_S = ""
for c in S[::-1]:
    if c == 'U':
        up += 1
    if c == 'L':
        if up:
            up -= 1
        else:
            tmp_S += c
    if c == 'R':
        if up:
            up -= 1
        else:
            tmp_S += c
tmp_S = 'U' * up + tmp_S[::-1]
S = tmp_S
# Second pass: apply the reduced move string directly; after the reduction
# no 'U' can follow an 'L'/'R', so X never loses low bits incorrectly.
for c in S:
    if c == 'L':
        X = 2 * X
    if c == 'R':
        X = 2 * X + 1
    if c == 'U':
        X //= 2
print(X)
"sys.setrecursionlimit"
] | [((67, 97), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 6)'], {}), '(10 ** 6)\n', (88, 97), False, 'import sys\n')] |
import sqlalchemy
from sqlalchemy.ext.compiler import compiles
import sys
from src.container import create_container
from src.models.base import Base
from src.models import *
@compiles(sqlalchemy.LargeBinary, 'mysql')
def compile_binary_mysql(element, compiler, **kw):
    """Compile LargeBinary columns for MySQL.

    Renders a fixed-width BINARY(n) when the column declares a positive
    integer length; otherwise defers to the default BLOB rendering.
    """
    length = element.length
    if not (isinstance(length, int) and length > 0):
        return compiler.visit_BLOB(element, **kw)
    return f'BINARY({length})'
def main():
    """Entry point: read the config path from argv, wire up the DI container,
    and create all database tables declared on the ORM base."""
    args = sys.argv
    if len(args) == 1:
        print('USAGE: python setup.py <CONFIG_PATH>')
        exit(0)
    # Build the container from the given config and materialize the schema.
    engine_wrapper = create_container(args[1]).db_engine()
    Base.metadata.create_all(engine_wrapper.engine)
    print('Ok')
if __name__ == '__main__':
    main()
| [
"src.models.base.Base.metadata.create_all",
"src.container.create_container",
"sqlalchemy.ext.compiler.compiles"
] | [((179, 220), 'sqlalchemy.ext.compiler.compiles', 'compiles', (['sqlalchemy.LargeBinary', '"""mysql"""'], {}), "(sqlalchemy.LargeBinary, 'mysql')\n", (187, 220), False, 'from sqlalchemy.ext.compiler import compiles\n'), ((597, 626), 'src.container.create_container', 'create_container', (['config_path'], {}), '(config_path)\n', (613, 626), False, 'from src.container import create_container\n'), ((670, 712), 'src.models.base.Base.metadata.create_all', 'Base.metadata.create_all', (['db_engine.engine'], {}), '(db_engine.engine)\n', (694, 712), False, 'from src.models.base import Base\n')] |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines middleware functions for command line operations.
This allows the ability to provide custom logic either before or after running
an operation by specifying the name of the operation, and then calling the
function that is provided as the first argument and passing the **kwargs
provided.
"""
import argparse
import functools
import logging

import boto3
import jmespath
from botocore.exceptions import WaiterError

import pcluster.cli.model
from pcluster.cli.exceptions import APIOperationException, ParameterException
# Module-level logger, named after this module so log records are attributable.
LOGGER = logging.getLogger(__name__)
def _cluster_status(cluster_name):
    """Fetch the current status of ``cluster_name`` via the describe-cluster controller."""
    target = "pcluster.api.controllers.cluster_operations_controller.describe_cluster"
    return pcluster.cli.model.call(target, cluster_name=cluster_name)
def add_additional_args(parser_map):
    """Add any additional arguments to parsers for individual operations.

    A hidden ``--wait`` flag is attached to each cluster lifecycle operation so
    callers can block until the underlying CloudFormation stack settles.

    NOTE: these additional arguments will also need to be removed before
    calling the underlying function for the situation where they are not a part
    of the specification.

    :param parser_map: mapping of operation name to its argparse parser
    """
    # The same flag applies to all three lifecycle operations; loop instead of
    # repeating the add_argument call per operation.
    for operation in ("create-cluster", "delete-cluster", "update-cluster"):
        parser_map[operation].add_argument("--wait", action="store_true", help=argparse.SUPPRESS)
def middleware_hooks():
    """Return the mapping from operation name to its middleware function."""
    hooks = {
        "create-cluster": create_cluster,
        "delete-cluster": delete_cluster,
        "update-cluster": update_cluster,
    }
    return hooks
def queryable(func):
    """Decorator adding support for a ``query`` kwarg.

    The decorated operation's result is filtered through the supplied JMESPath
    expression (popped from ``kwargs``) before being returned; without a query
    the result passes through unchanged.

    :raises ParameterException: if the query string cannot be parsed
    """

    # functools.wraps preserves func's name/docstring on the wrapper so the
    # middleware map and debugging output refer to the real operation.
    @functools.wraps(func)
    def wrapper(dest_func, _body, kwargs):
        query = kwargs.pop("query", None)
        ret = func(dest_func, _body, kwargs)
        try:
            return jmespath.search(query, ret) if query else ret
        except jmespath.exceptions.ParseError:
            raise ParameterException({"message": "Invalid query string.", "query": query})

    return wrapper
@queryable
def update_cluster(func, _body, kwargs):
    """Run the update-cluster operation, optionally blocking on completion.

    When ``--wait`` was requested (and this is not a dry run), poll
    CloudFormation until the stack update finishes, then return the refreshed
    cluster status instead of the immediate API response.
    """
    should_wait = kwargs.pop("wait", False)
    ret = func(**kwargs)
    if not should_wait or kwargs.get("dryrun"):
        return ret
    cluster_name = kwargs["cluster_name"]
    waiter = boto3.client("cloudformation").get_waiter("stack_update_complete")
    try:
        waiter.wait(StackName=cluster_name)
    except WaiterError as e:
        LOGGER.error("Failed when waiting for cluster update with error: %s", e)
        raise APIOperationException(_cluster_status(cluster_name))
    return _cluster_status(cluster_name)
@queryable
def create_cluster(func, body, kwargs):
    """Run the create-cluster operation, optionally blocking on completion.

    When ``--wait`` was requested (and this is not a dry run), poll
    CloudFormation until the stack creation finishes, then return the
    refreshed cluster status instead of the immediate API response.
    """
    should_wait = kwargs.pop("wait", False)
    ret = func(**kwargs)
    if not should_wait or kwargs.get("dryrun"):
        return ret
    cluster_name = body["clusterName"]
    waiter = boto3.client("cloudformation").get_waiter("stack_create_complete")
    try:
        waiter.wait(StackName=cluster_name)
    except WaiterError as e:
        LOGGER.error("Failed when waiting for cluster creation with error: %s", e)
        raise APIOperationException(_cluster_status(cluster_name))
    return _cluster_status(cluster_name)
@queryable
def delete_cluster(func, _body, kwargs):
    """Run the delete-cluster operation, optionally blocking until the stack is gone.

    When ``--wait`` was requested, poll CloudFormation until the stack delete
    finishes and return a success message instead of the immediate response.
    """
    should_wait = kwargs.pop("wait", False)
    ret = func(**kwargs)
    if not should_wait:
        return ret
    cluster_name = kwargs["cluster_name"]
    waiter = boto3.client("cloudformation").get_waiter("stack_delete_complete")
    try:
        waiter.wait(StackName=cluster_name)
    except WaiterError as e:
        LOGGER.error("Failed when waiting for cluster deletion with error: %s", e)
        raise APIOperationException({"message": f"Failed when deleting cluster '{cluster_name}'."})
    return {"message": f"Successfully deleted cluster '{cluster_name}'."}
| [
"logging.getLogger",
"boto3.client",
"pcluster.cli.exceptions.APIOperationException",
"pcluster.cli.exceptions.ParameterException",
"jmespath.search"
] | [((1095, 1122), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1112, 1122), False, 'import logging\n'), ((2855, 2885), 'boto3.client', 'boto3.client', (['"""cloudformation"""'], {}), "('cloudformation')\n", (2867, 2885), False, 'import boto3\n'), ((3477, 3507), 'boto3.client', 'boto3.client', (['"""cloudformation"""'], {}), "('cloudformation')\n", (3489, 3507), False, 'import boto3\n'), ((4064, 4094), 'boto3.client', 'boto3.client', (['"""cloudformation"""'], {}), "('cloudformation')\n", (4076, 4094), False, 'import boto3\n'), ((2467, 2494), 'jmespath.search', 'jmespath.search', (['query', 'ret'], {}), '(query, ret)\n', (2482, 2494), False, 'import jmespath\n'), ((2578, 2650), 'pcluster.cli.exceptions.ParameterException', 'ParameterException', (["{'message': 'Invalid query string.', 'query': query}"], {}), "({'message': 'Invalid query string.', 'query': query})\n", (2596, 2650), False, 'from pcluster.cli.exceptions import APIOperationException, ParameterException\n'), ((4373, 4472), 'pcluster.cli.exceptions.APIOperationException', 'APIOperationException', (['{\'message\': f"Failed when deleting cluster \'{kwargs[\'cluster_name\']}\'."}'], {}), '({\'message\':\n f"Failed when deleting cluster \'{kwargs[\'cluster_name\']}\'."})\n', (4394, 4472), False, 'from pcluster.cli.exceptions import APIOperationException, ParameterException\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from numpy.testing import assert_almost_equal
from mmpose.models import build_loss
from mmpose.models.utils.geometry import batch_rodrigues
def test_mesh_loss():
    """test mesh loss."""
    loss_cfg = dict(
        type='MeshLoss',
        joints_2d_loss_weight=1,
        joints_3d_loss_weight=1,
        vertex_loss_weight=1,
        smpl_pose_loss_weight=1,
        smpl_beta_loss_weight=1,
        img_res=256,
        focal_length=5000)
    loss = build_loss(loss_cfg)
    smpl_pose = torch.zeros([1, 72], dtype=torch.float32)
    smpl_rotmat = batch_rodrigues(smpl_pose.view(-1, 3)).view(-1, 24, 3, 3)
    smpl_beta = torch.zeros([1, 10], dtype=torch.float32)
    camera = torch.tensor([[1, 0, 0]], dtype=torch.float32)
    vertices = torch.rand([1, 6890, 3], dtype=torch.float32)
    joints_3d = torch.ones([1, 24, 3], dtype=torch.float32)
    joints_2d = loss.project_points(joints_3d, camera) + (256 - 1) / 2
    # Case 1: prediction matches ground truth exactly -> every term is zero.
    fake_pred = {
        'pose': smpl_rotmat,
        'beta': smpl_beta,
        'camera': camera,
        'vertices': vertices,
        'joints_3d': joints_3d,
    }
    fake_gt = {
        'pose': smpl_pose,
        'beta': smpl_beta,
        'vertices': vertices,
        'has_smpl': torch.ones(1, dtype=torch.float32),
        'joints_3d': joints_3d,
        'joints_3d_visible': torch.ones([1, 24, 1], dtype=torch.float32),
        'joints_2d': joints_2d,
        'joints_2d_visible': torch.ones([1, 24, 1], dtype=torch.float32),
    }
    losses = loss(fake_pred, fake_gt)
    for term in ('vertex_loss', 'smpl_pose_loss', 'smpl_beta_loss',
                 'joints_3d_loss', 'joints_2d_loss'):
        assert torch.allclose(losses[term], torch.tensor(0.))
    # Case 2: perturbed predictions -> known non-zero loss values.
    joints_3d_shifted = joints_3d.clone()
    joints_3d_shifted[:, 0] = joints_3d_shifted[:, 0] + 1
    fake_pred = {
        'pose': smpl_rotmat + 1,
        'beta': smpl_beta + 1,
        'camera': camera,
        'vertices': vertices + 1,
        'joints_3d': joints_3d.clone(),
    }
    fake_gt = {
        'pose': smpl_pose,
        'beta': smpl_beta,
        'vertices': vertices,
        'has_smpl': torch.ones(1, dtype=torch.float32),
        'joints_3d': joints_3d_shifted,
        'joints_3d_visible': torch.ones([1, 24, 1], dtype=torch.float32),
        'joints_2d': joints_2d + (256 - 1) / 2,
        'joints_2d_visible': torch.ones([1, 24, 1], dtype=torch.float32),
    }
    losses = loss(fake_pred, fake_gt)
    expected = {
        'vertex_loss': 1.,
        'smpl_pose_loss': 1.,
        'smpl_beta_loss': 1.,
        'joints_3d_loss': 0.5 / 24,
        'joints_2d_loss': 0.5,
    }
    for term, value in expected.items():
        assert torch.allclose(losses[term], torch.tensor(value))
def test_gan_loss():
    """test gan loss."""
    # An unsupported gan_type must be rejected at construction time.
    with pytest.raises(NotImplementedError):
        build_loss(
            dict(
                type='GANLoss',
                gan_type='test',
                real_label_val=1.0,
                fake_label_val=0.0,
                loss_weight=1))
    input_1 = torch.ones(1, 1)
    input_2 = torch.ones(1, 3, 6, 6) * 2
    # Each case: (gan_type, input tensor, expected losses for
    # (target_is_real, is_disc) in the order (T,F), (F,F), (T,T), (F,T)).
    cases = [
        ('vanilla', input_1, (0.6265233, 2.6265232, 0.3132616, 1.3132616)),
        ('lsgan', input_2, (2.0, 8.0, 1.0, 4.0)),
        ('wgan', input_2, (-4.0, 4.0, -2.0, 2.0)),
        ('hinge', input_2, (-4.0, -4.0, 0.0, 3.0)),
    ]
    flag_combos = [(True, False), (False, False), (True, True), (False, True)]
    for gan_type, inputs, expected in cases:
        gan_loss = build_loss(
            dict(
                type='GANLoss',
                gan_type=gan_type,
                real_label_val=1.0,
                fake_label_val=0.0,
                loss_weight=2.0))
        for (target_is_real, is_disc), value in zip(flag_combos, expected):
            loss = gan_loss(inputs, target_is_real, is_disc=is_disc)
            assert_almost_equal(loss.item(), value)
| [
"mmpose.models.build_loss",
"torch.tensor",
"pytest.raises",
"torch.zeros",
"torch.rand",
"torch.ones"
] | [((534, 554), 'mmpose.models.build_loss', 'build_loss', (['loss_cfg'], {}), '(loss_cfg)\n', (544, 554), False, 'from mmpose.models import build_loss\n'), ((572, 613), 'torch.zeros', 'torch.zeros', (['[1, 72]'], {'dtype': 'torch.float32'}), '([1, 72], dtype=torch.float32)\n', (583, 613), False, 'import torch\n'), ((706, 747), 'torch.zeros', 'torch.zeros', (['[1, 10]'], {'dtype': 'torch.float32'}), '([1, 10], dtype=torch.float32)\n', (717, 747), False, 'import torch\n'), ((761, 807), 'torch.tensor', 'torch.tensor', (['[[1, 0, 0]]'], {'dtype': 'torch.float32'}), '([[1, 0, 0]], dtype=torch.float32)\n', (773, 807), False, 'import torch\n'), ((823, 868), 'torch.rand', 'torch.rand', (['[1, 6890, 3]'], {'dtype': 'torch.float32'}), '([1, 6890, 3], dtype=torch.float32)\n', (833, 868), False, 'import torch\n'), ((885, 928), 'torch.ones', 'torch.ones', (['[1, 24, 3]'], {'dtype': 'torch.float32'}), '([1, 24, 3], dtype=torch.float32)\n', (895, 928), False, 'import torch\n'), ((1342, 1376), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.float32'}), '(1, dtype=torch.float32)\n', (1352, 1376), False, 'import torch\n'), ((1449, 1492), 'torch.ones', 'torch.ones', (['[1, 24, 1]'], {'dtype': 'torch.float32'}), '([1, 24, 1], dtype=torch.float32)\n', (1459, 1492), False, 'import torch\n'), ((1565, 1608), 'torch.ones', 'torch.ones', (['[1, 24, 1]'], {'dtype': 'torch.float32'}), '([1, 24, 1], dtype=torch.float32)\n', (1575, 1608), False, 'import torch\n'), ((2439, 2473), 'torch.ones', 'torch.ones', (['(1)'], {'dtype': 'torch.float32'}), '(1, dtype=torch.float32)\n', (2449, 2473), False, 'import torch\n'), ((2548, 2591), 'torch.ones', 'torch.ones', (['[1, 24, 1]'], {'dtype': 'torch.float32'}), '([1, 24, 1], dtype=torch.float32)\n', (2558, 2591), False, 'import torch\n'), ((2680, 2723), 'torch.ones', 'torch.ones', (['[1, 24, 1]'], {'dtype': 'torch.float32'}), '([1, 24, 1], dtype=torch.float32)\n', (2690, 2723), False, 'import torch\n'), ((3431, 3447), 'torch.ones', 'torch.ones', 
(['(1)', '(1)'], {}), '(1, 1)\n', (3441, 3447), False, 'import torch\n'), ((3673, 3693), 'mmpose.models.build_loss', 'build_loss', (['loss_cfg'], {}), '(loss_cfg)\n', (3683, 3693), False, 'from mmpose.models import build_loss\n'), ((4266, 4286), 'mmpose.models.build_loss', 'build_loss', (['loss_cfg'], {}), '(loss_cfg)\n', (4276, 4286), False, 'from mmpose.models import build_loss\n'), ((4833, 4853), 'mmpose.models.build_loss', 'build_loss', (['loss_cfg'], {}), '(loss_cfg)\n', (4843, 4853), False, 'from mmpose.models import build_loss\n'), ((5402, 5422), 'mmpose.models.build_loss', 'build_loss', (['loss_cfg'], {}), '(loss_cfg)\n', (5412, 5422), False, 'from mmpose.models import build_loss\n'), ((1697, 1714), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1709, 1714), False, 'import torch\n'), ((1767, 1784), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1779, 1784), False, 'import torch\n'), ((1837, 1854), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1849, 1854), False, 'import torch\n'), ((1907, 1924), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1919, 1924), False, 'import torch\n'), ((1977, 1994), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1989, 1994), False, 'import torch\n'), ((2812, 2829), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (2824, 2829), False, 'import torch\n'), ((2882, 2899), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (2894, 2899), False, 'import torch\n'), ((2952, 2969), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (2964, 2969), False, 'import torch\n'), ((3022, 3044), 'torch.tensor', 'torch.tensor', (['(0.5 / 24)'], {}), '(0.5 / 24)\n', (3034, 3044), False, 'import torch\n'), ((3098, 3115), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (3110, 3115), False, 'import torch\n'), ((3174, 3208), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (3187, 3208), False, 'import 
pytest\n'), ((3395, 3415), 'mmpose.models.build_loss', 'build_loss', (['loss_cfg'], {}), '(loss_cfg)\n', (3405, 3415), False, 'from mmpose.models import build_loss\n'), ((3462, 3484), 'torch.ones', 'torch.ones', (['(1)', '(3)', '(6)', '(6)'], {}), '(1, 3, 6, 6)\n', (3472, 3484), False, 'import torch\n')] |
from operator import is_
from ._helper import _UnhashableFriendlyDict, _LinkedList, _is_iterable_non_string, Rangelike
from .Range import Range
from .RangeSet import RangeSet
from typing import Iterable, Union, Any, TypeVar, List, Tuple, Dict, Tuple
# Generic type variables used in RangeDict's annotations:
# T is the type of items looked up, V the type of stored values.
T = TypeVar('T', bound=Any)
V = TypeVar('V', bound=Any)
class RangeDict:
"""
A class representing a dict-like structure where continuous ranges
correspond to certain values. For any item given to lookup, the
value obtained from a RangeDict will be the one corresponding to
the first range into which the given item fits. Otherwise, RangeDict
provides a similar interface to python's built-in dict.
A RangeDict can be constructed in one of four ways:
>>> # Empty
>>> a = RangeDict()
>>> # From an existing RangeDict object
>>> b = RangeDict(a)
>>> # From a dict that maps Ranges to values
>>> c = RangeDict({
... Range('a', 'h'): "First third of the lowercase alphabet",
... Range('h', 'p'): "Second third of the lowercase alphabet",
... Range('p', '{'): "Final third of the lowercase alphabet",
... })
>>> print(c['brian']) # First third of the lowercase alphabet
>>> print(c['king arthur']) # Second third of the lowercase alphabet
>>> print(c['python']) # Final third of the lowercase alphabet
>>> # From an iterable of 2-tuples, like a regular dict
>>> d = RangeDict([
... (Range('A', 'H'), "First third of the uppercase alphabet"),
... (Range('H', 'P'), "Second third of the uppercase alphabet"),
... (Range('P', '['), "Final third of the uppercase alphabet"),
... ])
A RangeDict cannot be constructed from an arbitrary number of positional
arguments or keyword arguments.
RangeDicts are mutable, so new range correspondences can be added
at any time, with Ranges or RangeSets acting like the keys in a
normal dict/hashtable. New keys must be of type Range or RangeSet,
or they must be able to be coerced into a RangeSet. Given
keys are also copied before they are added to a RangeDict.
Adding a new range that overlaps with an existing range will
make it so that the value returned for any given number will be
the one corresponding to the most recently-added range in which
it was found (Ranges are compared by `start`, `include_start`, `end`,
and `include_end` in that priority order). Order of insertion is
important.
The RangeDict constructor, and the `.update()` method, insert elements
in order from the iterable they came from. As of python 3.7+, dicts
retain the insertion order of their arguments, and iterate in that
order - this is respected by this data structure. Other iterables,
like lists and tuples, have order built-in. Be careful about using
sets as arguments, since they have no guaranteed order.
Be very careful about adding a range from -infinity to +infinity.
If defined using the normal Range constructor without any start/end
arguments, then that Range will by default accept any value (see
Range's documentation for more info). However, the first non-infinite
Range added to the RangeDict will overwrite part of the infinite Range,
and turn it into a Range of that type only. As a result, other types
that the infinite Range may have accepted before, will no longer work:
>>> e = RangeDict({Range(include_end=True): "inquisition"})
>>> print(e) # {{[-inf, inf)}: inquisition}
>>> print(e.get(None)) # inquisition
>>> print(e.get(3)) # inquisition
>>> print(e.get("holy")) # inquisition
>>> print(e.get("spanish")) # inquisition
>>>
>>> e[Range("a", "m")] = "grail"
>>>
>>> print(e) # {{[-inf, a), [m, inf)}: inquisition, {[a, m)}: grail}
>>> print(e.get("spanish")) # inquisition
>>> print(e.get("holy")) # grail
>>> print(e.get(3)) # KeyError
>>> print(e.get(None)) # KeyError
In general, unless something has gone wrong, the RangeDict will not
include any empty ranges. Values will disappear if there are not
any keys that map to them. Adding an empty Range to the RangeDict
will not trigger an error, but will have no effect.
By default, the range set will determine value uniqueness by equality
(`==`), not by identity (`is`), and multiple rangekeys pointing to the
same value will be compressed into a single RangeSet pointed at a
single value. This is mainly meaningful for values that are mutable,
such as `list`s or `set`s.
If using assignment operators besides the generic `=` (`+=`, `|=`, etc.)
on such values, be warned that the change will reflect upon the entire
rangeset.
>>> # [{3}] == [{3}] is True, so the two ranges are made to point to the same object
>>> f = RangeDict({Range(1, 2): {3}, Range(4, 5): {3}})
>>> print(f) # {{[1, 2), [4, 5)}: {3}}
>>>
>>> # f[1] returns the {3}. When |= is used, this object changes to {3, 4}
>>> f[Range(1, 2)] |= {4}
>>> # since the entire rangeset is pointing at the same object, the entire range changes
>>> print(f) # {{[1, 2), [4, 5)}: {3, 4}}
This is because `dict[value] = newvalue` calls `dict.__setitem__()`, whereas
`dict[value] += item` instead calls `dict[value].__iadd__()` instead.
To make the RangeDict use identity comparison instead, construct it with the
keyword argument `identity=True`, which should help:
>>> # `{3} is {3}` is False, so the two ranges don't coalesce
>>> g = RangeDict({Range(1, 2): {3}, Range(4, 5): {3}}, identity=True)
>>> print(g) # {{[1, 2)}: {3}, {[4, 5)}: {3}}
To avoid the problem entirely, you can also simply not mutate mutable values
that multiple rangekeys may refer to, substituting non-mutative operations:
>>> h = RangeDict({Range(1, 2): {3}, Range(4, 5): {3}})
>>> print(h) # {{[1, 2), [4, 5)}: {3}}
>>> h[Range(1, 2)] = h[Range(1, 2)] | {4}
>>> print(h) # {{[4, 5)}: {3}, {[1, 2)}: {3, 4}}
"""
# sentinel for checking whether an arg was passed, where anything is valid including None
_sentinel = object()
    def __init__(self, iterable: Union['RangeDict', Dict[Rangelike, V], Iterable[Tuple[Rangelike, V]]] = _sentinel,
                 *, identity=False):
        """
        Initialize a new RangeDict from the given iterable. The given iterable
        may be either a RangeDict (in which case, a copy will be created),
        a regular dict with all keys able to be converted to Ranges, or an
        iterable of 2-tuples (range, value).
        If the argument `identity=True` is given, the RangeDict will use `is` instead
        of `==` when it compares multiple rangekeys with the same associated value to
        possibly merge them.
        :param iterable: Optionally, an iterable from which to source keys - either a RangeDict, a regular dict
                    with Rangelike objects as keys, or an iterable of (range, value) tuples.
        :param identity: optionally, a toggle to use identity instead of equality when determining key-value
                    similarity. By default, uses equality, but will use identity instead if True is passed.
        :raises ValueError: if the given iterable is not one of the accepted forms
        """
        # Internally, RangeDict has two data structures
        # _values is a dict {value: [rangeset, ...], ..., '_sentinel': [(value: [rangeset, ...]), ...]}
        # The sentinel allows the RangeDict to accommodate unhashable types.
        # _ranges is a list-of-lists, [[(intrangeset1, value1), (intrangeset2, value2), ...],
        # [(strrangeset1, value1), (strrangeset2, value2), ...],
        # ...]
        # where each inner list is a list of (RangeSet, corresponding_value) tuples.
        # Each inner list corresponds to a different, mutually-incomparable, type of Range.
        # We use _values to cross-reference with while adding new ranges, to avoid having to search the entire
        # _ranges for the value we want to point to.
        # Meanwhile, _ranges is a list-of-lists instead of just a list, so that we can accommodate ranges of
        # different types (e.g. a RangeSet of ints and a RangeSet of strings) pointing to the same values.
        self._values = _UnhashableFriendlyDict()
        if identity:
            # switch value comparison from == to `is` (see class docstring)
            self._values._operator = is_
        if iterable is RangeDict._sentinel:
            # no argument given: start empty
            self._rangesets = _LinkedList()
        elif isinstance(iterable, RangeDict):
            # copy-construct: shallow-copy the value lists and copy each rangeset
            self._values.update({val: rngsets[:] for val, rngsets in iterable._values.items()})
            self._rangesets = _LinkedList([rngset.copy() for rngset in iterable._rangesets])
        elif isinstance(iterable, dict):
            self._rangesets = _LinkedList()
            for rng, val in iterable.items():
                if _is_iterable_non_string(rng):
                    # a key may itself be an iterable of ranges of mixed types
                    for r in rng:
                        self.add(r, val)
                else:
                    self.add(rng, val)
        else:
            try:
                assert(_is_iterable_non_string(iterable)) # creative method of avoiding code reuse!
                self._rangesets = _LinkedList()
                for rng, val in iterable:
                    # this should not produce an IndexError. It produces a TypeError instead.
                    # (or a ValueError in case of too many to unpack. Which is fine because it screens for 3-tuples)
                    if _is_iterable_non_string(rng):
                        # this allows constructing with e.g. rng=[Range(1, 2), Range('a', 'b')], which makes sense
                        for r in rng:
                            self.add(r, val)
                    else:
                        self.add(rng, val)
            except (TypeError, ValueError, AssertionError):
                raise ValueError("Expected a dict, RangeDict, or iterable of 2-tuples")
        # reserve the sentinel slot used for unhashable values (see note above),
        # then drop any empty rangekeys introduced during construction
        self._values[RangeDict._sentinel] = []
        self.popempty()
    def add(self, rng: Rangelike, value: V) -> None:
        """
        Add the single given Range/RangeSet to correspond to the given value.
        If the given Range overlaps with a Range that is already contained
        within this RangeDict, then the new range takes precedence.
        To add multiple Ranges of the same type, pack them into a RangeSet
        and pass that.
        To add a list of multiple Ranges of different types, use `.update()`
        instead. Using this method instead will produce a `TypeError`.
        If an empty Range is given, then this method does nothing.
        :param rng: Rangekey to add
        :param value: value to add corresponding to the given Rangekey
        :raises TypeError: if `rng` cannot be converted to a RangeSet
        """
        # copy the range and get it into an easy-to-work-with form
        try:
            rng = RangeSet(rng)
        except TypeError:
            raise TypeError("argument 'rng' for .add() must be able to be converted to a RangeSet")
        if rng.isempty():
            # empty ranges are a no-op by contract
            return
        # first, remove this range from any existing range
        short_circuit = False
        for rngsetlist in self._rangesets:
            # rngsetlist is a tuple (_LinkedList(ranges), value)
            for rngset in rngsetlist:
                # rngset
                try:
                    rngset[0].discard(rng)
                    short_circuit = True # (naively) assume only one type of rngset will be compatible
                except TypeError:
                    pass
            if short_circuit:
                # discarding may have emptied some rangekeys; purge them now
                self.popempty()
                break
        # then, add it back in depending on whether it shares an existing value or not.
        if value in self._values:
            # duplicate value. More than one range must map to it.
            existing_rangesets = self._values[value]
            # existing_rangesets is a list (not _LinkedList) of RangeSets that correspond to value.
            # if there's already a whole RangeSet pointing to value, then simply add to that RangeSet
            for rngset in existing_rangesets:
                try:
                    # ...once we find the RangeSet of the right type
                    rngset.add(rng)
                    # And then bubble it into place in whichever _LinkedList would have contained it.
                    # This is one empty list traversal for every non-modified _LinkedList, and one gnomesort
                    # for the one we really want. A little time loss but not that much. Especially not
                    # any extra timeloss for single-typed RangeDicts.
                    self._sort_ranges()
                    # And short-circuit, since we've already dealt with the complications and don't need to
                    # do any further modification of _values or _rangesets
                    return
                except TypeError:
                    pass
            # if we didn't find a RangeSet of the right type, then we must add rng as a new RangeSet of its own type.
            # add a reference in _values
            self._values[value].append(rng)
        else:
            # new value. This is easy, we just need to add a value for it:
            self._values[value] = [rng]
        # Now that we've added our new RangeSet into _values, we need to make sure it's accounted for in _rangesets
        # we will first try to insert it into all our existing rangesets
        for rngsetlist in self._rangesets:
            # rngsetlist is a _LinkedList of (RangeSet, value) tuples
            # [(rangeset0, value0), (rangeset1, value1), ...]
            try:
                # "try" == "assess comparability with the rest of the RangeSets in this _LinkedList".
                # This is checked via trying to execute a dummy comparison with the first RangeSet in this category,
                # and seeing if it throws a TypeError.
                # Though it's kinda silly, this is probably the best way to handle this. See:
                # https://stackoverflow.com/q/57717100/2648811
                _ = rng < rngsetlist[0][0]
                # If it doesn't raise an error, then it's comparable and we're good.
                # Add it, bubble it to sorted order via .gnomesort(), and return.
                rngsetlist.append((rng, value))
                rngsetlist.gnomesort()
                return
            except TypeError:
                pass
        # if no existing rangeset accepted it, then we need to add one.
        # singleton _LinkedList containing just (rng, value), appended to self._rangesets
        self._rangesets.append(_LinkedList(((rng, value),)))
def update(self, iterable: Union['RangeDict', Dict[Rangelike, V], Iterable[Tuple[Rangelike, V]]]) -> None:
"""
Adds the contents of the given iterable (either another RangeDict, a
`dict` mapping Range-like objects to values, or a list of 2-tuples
`(range-like, value)`) to this RangeDict.
:param iterable: An iterable containing keys and values to add to this RangeDict
"""
# coerce to RangeDict and add that
if not isinstance(iterable, RangeDict):
iterable = RangeDict(iterable)
for value, rangesets in iterable._values.items():
for rngset in rangesets:
self.add(rngset, value)
def getitem(self, item: T) -> Tuple[List[RangeSet], RangeSet, Range, V]:
"""
Returns both the value corresponding to the given item, the Range
containing it, and the set of other contiguous ranges that would
have also yielded the same value, as a 4-tuple
`([RangeSet1, Rangeset2, ...], RangeSet, Range, value)`.
In reverse order, that is
- the value corresponding to item
- the single continuous range directly containing the item
- the RangeSet directly containing the item and corresponding
to the value
- a list of all RangeSets (of various non-mutually-comparable
types) that all correspond to the value. Most of the time,
this will be a single-element list, if only one type of Range
is used in the RangeDict. Otherwise, if ranges of multiple
types (e.g. int ranges, string ranges) correspond to the same
value, this list will contain all of them.
Using `.get()`, `.getrange()`, `.getrangeset()`, or
`.getrangesets()` to isolate just one of those return values is
usually easier. This method is mainly used internally.
Raises a `KeyError` if the desired item is not found.
:param item: item to search for
:return: a 4-tuple (keys with same value, containing RangeSet, containing Range, value)
"""
for rngsets in self._rangesets:
# rngsets is a _LinkedList of (RangeSet, value) tuples
for rngset, value in rngsets:
try:
rng = rngset.getrange(item)
return self._values[value], rngset, rng, value
except IndexError:
# try RangeSets of the same type, corresponding to other values
continue
except TypeError:
# try RangeSets of a different type
break
raise KeyError(f"'{item}' was not found in any range")
def getrangesets(self, item: T) -> List[RangeSet]:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns a list of all RangeSets in this RangeDict that
correspond to that value.
Most of the time, this will be a single-element list, if only one
type of Range is used in the RangeDict. Otherwise, if ranges of
multiple types (e.g. int ranges, string ranges) correspond to the
same value, this list will contain all of them.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: all RangeSets in this RangeDict that correspond to the same value as the given item
"""
return self.getitem(item)[0]
def getrangeset(self, item: T) -> RangeSet:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the RangeSet containing the given item that
corresponds to that value.
To find other RangeSets of other types that correspond to the same
value, use `.getrangesets()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the RangeSet key containing the given item
"""
return self.getitem(item)[1]
def getrange(self, item: T) -> Range:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the single contiguous range containing the given item
that corresponds to that value.
To find the RangeSet of all Ranges that correspond to that item,
use `.getrangeset()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the Range most directly containing the given item
"""
return self.getitem(item)[2]
def get(self, item: T, default: Any = _sentinel) -> Union[V, Any]:
"""
Returns the value corresponding to the given item, based on
the most recently-added Range containing it.
The `default` argument is optional.
Like Python's built-in `dict`, if `default` is given, returns that if
`item` is not found.
Otherwise, raises a `KeyError`.
:param item: item to search for
:param default: optionally, a value to return, if item is not found
(if not provided, raises a KeyError if not found)
:return: the value corrsponding to the item, or default if item is not found
"""
try:
return self.getitem(item)[3]
except KeyError:
if default is not RangeDict._sentinel:
return default
raise
def getoverlapitems(self, rng: Rangelike) -> List[Tuple[List[RangeSet], RangeSet, V]]:
"""
Returns a list of 3-tuples
[([RangeSet1, ...], RangeSet, value), ...]
corresponding to every distinct rangekey of this RangeDict that
overlaps the given range.
In reverse order, for each tuple, that is
- the value corresponding to the rangeset
- the RangeSet corresponding to the value that intersects the given range
- a list of all RangeSets (of various non-mutually-comparable
types) that all correspond to the value. Most of the time,
this will be a single-element list, if only one type of Range
is used in the RangeDict. Otherwise, if ranges of multiple
types (e.g. int ranges, string ranges) correspond to the same
value, this list will contain all of them.
Using `.getoverlap()`, `.getoverlapranges()`, or
`.getoverlaprangesets()`
to isolate just one of those return values is
usually easier. This method is mainly used internally.
:param rng: Rangelike to search for
:return: a list of 3-tuples (Rangekeys with same value, containing RangeSet, value)
"""
ret = []
for rngsets in self._rangesets:
# rngsets is a _LinkedList of (RangeSet, value) tuples
for rngset, value in rngsets:
try:
if rngset.intersection(rng):
ret.append((self._values[value], rngset, value))
except TypeError:
break
# do NOT except ValueError - if `rng` is not rangelike, then error should be thrown.
return ret
def getoverlap(self, rng: Rangelike) -> List[V]:
"""
Returns a list of values corresponding to every distinct
rangekey of this RangeDict that overlaps the given range.
:param rng: Rangelike to search for
:return: a list of values corresponding to each rangekey intersected by rng
"""
return [t[2] for t in self.getoverlapitems(rng)]
def getoverlapranges(self, rng: Rangelike) -> List[RangeSet]:
"""
Returns a list of all rangekeys in this RangeDict that intersect with
the given range.
:param rng: Rangelike to search for
:return: a list of all RangeSet rangekeys intersected by rng
"""
return [t[1] for t in self.getoverlapitems(rng)]
def getoverlaprangesets(self, rng: Rangelike) -> List[List[RangeSet]]:
"""
Returns a list of RangeSets corresponding to the same value as every
rangekey that intersects the given range.
:param rng: Rangelike to search for
:return: a list lists of rangesets that correspond to the same values as every rangekey intersected by rng
"""
return [t[0] for t in self.getoverlapitems(rng)]
def getvalue(self, value: V) -> List[RangeSet]:
"""
Returns the list of RangeSets corresponding to the given value.
Raises a `KeyError` if the given value is not corresponded to by
any RangeSets in this RangeDict.
:param value: value to search for
:return: a list of rangekeys that correspond to the given value
"""
try:
return self._values[value]
except KeyError:
raise KeyError(f"value '{value}' is not present in this RangeDict")
def set(self, item: T, new_value: V) -> V:
"""
Changes the value corresponding to the given `item` to the given
`new_value`, such that all ranges corresponding to the old value
now correspond to the `new_value` instead.
Returns the original, overwritten value.
If the given item is not found, raises a `KeyError`.
:param item: item to search for
:param new_value: value to set for all rangekeys sharing the same value as item corresponds to
:return: the previous value those rangekeys corresponded to
"""
try:
old_value = self.get(item)
except KeyError:
raise KeyError(f"Item '{item}' is not in any Range in this RangeDict")
self.setvalue(old_value, new_value)
return old_value
def setvalue(self, old_value: V, new_value: V) -> None:
"""
Changes all ranges corresponding to the given `old_value` to correspond
to the given `new_value` instead.
Raises a `KeyError` if the given `old_value` isn't found.
:param old_value: value to change for all keys that correspond to it
:param new_value: value to replace it with
"""
try:
rangesets = list(self._values[old_value])
except KeyError:
raise KeyError(f"Value '{old_value}' is not in this RangeDict")
for rngset in rangesets:
self.add(rngset, new_value)
    def popitem(self, item: T) -> Tuple[List[RangeSet], RangeSet, Range, V]:
        """
        Returns the value corresponding to the given item, the Range containing
        it, and the set of other contiguous ranges that would have also yielded
        the same value, as a 4-tuple
        `([RangeSet1, Rangeset2, ...], RangeSet, Range, value)`.
        In reverse order, that is
        - the value corresponding to item
        - the single continuous range directly containing the item
        - the RangeSet directly containing the item and corresponding to the
        value
        - a list of all RangeSets (of various non-mutually-comparable types)
        that all correspond to the value. Most of the time, this will be a
        single-element list, if only one type of Range is used in the
        RangeDict. Otherwise, if ranges of multiple types (e.g. int ranges,
        string ranges) correspond to the same value, this list will contain
        all of them.
        Also removes all of the above from this RangeDict.
        While this method is used a lot internally, it's usually easier to
        simply use `.pop()`, `.poprange()`, `.poprangeset()`, or
        `.poprangesets()` to get the single item of interest.
        Raises a KeyError if the desired item is not found.
        :param item: item to search for
        :return: a 4-tuple (keys with same value, containing RangeSet, containing Range, value)
        """
        # search for item linked list-style
        for rngsetlist in self._rangesets:
            # rngsetlist is a _LinkedList of (RangeSet, value) tuples
            cur = rngsetlist.first
            while cur:
                try:
                    # RangeSet.getrange() raises IndexError when the item is
                    # not in this RangeSet, and TypeError when the item is not
                    # comparable with it (same convention as in .getitem())
                    rng = cur.value[0].getrange(item)
                    # found: unlink this node and drop the value's dict entry
                    rngsetlist.pop_node(cur)
                    rngsets = self._values.pop(cur.value[1])
                    # clean out any structures left empty by the removal
                    self.popempty()
                    return rngsets, cur.value[0], rng, cur.value[1]
                except IndexError:
                    # try the next range correspondence
                    cur = cur.next
                    continue
                except TypeError:
                    # try ranges of a different type
                    break
        raise KeyError(f"'{item}' was not found in any range")
def poprangesets(self, item: T) -> List[RangeSet]:
"""
Finds the value to which the given item corresponds, and returns the
list of RangeSets that correspond to that value (see
`.getrangesets()`).
Also removes the value, and all RangeSets from this RangeDict. To
remove just one range and leave the rest intact, use `.remove()`
instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: all RangeSets in this RangeDict that correspond to the same value as the given item
"""
return self.popitem(item)[0]
def poprangeset(self, item: T) -> RangeSet:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the RangeSet containing the given item that
corresponds to that value.
Also removes the value and all ranges that correspond to it from this
RangeDict. To remove just one range and leave the rest intact, use
`.remove()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the RangeSet key containing the given item
"""
return self.popitem(item)[1]
def poprange(self, item: T) -> Range:
"""
Finds the value to which the given item corresponds in this RangeDict,
and then returns the single contiguous range containing the given item
that corresponds to that value.
Also removes the value and all ranges that correspond to it from this
RangeDict. To remove just one range and leave the rest intact, use
`.remove()` instead.
Raises a `KeyError` if the given item is not found.
:param item: item to search for
:return: the Range containing the given item
"""
return self.popitem(item)[2]
def pop(self, item: T, default: Any = _sentinel) -> Union[V, Any]:
"""
Returns the value corresponding to the most recently-added range that
contains the given item. Also removes the returned value and all
ranges corresponding to it from this RangeDict.
The argument `default` is optional, just like in python's built-in
`dict.pop()`, if default is given, then if the item is not found,
returns that instead.
Otherwise, raises a `KeyError`.
:param item: item to search for
:param default: optionally, a value to return, if item is not found
(if not provided, raises a KeyError if not found)
:return: the value corrsponding to the item, or default if item is not found
"""
try:
return self.popitem(item)[3]
except KeyError:
if default != RangeDict._sentinel:
return default
raise
def popvalue(self, value: V) -> List[RangeSet]:
"""
Removes all ranges corresponding to the given value from this RangeDict,
as well as the value itself. Returns a list of all the RangeSets of
various types that corresponded to the given value.
:param value: value to purge
:return: all RangeSets in this RangeDict that correspond to the given value
"""
# find a RangeSet corresponding to the value, which we can use as a key
sample_item = self._values[value][0]
# use that RangeSet to do the regular pop() function
return self.popitem(sample_item)[0]
    def popempty(self) -> None:
        """
        Removes all empty ranges from this RangeDict, as well as all values
        that have no corresponding ranges. The RangeDict calls this method on
        itself after most operations that modify it, so calling it manually,
        while possible, will usually do nothing.
        """
        # We start by traversing _ranges and removing all empty things.
        rngsetlistnode = self._rangesets.first
        while rngsetlistnode:
            # rngsetlistnode is a Node(_LinkedList((RangeSet, value)))
            rngsetnode = rngsetlistnode.value.first
            # First, empty all RangeSets
            while rngsetnode:
                # rngsetnode is a Node((RangeSet, value))
                rngset = rngsetnode.value[0]
                # popempty() on the RangeSet in rngsetnode
                rngset.popempty()
                # if the RangeSet is empty, then remove it.
                if rngset.isempty():
                    rngsetlistnode.value.pop_node(rngsetnode)
                    # also remove this RangeSet from .values()
                    self._values[rngsetnode.value[1]].remove(rngset)
                # deletion while traversing is fine in a linked list only
                # (assumes pop_node leaves the node's .next link intact so we
                # can still advance from it -- see _LinkedList)
                rngsetnode = rngsetnode.next
            # Next, check for an empty list of RangeSets
            if len(rngsetlistnode.value) == 0:
                self._rangesets.pop_node(rngsetlistnode)
                # in this case, there are no RangeSets to pop, so we can leave ._values alone
            # and finally, advance to the next list of RangeSets
            rngsetlistnode = rngsetlistnode.next
        # Once we've removed all RangeSets, we then remove all values with no corresponding Range-like objects
        # (list() snapshots the keys so we may pop from the dict while iterating)
        for value in list(self._values.keys()):
            if not self._values[value]:
                self._values.pop(value)
def remove(self, rng: Rangelike):
"""
Removes the given Range or RangeSet from this RangeDict, leaving behind
'empty space'.
Afterwards, empty ranges, and values with no remaining corresponding
ranges, will be automatically removed.
:param rng: Range to remove as rangekeys from this dict
"""
# no mutation unless the operation is successful
rng = RangeSet(rng)
temp = self.copy()
# do the removal on the copy
for rngsetlist in temp._rangesets:
for rngset, value in rngsetlist:
try:
rngset.discard(rng)
except TypeError:
break
temp.popempty()
self._rangesets, self._values = temp._rangesets, temp._values
def isempty(self) -> bool:
"""
:return: `True` if this RangeDict contains no values, and `False` otherwise.
"""
return not self._values
def ranges(self) -> List[RangeSet]:
"""
Returns a list of RangeSets that correspond to some value in this
RangeDict, ordered as follows:
All Rangesets of comparable types are grouped together, with
order corresponding to the order in which the first RangeSet of
the given type was added to this RangeDict (earliest first).
Within each such group, RangeSets are ordered in increasing order
of their lower bounds.
This function is analagous to Python's built-in `dict.keys()`
:return: a list of RangeSet keys in this RangeDict
"""
return [rngset for rngsetlist in self._rangesets for rngset, value in rngsetlist]
def values(self) -> List[V]:
"""
Returns a list of values that are corresponded to by some RangeSet in
this RangeDict, ordered by how recently they were added (via .`add()`
or `.update()`) or set (via `.set()` or `.setvalue()`), with the
oldest values being listed first.
This function is synonymous to Python's built-in `dict.values()`
:return: a list of values contained in this RangeDict
"""
return list(self._values.keys())
def items(self) -> List[Tuple[Any, Any]]:
"""
:return: a list of 2-tuples `(list of ranges corresponding to value, value)`, ordered
by time-of-insertion of the values (see `.values()` for more detail)
"""
return [(rngsets, value) for value, rngsets in self._values.items()]
def clear(self) -> None:
"""
Removes all items from this RangeDict, including all of the Ranges
that serve as keys, and the values to which they correspond.
"""
self._rangesets = _LinkedList()
self._values = {}
def copy(self) -> 'RangeDict':
"""
:return: a shallow copy of this RangeDict
"""
return RangeDict(self)
def _sort_ranges(self) -> None:
""" Helper method to gnomesort all _LinkedLists-of-RangeSets. """
for linkedlist in self._rangesets:
linkedlist.gnomesort()
def __setitem__(self, key: Rangelike, value: V):
"""
Equivalent to :func:`~RangeDict.add`.
"""
self.add(key, value)
def __getitem__(self, item: T):
"""
Equivalent to :func:`~RangeDict.get`. If `item` is a range, then this will only
return a corresponding value if `item` is completely contained by one
of this RangeDict's rangekeys. To get values corresponding to all
overlapping ranges, use `.getoverlap(item)` instead.
"""
return self.get(item)
def __contains__(self, item: T):
"""
:return: True if the given item corresponds to any single value in this RangeDict, False otherwise
"""
sentinel2 = object()
return not (self.get(item, sentinel2) is sentinel2)
# return any(item in rngset for rngsetlist in self._rangesets for (rngset, value) in rngsetlist)
def __len__(self) -> int:
"""
Returns the number of values, not the number of unique Ranges,
since determining how to count Ranges is Hard
:return: the number of unique values contained in this RangeDict
"""
return len(self._values)
def __eq__(self, other: 'RangeDict') -> bool:
"""
Tests whether this RangeDict is equal to the given RangeDict (has the same keys and values).
Note that this always tests equality for values, not identity, regardless of whether this
RangeDict was constructed in 'strict' mode.
:param other: RangeDict to compare against
:return: True if this RangeDict is equal to the given RangeDict, False otherwise
"""
# Actually comparing two LinkedLists together is hard, and all relevant information should be in _values anyway
# Ordering is the big challenge here - you can't order the nested LinkedLists.
# But what's important for equality between RangeDicts is that they have the same key-value pairs, which is
# properly checked just by comparing _values
return isinstance(other, RangeDict) and self._values == other._values # and self._rangesets == other._rangesets
def __ne__(self, other: 'RangeDict') -> bool:
"""
:param other: RangeDict to compare against
:return: False if this RangeDict is equal to the given RangeDict, True otherwise
"""
return not self.__eq__(other)
def __bool__(self) -> bool:
"""
:return: False if this RangeDict is empty, True otherwise
"""
return not self.isempty()
def __str__(self):
# nested f-strings, whee
return f"""{{{
', '.join(
f"{{{', '.join(str(rng) for rngset in rngsets for rng in rngset)}}}: {value}"
for value, rngsets in self._values.items()
)
}}}"""
def __repr__(self):
return f"""RangeDict{{{
', '.join(
f"RangeSet{{{', '.join(repr(rng) for rngset in rngsets for rng in rngset)}}}: {repr(value)}"
for value, rngsets in self._values.items()
)
}}}"""
| [
"typing.TypeVar"
] | [((255, 278), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'Any'}), "('T', bound=Any)\n", (262, 278), False, 'from typing import Iterable, Union, Any, TypeVar, List, Tuple, Dict, Tuple\n'), ((283, 306), 'typing.TypeVar', 'TypeVar', (['"""V"""'], {'bound': 'Any'}), "('V', bound=Any)\n", (290, 306), False, 'from typing import Iterable, Union, Any, TypeVar, List, Tuple, Dict, Tuple\n')] |
from django.urls import path
from cases.api.get_visuals_data import UpdateVisualsData
from cases.api.kenyan_cases import KenyanCaseList
from cases.api.visuals import VisualList

# URL routes for the case-data API views.
urlpatterns = [
    path('kenyan/all', KenyanCaseList.as_view(), name='Historical data'),
    # NOTE(review): this route reuses the name 'Historical data' from the
    # route above, so reverse('Historical data') will resolve to only one of
    # them -- confirm whether two distinct names were intended.
    path('history/', VisualList.as_view(), name='Historical data'),
    path('update/history', UpdateVisualsData.as_view(), name='Update Historical data'),
]
| [
"cases.api.get_visuals_data.UpdateVisualsData.as_view",
"cases.api.kenyan_cases.KenyanCaseList.as_view",
"cases.api.visuals.VisualList.as_view"
] | [((218, 242), 'cases.api.kenyan_cases.KenyanCaseList.as_view', 'KenyanCaseList.as_view', ([], {}), '()\n', (240, 242), False, 'from cases.api.kenyan_cases import KenyanCaseList\n'), ((290, 310), 'cases.api.visuals.VisualList.as_view', 'VisualList.as_view', ([], {}), '()\n', (308, 310), False, 'from cases.api.visuals import VisualList\n'), ((364, 391), 'cases.api.get_visuals_data.UpdateVisualsData.as_view', 'UpdateVisualsData.as_view', ([], {}), '()\n', (389, 391), False, 'from cases.api.get_visuals_data import UpdateVisualsData\n')] |
from typing import List
import pytest
from cfn_lint_ax.rules import (
CloudfrontDistributionComment,
CloudfrontDistributionLogging,
)
from tests.utils import BAD_TEMPLATE_FIXTURES_PATH, ExpectedError, assert_all_matches
@pytest.mark.parametrize(
    "filename,expected_errors",
    [
        (
            "cloudfront_distribution_without_logging_configuration.yaml",
            [
                (
                    6,
                    CloudfrontDistributionLogging,
                    "Property Resources/Distribution/Properties/DistributionConfig/Logging is missing",
                ),
            ],
        ),
        (
            "cloudfront_distribution_without_comment.yaml",
            [
                (
                    6,
                    CloudfrontDistributionComment,
                    "Property Resources/Distribution/Properties/DistributionConfig/Comment is missing",
                ),
            ],
        ),
    ],
)
def test_bad_cloudfront_distribution_config(
    filename: str, expected_errors: List[ExpectedError]
) -> None:
    """Each bad-template fixture must yield exactly the expected rule matches.

    Each expected error is a (line number, rule class, message) tuple.
    """
    filename = (BAD_TEMPLATE_FIXTURES_PATH / filename).as_posix()
    assert_all_matches(filename, expected_errors)
| [
"pytest.mark.parametrize",
"tests.utils.assert_all_matches"
] | [((233, 675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""filename,expected_errors"""', "[('cloudfront_distribution_without_logging_configuration.yaml', [(6,\n CloudfrontDistributionLogging,\n 'Property Resources/Distribution/Properties/DistributionConfig/Logging is missing'\n )]), ('cloudfront_distribution_without_comment.yaml', [(6,\n CloudfrontDistributionComment,\n 'Property Resources/Distribution/Properties/DistributionConfig/Comment is missing'\n )])]"], {}), "('filename,expected_errors', [(\n 'cloudfront_distribution_without_logging_configuration.yaml', [(6,\n CloudfrontDistributionLogging,\n 'Property Resources/Distribution/Properties/DistributionConfig/Logging is missing'\n )]), ('cloudfront_distribution_without_comment.yaml', [(6,\n CloudfrontDistributionComment,\n 'Property Resources/Distribution/Properties/DistributionConfig/Comment is missing'\n )])])\n", (256, 675), False, 'import pytest\n'), ((1152, 1197), 'tests.utils.assert_all_matches', 'assert_all_matches', (['filename', 'expected_errors'], {}), '(filename, expected_errors)\n', (1170, 1197), False, 'from tests.utils import BAD_TEMPLATE_FIXTURES_PATH, ExpectedError, assert_all_matches\n')] |
'''
records.py: base record class for holding data
Authors
-------
<NAME> <<EMAIL>> -- Caltech Library
Copyright
---------
Copyright (c) 2018 by the California Institute of Technology. This code is
open-source software released under a 3-clause BSD license. Please see the
file "LICENSE" for more information.
'''
import holdit
from holdit.debug import log
# Class definitions.
# .............................................................................
# The particular set of fields in this object came from the TIND holds page
# contents and a few additional fields kept in the tracking spreadsheet by
# the Caltech Library circulation staff.
class HoldRecord(object):
    '''Base class for records describing a hold request.

    The fields mirror the TIND holds page contents plus a few additional
    columns kept in the circulation staff's tracking spreadsheet. All
    fields are initialized to (and stored as) strings, including dates.
    '''

    def __init__(self):
        self.requester_name = ''            # String: name of the requester
        self.requester_type = ''            # String
        self.requester_url = ''             # String: URL
        self.item_title = ''                # String: title of the held item
        self.item_details_url = ''          # String: URL
        self.item_record_url = ''           # String: URL
        self.item_call_number = ''          # String
        self.item_barcode = ''              # String: used to match records (see same_request)
        self.item_location_name = ''        # String
        self.item_location_code = ''        # String
        self.item_loan_status = ''          # String: loan status as reported by TIND
        self.item_loan_url = ''             # String: URL
        self.date_requested = ''            # String (date)
        self.date_due = ''                  # String (date)
        self.date_last_notice_sent = ''     # String (date)
        self.overdue_notices_count = ''     # String
        self.holds_count = ''               # String
# Utility functions.
# .............................................................................
def records_diff(known_records, new_records):
    '''Return the records from 'new_records' that are absent from
    'known_records'.  Records are matched on bar code, request date and
    requester name (see same_request()).'''
    if __debug__: log('Diffing known records with new records')
    missing = [candidate for candidate in new_records
               if not any(same_request(known, candidate) for known in known_records)]
    if __debug__: log('Found {} different records', len(missing))
    return missing
def same_request(record1, record2):
    '''Return True if the two hold records describe the same request, judged
    by bar code, request date, and requester name.'''
    keys = ('item_barcode', 'date_requested', 'requester_name')
    return all(getattr(record1, k) == getattr(record2, k) for k in keys)
def records_filter(method = 'all'):
    '''Return a predicate that takes a TindRecord and reports whether the
    record should be included in the output; meant to be handed to Python's
    filter() as the test function.
    '''
    # FIXME: filtering seemed potentially useful for the future, but this is
    # currently a no-op -- every record is accepted regardless of 'method'.
    def accept(record):
        return True
    return accept
# Debugging aids.
def print_records(records_list, specific = None):
    '''Print a human-readable summary of each record to stdout (debugging aid).

    The 'specific' parameter is currently unused.
    '''
    for record in records_list:
        print('title: {}\nbarcode: {}\nlocation: {}\ndate requested: {}\nrequester name: {}\nstatus in TIND: {}\n\n'
              .format(record.item_title,
                      record.item_barcode,
                      record.item_location_code,
                      record.date_requested,
                      record.requester_name,
                      record.item_loan_status))
def find_record(barcode, records_list):
    '''Return the first record in 'records_list' whose bar code equals
    'barcode', or None if there is no such record.'''
    return next((rec for rec in records_list if rec.item_barcode == barcode), None)
| [
"holdit.debug.log"
] | [((2023, 2068), 'holdit.debug.log', 'log', (['"""Diffing known records with new records"""'], {}), "('Diffing known records with new records')\n", (2026, 2068), False, 'from holdit.debug import log\n')] |
#!/usr/bin/env python
##############################################################
# preparation of srtm data for use in gamma
# module of software pyroSAR
# <NAME> 2014-18
##############################################################
"""
The following tasks are performed by executing this script:
-reading of a parameter file dem.par
--see object par for necessary values; file is automatically created by starting the script via the GUI
-if necessary, creation of output and logfile directories
-generation of a DEM parameter file for each .hgt (SRTM) file in the working directory or its subdirectories
--the corresponding GAMMA command is create_dem_par, which is interactive. the list variables dempar and dempar2 are piped to the command line for automation
-if multiple files are found, mosaicing is performed
-replacement and interpolation of missing values
-transformation from equiangular (EQA) to UTM projection using a SLC parameter file
"""
import sys
if sys.version_info >= (3, 0):
from urllib.request import urlopen
else:
from urllib2 import urlopen
import os
import re
import shutil
import zipfile as zf
from spatialist.envi import HDRobject, hdr
from spatialist import raster
from . import ISPPar, process, UTM, slc_corners
import pyroSAR
from pyroSAR.ancillary import finder, run
def fill(dem, dem_out, logpath=None, replace=False):
    """
    Replace nodata values in a GAMMA DEM and interpolate the gaps.

    Runs the GAMMA commands `replace_values` (twice) and `interp_ad` on the
    input DEM, writes the filled result to `dem_out`, duplicates the DEM
    parameter file and creates an ENVI header for the output.

    :param dem: the input DEM (GAMMA format, with a matching '<dem>.par' file)
    :param dem_out: the name of the filled output DEM
    :param logpath: a directory for the GAMMA command logfiles (None: no logging)
    :param replace: delete the input DEM (and its .par/.hdr/.aux.xml companions) afterwards?
    """
    width = ISPPar(dem + '.par').width
    path_dem = os.path.dirname(dem_out)
    rpl_flg = 0
    # data type flag passed to the GAMMA commands; 4 presumably selects
    # FLOAT here -- confirm against the GAMMA replace_values/interp_ad docs
    dtype = 4
    # replace values
    # first map 0 -> 1, then -32768 -> 0; apparently so that 0 exclusively
    # marks the nodata cells to be interpolated below -- TODO confirm
    value = 0
    new_value = 1
    process(['replace_values', dem, value, new_value, dem + '_temp', width, rpl_flg, dtype], path_dem, logpath)
    value = -32768
    new_value = 0
    process(['replace_values', dem + '_temp', value, new_value, dem + '_temp2', width, rpl_flg, dtype], path_dem, logpath)
    # interpolate missing values
    r_max = 9
    np_min = 40
    np_max = 81
    w_mode = 2
    process(['interp_ad', dem + '_temp2', dem_out, width, r_max, np_min, np_max, w_mode, dtype], path_dem, logpath)
    # remove temporary files
    os.remove(dem+'_temp')
    os.remove(dem+'_temp2')
    # duplicate parameter file for newly created dem
    shutil.copy(dem+'.par', dem_out+'.par')
    # create ENVI header file
    hdr(dem_out+'.par')
    if replace:
        # delete the input DEM together with all of its metadata files
        for item in [dem+x for x in ['', '.par', '.hdr', '.aux.xml'] if os.path.isfile(dem+x)]:
            os.remove(item)
def transform(infile, outfile, posting=90):
    """
    transform SRTM DEM from EQA to UTM projection

    :param infile: the input DEM in EQA projection (GAMMA format, with a matching '.par' file)
    :param outfile: the name of the UTM-projected output DEM
    :param posting: the target pixel posting of the output (passed to create_dem_par as '-posting posting')
    """
    # read DEM parameter file
    par = ISPPar(infile + '.par')
    # transform corner coordinate to UTM
    utm = UTM(infile + '.par')
    # remove any pre-existing output files
    for item in [outfile, outfile+'.par']:
        if os.path.isfile(item):
            os.remove(item)
    # determine false northing from parameter file coordinates
    # (a false northing of 10,000 km is used for the southern hemisphere)
    falsenorthing = 10000000. if par.corner_lat < 0 else 0
    # create new DEM parameter file with UTM projection details
    inlist = ['UTM', 'WGS84', 1, utm.zone, falsenorthing, os.path.basename(outfile), '', '', '', '', '', '-{0} {0}'.format(posting), '']
    process(['create_dem_par', outfile + '.par'], inlist=inlist)
    # transform dem
    process(['dem_trans', infile + '.par', infile, outfile + '.par', outfile, '-', '-', '-', 1])
    hdr(outfile+'.par')
def dempar(dem, logpath=None):
    """
    create GAMMA parameter text files for DEM files
    currently only EQA and UTM projections with WGS84 ellipsoid are supported

    :param dem: a DEM file readable by GDAL
    :param logpath: a directory for the GAMMA command logfile (None: no logging)
    :raises IOError: if the raster data type is not one of Int16/UInt16/Float32
    :raises ValueError: if the projection is not longlat/utm or the ellipsoid is not WGS84
    """
    rast = raster.Raster(dem)
    # determine data type
    # map GDAL data type names to the GAMMA data type keywords
    dtypes = {'Int16': 'INTEGER*2', 'UInt16': 'INTEGER*2', 'Float32': 'REAL*4'}
    if rast.dtype not in dtypes:
        raise IOError('data type not supported')
    else:
        dtype = dtypes[rast.dtype]
    # format pixel posting and top left coordinate
    posting = str(rast.geo['yres'])+' '+str(rast.geo['xres'])
    latlon = str(rast.geo['ymax'])+' '+str(rast.geo['xmin'])
    # evaluate projection
    projections = {'longlat': 'EQA', 'utm': 'UTM'}
    if rast.proj4args['proj'] not in projections:
        raise ValueError('projection not supported (yet)')
    else:
        projection = projections[rast.proj4args['proj']]
    # get ellipsoid
    ellipsoid = rast.proj4args['ellps'] if 'ellps' in rast.proj4args else rast.proj4args['datum']
    if ellipsoid != 'WGS84':
        raise ValueError('ellipsoid not supported (yet)')
    # create list for GAMMA command input
    # (UTM additionally requires the zone and a false northing; the southern
    # hemisphere uses a 10,000 km false northing)
    if projection == 'UTM':
        zone = rast.proj4args['zone']
        falsenorthing = 10000000. if rast.geo['ymin'] < 0 else 0
        parlist = [projection, ellipsoid, 1, zone, falsenorthing, os.path.basename(dem), dtype, 0, 1, rast.cols, rast.rows, posting, latlon]
    else:
        parlist = [projection, ellipsoid, 1, os.path.basename(dem), dtype, 0, 1, rast.cols, rast.rows, posting, latlon]
    # execute GAMMA command
    process(['create_dem_par', os.path.splitext(dem)[0] + '.par'], os.path.dirname(dem), logpath, inlist=parlist)
def swap(data, outname):
    """
    byte swapping from small to big endian (as required by GAMMA)

    :param data: an ENVI-format raster file (with a matching '.hdr' file)
    :param outname: the name of the byte-swapped output file
    :raises IOError: if the input is not ENVI format or the data type is unsupported
    """
    rast = raster.Raster(data)
    dtype = rast.dtype
    if rast.format != 'ENVI':
        raise IOError('only ENVI format supported')
    # map GDAL data type names to the byte width passed to swap_bytes
    dtype_lookup = {'Int16': 2, 'CInt16': 2, 'Int32': 4, 'Float32': 4, 'CFloat32': 4, 'Float64': 8}
    if dtype not in dtype_lookup:
        raise IOError('data type {} not supported'.format(dtype))
    process(['swap_bytes', data, outname, str(dtype_lookup[dtype])])
    # write a new ENVI header declaring big-endian byte order (byte_order=1)
    header = HDRobject(data+'.hdr')
    header.byte_order = 1
    hdr(header, outname+'.hdr')
def mosaic(demlist, outname, byteorder=1, gammapar=True):
    """
    mosaicing of multiple DEMs

    :param demlist: a list of at least two DEM files; all are assumed to share
        the nodata value of the first entry -- TODO confirm
    :param outname: the name of the mosaicked output file (ENVI format)
    :param byteorder: 1: convert the result to big endian (as needed by GAMMA); otherwise leave as-is
    :param gammapar: also create a GAMMA DEM parameter file for the result?
    :raises IOError: if fewer than two DEMs are passed
    """
    if len(demlist) < 2:
        raise IOError('length of demlist < 2')
    nodata = str(raster.Raster(demlist[0]).nodata)
    run(['gdalwarp', '-q', '-of', 'ENVI', '-srcnodata', nodata, '-dstnodata', nodata, demlist, outname])
    if byteorder == 1:
        # swap to big endian, then replace the original mosaic with the swapped copy
        swap(outname, outname+'_swap')
        for item in [outname, outname+'.hdr', outname+'.aux.xml']:
            os.remove(item)
        os.rename(outname+'_swap', outname)
        os.rename(outname+'_swap.hdr', outname+'.hdr')
    if gammapar:
        dempar(outname)
def hgt(parfiles):
    """
    Concatenate the names of the SRTM hgt tiles overlapping multiple SAR scenes.

    The input is a list of pyroSAR.ID objects and/or GAMMA SAR scene parameter
    files, which are read for their corner coordinates; from these, the next
    lower integer latitude and longitude are computed for each scene.
    hgt files are supplied in 1-degree equiangular format named e.g.
    N16W094.hgt (pattern [NS][0-9]{2}[EW][0-9]{3}.hgt).
    For the northern and eastern hemispheres the absolute latitude/longitude
    values are smaller than the lower left coordinate of the SAR image; west
    and south coordinates are negative, hence the nearest lower-left integer
    absolute value is going to be larger.

    :param parfiles: a list of pyroSAR.ID objects and/or '*.par' file names
    :return: a list of hgt tile file names covering all scenes (plus an extra one-degree buffer)
    :raises TypeError: if an entry is neither a pyroSAR.ID object nor a '.par' file name
    """
    lat = []
    lon = []
    for parfile in parfiles:
        if isinstance(parfile, pyroSAR.ID):
            corners = parfile.getCorners()
        elif isinstance(parfile, str) and parfile.endswith('.par'):
            corners = slc_corners(parfile)
        else:
            # previously an unsupported entry either raised an obscure
            # NameError or silently reused the previous scene's corners;
            # fail loudly instead
            raise TypeError('items of parfiles must be of type pyroSAR.ID or names of *.par files')
        # collect the integer floor of all corner coordinates
        lat += [int(float(corners[x]) // 1) for x in ['ymin', 'ymax']]
        lon += [int(float(corners[x]) // 1) for x in ['xmin', 'xmax']]
    # add missing lat/lon values (and add an extra buffer of one degree)
    lat = range(min(lat), max(lat) + 1)
    lon = range(min(lon), max(lon) + 1)
    # convert coordinates to string with leading zeros and hemisphere identification letter
    lat = [str(x).zfill(2 + len(str(x)) - len(str(x).strip('-'))) for x in lat]
    lat = [x.replace('-', 'S') if '-' in x else 'N' + x for x in lat]
    lon = [str(x).zfill(3 + len(str(x)) - len(str(x).strip('-'))) for x in lon]
    lon = [x.replace('-', 'W') if '-' in x else 'E' + x for x in lon]
    # concatenate all formatted latitudes and longitudes with each other as the final product
    return [x + y + '.hgt' for x in lat for y in lon]
def makeSRTM(scenes, srtmdir, outname):
    """
    Create a DEM from SRTM tiles
    Input is a list of pyroSAR.ID objects from which coordinates are read to determine the required DEM extent
    Mosaics SRTM DEM tiles, converts them to Gamma format and subtracts offset to WGS84 ellipsoid
    for DEMs downloaded from USGS http://gdex.cr.usgs.gov or CGIAR http://srtm.csi.cgiar.org

    :param scenes: a list of pyroSAR.ID objects (and/or '*.par' file names; see hgt())
    :param srtmdir: a local directory containing the unpacked SRTM hgt tiles
    :param outname: the name of the final GAMMA-format DEM
    """
    # all intermediate products go into a temporary directory next to the output
    tempdir = outname+'___temp'
    os.makedirs(tempdir)
    # determine the tile names covering all scenes and find them in srtmdir
    hgt_options = hgt(scenes)
    hgt_files = finder(srtmdir, hgt_options)
    # todo: check if really needed
    nodatas = [str(int(raster.Raster(x).nodata)) for x in hgt_files]
    srtm_vrt = os.path.join(tempdir, 'srtm.vrt')
    srtm_temp = srtm_vrt.replace('.vrt', '_tmp')
    srtm_final = srtm_vrt.replace('.vrt', '')
    # mosaic the tiles into a VRT, convert to ENVI, then to GAMMA via srtm2dem
    run(['gdalbuildvrt', '-overwrite', '-srcnodata', ' '.join(nodatas), srtm_vrt, hgt_files])
    run(['gdal_translate', '-of', 'ENVI', '-a_nodata', -32768, srtm_vrt, srtm_temp])
    process(['srtm2dem', srtm_temp, srtm_final, srtm_final + '.par', 2, '-'], outdir=tempdir)
    # move the final DEM and its parameter file out of the temporary directory
    shutil.move(srtm_final, outname)
    shutil.move(srtm_final+'.par', outname+'.par')
    hdr(outname+'.par')
    shutil.rmtree(tempdir)
def hgt_collect(parfiles, outdir, demdir=None, arcsec=3):
    """
    automatic downloading and unpacking of srtm tiles

    base directory must contain SLC files in GAMMA format including their parameter files for reading coordinates
    additional dem directory may locally contain srtm files. This directory is searched for locally existing files, which are then copied to the current working directory

    Parameters
    ----------
    parfiles: list of GAMMA parameter files (or pyroSAR.ID objects) defining the area of interest
    outdir: directory into which missing tiles are downloaded and unpacked
    demdir: optional local directory searched for already existing tiles
    arcsec: SRTM resolution; 1 (SRTMGL1) or 3 (version 2.1) arc seconds

    Returns
    -------
    list of local file paths of all required hgt tiles
    """
    # concatenate required hgt tile names
    target_ids = hgt(parfiles)
    targets = []
    # regex matching tile base names like N51E007 / S09W123
    pattern = '[NS][0-9]{2}[EW][0-9]{3}'
    # if an additional dem directory has been defined, check this directory for required hgt tiles
    if demdir is not None:
        targets.extend(finder(demdir, target_ids))
    # check for additional potentially existing hgt tiles in the defined output directory
    # (skip tiles already collected from demdir to avoid duplicates)
    extras = [os.path.join(outdir, x) for x in target_ids if os.path.isfile(os.path.join(outdir, x)) and not re.search(x, '\n'.join(targets))]
    targets.extend(extras)
    print('found {} relevant SRTM tiles...'.format(len(targets)))
    # search server for all required tiles, which were not found in the local directories
    if len(targets) < len(target_ids):
        print('searching for additional SRTM tiles on the server...')
        onlines = []
        if arcsec == 1:
            remotes = ['http://e4ftl01.cr.usgs.gov/SRTM/SRTMGL1.003/2000.02.11/']
            remotepattern = pattern+'.SRTMGL1.hgt.zip'
        elif arcsec == 3:
            server = 'http://dds.cr.usgs.gov/srtm/version2_1/SRTM3/'
            # 3-arcsec tiles are organized in per-continent subdirectories
            remotes = [os.path.join(server, x) for x in ['Africa', 'Australia', 'Eurasia', 'Islands', 'North_America', 'South_America']]
            remotepattern = pattern+'[.]hgt.zip'
        else:
            raise ValueError('argument arcsec must be of value 1 or 3')
        # scrape each remote directory listing for zip files matching the tile pattern
        for remote in remotes:
            response = urlopen(remote).read()
            items = sorted(set(re.findall(remotepattern, response)))
            for item in items:
                outname = re.findall(pattern, item)[0]+'.hgt'
                # queue only tiles that are required and not yet present locally
                if outname in target_ids and outname not in [os.path.basename(x) for x in targets]:
                    onlines.append(os.path.join(remote, item))
        # if additional tiles have been found online, download and unzip them to the local directory
        if len(onlines) > 0:
            print('downloading {} SRTM tiles...'.format(len(onlines)))
            for candidate in onlines:
                localname = os.path.join(outdir, re.findall(pattern, candidate)[0]+'.hgt')
                infile = urlopen(candidate)
                # download the zip archive, extract the hgt file, then delete the archive
                with open(localname+'.zip', 'wb') as outfile:
                    outfile.write(infile.read())
                infile.close()
                with zf.ZipFile(localname+'.zip', 'r') as z:
                    z.extractall(outdir)
                os.remove(localname+'.zip')
                targets.append(localname)
    return targets
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"zipfile.ZipFile",
"spatialist.raster.Raster",
"pyroSAR.ancillary.run",
"os.remove",
"spatialist.envi.HDRobject",
"urllib2.urlopen",
"pyroSAR.ancillary.finder",
"shutil.move",
"os.rename",
"os.path.splitext",
"os.path.isfile",
"os.path.dirname",
"shutil.copy",
"re.findall",
"os.makedirs"... | [((1426, 1450), 'os.path.dirname', 'os.path.dirname', (['dem_out'], {}), '(dem_out)\n', (1441, 1450), False, 'import os\n'), ((2054, 2078), 'os.remove', 'os.remove', (["(dem + '_temp')"], {}), "(dem + '_temp')\n", (2063, 2078), False, 'import os\n'), ((2081, 2106), 'os.remove', 'os.remove', (["(dem + '_temp2')"], {}), "(dem + '_temp2')\n", (2090, 2106), False, 'import os\n'), ((2163, 2206), 'shutil.copy', 'shutil.copy', (["(dem + '.par')", "(dem_out + '.par')"], {}), "(dem + '.par', dem_out + '.par')\n", (2174, 2206), False, 'import shutil\n'), ((2238, 2259), 'spatialist.envi.hdr', 'hdr', (["(dem_out + '.par')"], {}), "(dem_out + '.par')\n", (2241, 2259), False, 'from spatialist.envi import HDRobject, hdr\n'), ((3265, 3286), 'spatialist.envi.hdr', 'hdr', (["(outfile + '.par')"], {}), "(outfile + '.par')\n", (3268, 3286), False, 'from spatialist.envi import HDRobject, hdr\n'), ((3475, 3493), 'spatialist.raster.Raster', 'raster.Raster', (['dem'], {}), '(dem)\n', (3488, 3493), False, 'from spatialist import raster\n'), ((5071, 5090), 'spatialist.raster.Raster', 'raster.Raster', (['data'], {}), '(data)\n', (5084, 5090), False, 'from spatialist import raster\n'), ((5478, 5502), 'spatialist.envi.HDRobject', 'HDRobject', (["(data + '.hdr')"], {}), "(data + '.hdr')\n", (5487, 5502), False, 'from spatialist.envi import HDRobject, hdr\n'), ((5531, 5560), 'spatialist.envi.hdr', 'hdr', (['header', "(outname + '.hdr')"], {}), "(header, outname + '.hdr')\n", (5534, 5560), False, 'from spatialist.envi import HDRobject, hdr\n'), ((5793, 5897), 'pyroSAR.ancillary.run', 'run', (["['gdalwarp', '-q', '-of', 'ENVI', '-srcnodata', nodata, '-dstnodata',\n nodata, demlist, outname]"], {}), "(['gdalwarp', '-q', '-of', 'ENVI', '-srcnodata', nodata, '-dstnodata',\n nodata, demlist, outname])\n", (5796, 5897), False, 'from pyroSAR.ancillary import finder, run\n'), ((8332, 8352), 'os.makedirs', 'os.makedirs', (['tempdir'], {}), '(tempdir)\n', (8343, 8352), False, 'import 
os\n'), ((8401, 8429), 'pyroSAR.ancillary.finder', 'finder', (['srtmdir', 'hgt_options'], {}), '(srtmdir, hgt_options)\n', (8407, 8429), False, 'from pyroSAR.ancillary import finder, run\n'), ((8551, 8584), 'os.path.join', 'os.path.join', (['tempdir', '"""srtm.vrt"""'], {}), "(tempdir, 'srtm.vrt')\n", (8563, 8584), False, 'import os\n'), ((8780, 8865), 'pyroSAR.ancillary.run', 'run', (["['gdal_translate', '-of', 'ENVI', '-a_nodata', -32768, srtm_vrt, srtm_temp]"], {}), "(['gdal_translate', '-of', 'ENVI', '-a_nodata', -32768, srtm_vrt, srtm_temp]\n )\n", (8783, 8865), False, 'from pyroSAR.ancillary import finder, run\n'), ((8961, 8993), 'shutil.move', 'shutil.move', (['srtm_final', 'outname'], {}), '(srtm_final, outname)\n', (8972, 8993), False, 'import shutil\n'), ((8998, 9048), 'shutil.move', 'shutil.move', (["(srtm_final + '.par')", "(outname + '.par')"], {}), "(srtm_final + '.par', outname + '.par')\n", (9009, 9048), False, 'import shutil\n'), ((9049, 9070), 'spatialist.envi.hdr', 'hdr', (["(outname + '.par')"], {}), "(outname + '.par')\n", (9052, 9070), False, 'from spatialist.envi import HDRobject, hdr\n'), ((9074, 9096), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (9087, 9096), False, 'import shutil\n'), ((2703, 2723), 'os.path.isfile', 'os.path.isfile', (['item'], {}), '(item)\n', (2717, 2723), False, 'import os\n'), ((2999, 3024), 'os.path.basename', 'os.path.basename', (['outfile'], {}), '(outfile)\n', (3015, 3024), False, 'import os\n'), ((4904, 4924), 'os.path.dirname', 'os.path.dirname', (['dem'], {}), '(dem)\n', (4919, 4924), False, 'import os\n'), ((6059, 6096), 'os.rename', 'os.rename', (["(outname + '_swap')", 'outname'], {}), "(outname + '_swap', outname)\n", (6068, 6096), False, 'import os\n'), ((6103, 6153), 'os.rename', 'os.rename', (["(outname + '_swap.hdr')", "(outname + '.hdr')"], {}), "(outname + '_swap.hdr', outname + '.hdr')\n", (6112, 6153), False, 'import os\n'), ((9929, 9952), 'os.path.join', 'os.path.join', 
(['outdir', 'x'], {}), '(outdir, x)\n', (9941, 9952), False, 'import os\n'), ((2383, 2398), 'os.remove', 'os.remove', (['item'], {}), '(item)\n', (2392, 2398), False, 'import os\n'), ((2737, 2752), 'os.remove', 'os.remove', (['item'], {}), '(item)\n', (2746, 2752), False, 'import os\n'), ((4603, 4624), 'os.path.basename', 'os.path.basename', (['dem'], {}), '(dem)\n', (4619, 4624), False, 'import os\n'), ((4733, 4754), 'os.path.basename', 'os.path.basename', (['dem'], {}), '(dem)\n', (4749, 4754), False, 'import os\n'), ((5755, 5780), 'spatialist.raster.Raster', 'raster.Raster', (['demlist[0]'], {}), '(demlist[0])\n', (5768, 5780), False, 'from spatialist import raster\n'), ((6035, 6050), 'os.remove', 'os.remove', (['item'], {}), '(item)\n', (6044, 6050), False, 'import os\n'), ((9796, 9822), 'pyroSAR.ancillary.finder', 'finder', (['demdir', 'target_ids'], {}), '(demdir, target_ids)\n', (9802, 9822), False, 'from pyroSAR.ancillary import finder, run\n'), ((2347, 2370), 'os.path.isfile', 'os.path.isfile', (['(dem + x)'], {}), '(dem + x)\n', (2361, 2370), False, 'import os\n'), ((11661, 11679), 'urllib2.urlopen', 'urlopen', (['candidate'], {}), '(candidate)\n', (11668, 11679), False, 'from urllib2 import urlopen\n'), ((11940, 11969), 'os.remove', 'os.remove', (["(localname + '.zip')"], {}), "(localname + '.zip')\n", (11949, 11969), False, 'import os\n'), ((4868, 4889), 'os.path.splitext', 'os.path.splitext', (['dem'], {}), '(dem)\n', (4884, 4889), False, 'import os\n'), ((8489, 8505), 'spatialist.raster.Raster', 'raster.Raster', (['x'], {}), '(x)\n', (8502, 8505), False, 'from spatialist import raster\n'), ((9991, 10014), 'os.path.join', 'os.path.join', (['outdir', 'x'], {}), '(outdir, x)\n', (10003, 10014), False, 'import os\n'), ((10653, 10676), 'os.path.join', 'os.path.join', (['server', 'x'], {}), '(server, x)\n', (10665, 10676), False, 'import os\n'), ((10957, 10972), 'urllib2.urlopen', 'urlopen', (['remote'], {}), '(remote)\n', (10964, 10972), False, 'from 
urllib2 import urlopen\n'), ((11011, 11046), 're.findall', 're.findall', (['remotepattern', 'response'], {}), '(remotepattern, response)\n', (11021, 11046), False, 'import re\n'), ((11843, 11878), 'zipfile.ZipFile', 'zf.ZipFile', (["(localname + '.zip')", '"""r"""'], {}), "(localname + '.zip', 'r')\n", (11853, 11878), True, 'import zipfile as zf\n'), ((11106, 11131), 're.findall', 're.findall', (['pattern', 'item'], {}), '(pattern, item)\n', (11116, 11131), False, 'import re\n'), ((11277, 11303), 'os.path.join', 'os.path.join', (['remote', 'item'], {}), '(remote, item)\n', (11289, 11303), False, 'import os\n'), ((11203, 11222), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (11219, 11222), False, 'import os\n'), ((11594, 11624), 're.findall', 're.findall', (['pattern', 'candidate'], {}), '(pattern, candidate)\n', (11604, 11624), False, 'import re\n')] |
import unittest
from my_test_api import TestAPI
class TestCreateIssue(TestAPI):
    """Integration test: create an issue through the REST API and read it back."""

    def test_create_issue(self):
        """Create an issue, then fetch it by the ID from the Location header."""
        params = {
            'project': 'API',
            'summary': 'test issue by robots',
            'description': 'You are mine ! ',
        }
        response = self.put('/issue/', params)
        # The API returns the new resource URL in the Location header;
        # the last path segment is the new issue ID.
        issue_id = response.headers['Location'].split('/')[-1]
        print('Created item ID is ', issue_id)
        # assertEquals is a deprecated alias; use assertEqual instead.
        self.assertEqual(response.status_code, 201)
        response = self.get('/issue/' + issue_id)
        self.assertEqual(response.status_code, 200)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
"unittest.main"
] | [((616, 631), 'unittest.main', 'unittest.main', ([], {}), '()\n', (629, 631), False, 'import unittest\n')] |
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_to_group(app, db):
    """Add a random contact to a random group and verify the membership via the DB layer."""
    # make sure at least one contact and one group exist as test preconditions
    if not db.get_contact_list():
        app.contact.create(Contact(firstname="contact", lastname="forGroup", address="UA, Kyiv, KPI", homephone="0123456789", email="<EMAIL>"))
    if not db.get_group_list():
        app.group.create(Group(name="groupForContact", header="header", footer="footer"))
    picked_contact = random.choice(db.get_contact_list())
    picked_group = random.choice(db.get_group_list())
    app.contact.add_contact_to_group(picked_contact.id, picked_group.id)
    # the contact must now appear in the group's member list
    assert object_in_list(picked_contact, db.get_contacts_from_group(picked_group))
def test_add_contact_to_group_2(app, db, orm):
    """Same scenario as above, but verify the membership through the ORM layer."""
    # ensure the preconditions: at least one contact and one group in the DB
    if not db.get_contact_list():
        app.contact.create(Contact(firstname="contact", lastname="forGroup", address="UA, Kyiv, KPI", homephone="0123456789", email="<EMAIL>"))
    if not db.get_group_list():
        app.group.create(Group(name="groupForContact", header="header", footer="footer"))
    chosen_contact = random.choice(db.get_contact_list())
    chosen_group = random.choice(db.get_group_list())
    app.contact.add_contact_to_group(chosen_contact.id, chosen_group.id)
    # membership check against the ORM view of the group
    assert chosen_contact in orm.get_contacts_in_group(chosen_group)
def object_in_list(object, list):
    """Return True if *object* is contained in *list*, otherwise False.

    NOTE(review): the parameter names shadow the built-ins ``object`` and
    ``list``; they are kept unchanged for backward compatibility with any
    keyword-argument callers.
    """
    # The explicit if/else was redundant: ``in`` already yields a bool.
    return object in list
| [
"model.group.Group",
"model.contact.Contact"
] | [((187, 306), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""contact"""', 'lastname': '"""forGroup"""', 'address': '"""UA, Kyiv, KPI"""', 'homephone': '"""0123456789"""', 'email': '"""<EMAIL>"""'}), "(firstname='contact', lastname='forGroup', address='UA, Kyiv, KPI',\n homephone='0123456789', email='<EMAIL>')\n", (194, 306), False, 'from model.contact import Contact\n'), ((367, 430), 'model.group.Group', 'Group', ([], {'name': '"""groupForContact"""', 'header': '"""header"""', 'footer': '"""footer"""'}), "(name='groupForContact', header='header', footer='footer')\n", (372, 430), False, 'from model.group import Group\n'), ((843, 962), 'model.contact.Contact', 'Contact', ([], {'firstname': '"""contact"""', 'lastname': '"""forGroup"""', 'address': '"""UA, Kyiv, KPI"""', 'homephone': '"""0123456789"""', 'email': '"""<EMAIL>"""'}), "(firstname='contact', lastname='forGroup', address='UA, Kyiv, KPI',\n homephone='0123456789', email='<EMAIL>')\n", (850, 962), False, 'from model.contact import Contact\n'), ((1023, 1086), 'model.group.Group', 'Group', ([], {'name': '"""groupForContact"""', 'header': '"""header"""', 'footer': '"""footer"""'}), "(name='groupForContact', header='header', footer='footer')\n", (1028, 1086), False, 'from model.group import Group\n')] |
import csv
from collections import Counter
from collections import defaultdict
from datetime import datetime
# Make dictionary with district as key
# Create a dictionary that defaults to a list: crimes_by_district
crimes_by_district = defaultdict(list)
# Use a context manager so the CSV file is always closed
# (the original left the file handle open for the lifetime of the script).
with open('crime_sampler.csv', 'r') as csvfile:
    # Loop over a DictReader of the CSV file
    for row in csv.DictReader(csvfile):
        # Pop the district from each row: district
        district = row.pop('District')
        # Append the rest of the data to the list for the proper district
        crimes_by_district[district].append(row)

# Number of arrests in each City District for each year
# Loop over the crimes_by_district using expansion as district and crimes
for district, crimes in crimes_by_district.items():
    # Print the district
    print(district)
    # Create an empty Counter object: year_count
    year_count = Counter()
    # Loop over the crimes:
    for crime in crimes:
        # If there was an arrest
        if crime['Arrest'] == 'true':
            # Convert the Date to a datetime and get the year
            year = datetime.strptime(crime['Date'], '%m/%d/%Y %I:%M:%S %p').year
            # Increment the Counter for the year
            year_count[year] += 1
    # Print the counter
    print(year_count)
# Insight: Looks like most arrests took place in the 11th district
| [
"collections.Counter",
"csv.DictReader",
"collections.defaultdict",
"datetime.datetime.strptime"
] | [((310, 327), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (321, 327), False, 'from collections import defaultdict\n'), ((381, 404), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (395, 404), False, 'import csv\n'), ((921, 930), 'collections.Counter', 'Counter', ([], {}), '()\n', (928, 930), False, 'from collections import Counter\n'), ((1141, 1197), 'datetime.datetime.strptime', 'datetime.strptime', (["crime['Date']", '"""%m/%d/%Y %I:%M:%S %p"""'], {}), "(crime['Date'], '%m/%d/%Y %I:%M:%S %p')\n", (1158, 1197), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
# Break up idstr file into separate measid/objectid lists per exposure on /data0
import os
import sys
import numpy as np
import time
from dlnpyutils import utils as dln, db
from astropy.io import fits
import sqlite3
import socket
from argparse import ArgumentParser
def breakup_idstr(dbfile):
    """ Break-up idstr file into separate measid/objectid lists per exposure on /data0.

    Parameters
    ----------
    dbfile : str or list of str
        One or more idstr sqlite3 database filenames.

    For each database, the full idstr table is read and split per exposure;
    one .npy file of (measid, objectid) pairs is written per exposure under
    the instrument/night directory tree in the output area.
    """
    t00 = time.time()
    outdir = '/data0/dnidever/nsc/instcal/v3/idstr/'
    # Load the exposures table
    expcat = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz',1)
    # Make sure it's a list
    if type(dbfile) is str: dbfile=[dbfile]
    print('Breaking up '+str(len(dbfile))+' database files')
    # Loop over files
    for i,dbfile1 in enumerate(dbfile):
        print(str(i+1)+' '+dbfile1)
        if os.path.exists(dbfile1):
            t0 = time.time()
            dbbase1 = os.path.basename(dbfile1)[0:-9]  # remove _idstr.db ending
            # Read the full idstr table, then release the connection
            # (the original left the sqlite connection open).
            d = sqlite3.connect(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
            try:
                cur = d.cursor()
                cmd = 'select measid,exposure,objectid from idstr'
                t1 = time.time()
                data = cur.execute(cmd).fetchall()
            finally:
                d.close()
            print(' '+str(len(data))+' rows read in %5.1f sec. ' % (time.time()-t1))
            # Break up data into lists
            measid,exposure,objectid = list(zip(*data))
            measid = np.array(measid)
            objectid = np.array(objectid)
            exposure = np.array(exposure)
            eindex = dln.create_index(exposure)
            # Match exposures to exposure catalog
            ind1,ind2 = dln.match(expcat['EXPOSURE'],eindex['value'])
            # Loop over exposures and write output files
            nexp = len(eindex['value'])
            print(' '+str(nexp)+' exposures')
            measid_maxlen = np.max(dln.strlen(measid))
            objectid_maxlen = np.max(dln.strlen(objectid))
            # np.str is a deprecated alias for the builtin str (removed in
            # numpy>=1.24); use str directly.
            df = np.dtype([('measid',str,measid_maxlen+1),('objectid',str,objectid_maxlen+1)])
            # Loop over the exposures and write out the files
            for k in range(nexp):
                if nexp>100:
                    if k % 100 == 0: print(' '+str(k+1))
                ind = eindex['index'][eindex['lo'][k]:eindex['hi'][k]+1]
                cat = np.zeros(len(ind),dtype=df)
                cat['measid'] = measid[ind]
                cat['objectid'] = objectid[ind]
                instcode = expcat['INSTRUMENT'][ind1[k]]
                dateobs = expcat['DATEOBS'][ind1[k]]
                night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
                # exist_ok=True avoids the race when another process creates
                # the directory concurrently (replaces the bare try/except).
                os.makedirs(outdir+instcode+'/'+night+'/'+eindex['value'][k], exist_ok=True)
                outfile = outdir+instcode+'/'+night+'/'+eindex['value'][k]+'/'+eindex['value'][k]+'__'+dbbase1+'.npy'
                np.save(outfile,cat)
            print(' dt = %6.1f sec. ' % (time.time()-t0))
        else:
            print(' '+dbfile1+' NOT FOUND')
    print('dt = %6.1f sec.' % (time.time()-t00))
if __name__ == "__main__":
parser = ArgumentParser(description='Break up idstr into separate lists per exposure.')
parser.add_argument('dbfile', type=str, nargs=1, help='Database filename')
args = parser.parse_args()
hostname = socket.gethostname()
host = hostname.split('.')[0]
dbfile = args.dbfile[0]
# Input is a list
if dbfile[0]=='@':
listfile = dbfile[1:]
if os.path.exists(listfile):
dbfile = dln.readlines(listfile)
else:
print(listfile+' NOT FOUND')
sys.exit()
breakup_idstr(dbfile)
| [
"os.path.exists",
"dlnpyutils.utils.match",
"sys.exit",
"sqlite3.connect",
"argparse.ArgumentParser",
"os.makedirs",
"dlnpyutils.utils.strlen",
"dlnpyutils.utils.create_index",
"numpy.array",
"astropy.io.fits.getdata",
"dlnpyutils.utils.readlines",
"os.path.basename",
"time.time",
"numpy.d... | [((421, 432), 'time.time', 'time.time', ([], {}), '()\n', (430, 432), False, 'import time\n'), ((532, 624), 'astropy.io.fits.getdata', 'fits.getdata', (['"""/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz"""', '(1)'], {}), "(\n '/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz', 1)\n", (544, 624), False, 'from astropy.io import fits\n'), ((3464, 3542), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Break up idstr into separate lists per exposure."""'}), "(description='Break up idstr into separate lists per exposure.')\n", (3478, 3542), False, 'from argparse import ArgumentParser\n'), ((3669, 3689), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3687, 3689), False, 'import socket\n'), ((864, 887), 'os.path.exists', 'os.path.exists', (['dbfile1'], {}), '(dbfile1)\n', (878, 887), False, 'import os\n'), ((3839, 3863), 'os.path.exists', 'os.path.exists', (['listfile'], {}), '(listfile)\n', (3853, 3863), False, 'import os\n'), ((906, 917), 'time.time', 'time.time', ([], {}), '()\n', (915, 917), False, 'import time\n'), ((1072, 1164), 'sqlite3.connect', 'sqlite3.connect', (['dbfile1'], {'detect_types': '(sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)'}), '(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.\n PARSE_COLNAMES)\n', (1087, 1164), False, 'import sqlite3\n'), ((1267, 1278), 'time.time', 'time.time', ([], {}), '()\n', (1276, 1278), False, 'import time\n'), ((1528, 1544), 'numpy.array', 'np.array', (['measid'], {}), '(measid)\n', (1536, 1544), True, 'import numpy as np\n'), ((1568, 1586), 'numpy.array', 'np.array', (['objectid'], {}), '(objectid)\n', (1576, 1586), True, 'import numpy as np\n'), ((1610, 1628), 'numpy.array', 'np.array', (['exposure'], {}), '(exposure)\n', (1618, 1628), True, 'import numpy as np\n'), ((1650, 1676), 'dlnpyutils.utils.create_index', 'dln.create_index', (['exposure'], {}), '(exposure)\n', (1666, 1676), True, 'from dlnpyutils import 
utils as dln, db\n'), ((1751, 1797), 'dlnpyutils.utils.match', 'dln.match', (["expcat['EXPOSURE']", "eindex['value']"], {}), "(expcat['EXPOSURE'], eindex['value'])\n", (1760, 1797), True, 'from dlnpyutils import utils as dln, db\n'), ((2072, 2169), 'numpy.dtype', 'np.dtype', (["[('measid', np.str, measid_maxlen + 1), ('objectid', np.str, \n objectid_maxlen + 1)]"], {}), "([('measid', np.str, measid_maxlen + 1), ('objectid', np.str, \n objectid_maxlen + 1)])\n", (2080, 2169), True, 'import numpy as np\n'), ((3887, 3910), 'dlnpyutils.utils.readlines', 'dln.readlines', (['listfile'], {}), '(listfile)\n', (3900, 3910), True, 'from dlnpyutils import utils as dln, db\n'), ((3978, 3988), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3986, 3988), False, 'import sys\n'), ((940, 965), 'os.path.basename', 'os.path.basename', (['dbfile1'], {}), '(dbfile1)\n', (956, 965), False, 'import os\n'), ((1976, 1994), 'dlnpyutils.utils.strlen', 'dln.strlen', (['measid'], {}), '(measid)\n', (1986, 1994), True, 'from dlnpyutils import utils as dln, db\n'), ((2033, 2053), 'dlnpyutils.utils.strlen', 'dln.strlen', (['objectid'], {}), '(objectid)\n', (2043, 2053), True, 'from dlnpyutils import utils as dln, db\n'), ((3234, 3255), 'numpy.save', 'np.save', (['outfile', 'cat'], {}), '(outfile, cat)\n', (3241, 3255), True, 'import numpy as np\n'), ((3405, 3416), 'time.time', 'time.time', ([], {}), '()\n', (3414, 3416), False, 'import time\n'), ((2747, 2821), 'os.path.exists', 'os.path.exists', (["(outdir + instcode + '/' + night + '/' + eindex['value'][k])"], {}), "(outdir + instcode + '/' + night + '/' + eindex['value'][k])\n", (2761, 2821), False, 'import os\n'), ((2981, 3052), 'os.makedirs', 'os.makedirs', (["(outdir + instcode + '/' + night + '/' + eindex['value'][k])"], {}), "(outdir + instcode + '/' + night + '/' + eindex['value'][k])\n", (2992, 3052), False, 'import os\n'), ((3297, 3308), 'time.time', 'time.time', ([], {}), '()\n', (3306, 3308), False, 'import time\n'), ((1395, 1406), 
'time.time', 'time.time', ([], {}), '()\n', (1404, 1406), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import unittest
import unittest.mock as mock
import requests
import json
from io import BytesIO
from fastapi.testclient import TestClient
from projects.api.main import app
from projects.database import session_scope
import tests.util as util
# Replace the real database session dependency with the test session,
# then build a test client against the FastAPI app.
app.dependency_overrides[session_scope] = util.override_session_scope
TEST_CLIENT = TestClient(app)
class TestPredictions(unittest.TestCase):
    """Tests for the POST /projects/{id}/deployments/{id}/predictions endpoint."""
    # show full diffs on assertion failures
    maxDiff = None
    def setUp(self):
        """
        Sets up the test before running it.
        """
        util.create_mocks()
    def tearDown(self):
        """
        Deconstructs the test after running it.
        """
        util.delete_mocks()
    def test_create_prediction_deployments_does_not_exist(self):
        """
        Should return an http status 404 and an error message "The specified deployment does not exist".
        """
        project_id = util.MOCK_UUID_1
        deployment_id = "unk"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions"
        )
        result = rv.json()
        expected = {
            "message": "The specified deployment does not exist",
            "code": "DeploymentNotFound",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 404)
    def test_create_prediction_projects_does_not_exist(self):
        """
        Should return an http status 404 and an error message "The specified projects does not exist".
        """
        project_id = "unk"
        deployment_id = util.MOCK_UUID_1
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions"
        )
        result = rv.json()
        expected = {
            "message": "The specified project does not exist",
            "code": "ProjectNotFound",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 404)
    def test_create_prediction_form_required(self):
        """
        Should return an http status 400 and a message 'either form-data or json is required'.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        # no body at all: neither form-data nor json
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions"
        )
        result = rv.json()
        expected = {
            "message": "either form-data or json is required",
            "code": "MissingRequiredFormDataOrJson",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 400)
    def test_create_prediction_dataset_name_required(self):
        """
        Should return an http status 400 and a message 'either dataset name or file is required'.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        # empty json body: no dataset name and no file
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions", json={}
        )
        result = rv.json()
        expected = {
            "message": "either dataset name or file is required",
            "code": "MissingRequiredDatasetOrFile",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 400)
    @mock.patch(
        "projects.controllers.predictions.load_dataset",
        side_effect=util.FILE_NOT_FOUND_ERROR,
    )
    def test_create_prediction_dataset_required(self, mock_load_dataset):
        """
        Should return an http status 400 and a message 'a valid dataset is required'.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        name_dataset = "unk"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions",
            json={"dataset": name_dataset},
        )
        result = rv.json()
        expected = {
            "message": "a valid dataset is required",
            "code": "InvalidDataset",
        }
        self.assertEqual(result, expected)
        self.assertEqual(rv.status_code, 400)
        # the controller must have tried to load the (missing) dataset
        mock_load_dataset.assert_any_call(name_dataset)
    @mock.patch(
        "projects.controllers.predictions.load_dataset",
        return_value=util.IRIS_DATAFRAME,
    )
    @mock.patch(
        "requests.post",
        return_value=util.MOCK_POST_PREDICTION,
    )
    def test_create_prediction_dataset(
        self,
        mock_requests_post,
        mock_load_dataset,
    ):
        """
        Should load dataset request successfully.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        name = util.IRIS_DATASET_NAME
        url = "http://uuid-1-model.anonymous:8000/api/v1.0/predictions"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions",
            json={"dataset": name},
        )
        result = rv.json()
        self.assertIsInstance(result, dict)
        self.assertEqual(rv.status_code, 200)
        mock_load_dataset.assert_any_call(name)
        # the dataset must be forwarded to the model server as a Seldon payload
        mock_requests_post.assert_any_call(
            url=url,
            json={
                "data": {
                    "names": [
                        "SepalLengthCm",
                        "SepalWidthCm",
                        "PetalLengthCm",
                        "PetalWidthCm",
                        "Species",
                    ],
                    "ndarray": [
                        [5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
                        [4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
                        [4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
                        [4.6, 3.1, 1.5, 0.2, "Iris-setosa"],
                    ],
                }
            },
        )
    @mock.patch(
        "projects.controllers.predictions.load_dataset",
        return_value=util.IRIS_DATAFRAME,
    )
    @mock.patch(
        "requests.post",
        return_value=util.MOCK_POST_PREDICTION,
    )
    def test_create_prediction_dataset_image(
        self,
        mock_requests_post,
        mock_load_dataset,
    ):
        """
        Should load the dataset request with an image successfully.
        """
        project_id = util.MOCK_UUID_1
        deployment_id = util.MOCK_UUID_1
        dataset_name = "mock.jpg"
        url = "http://uuid-1-model.anonymous:8000/api/v1.0/predictions"
        rv = TEST_CLIENT.post(
            f"/projects/{project_id}/deployments/{deployment_id}/predictions",
            json={"dataset": dataset_name},
        )
        result = rv.json()
        self.assertIsInstance(result, dict)
        self.assertEqual(rv.status_code, 200)
        mock_load_dataset.assert_any_call(dataset_name)
        # NOTE(review): the mocked load_dataset returns the iris dataframe, so
        # the forwarded payload is the same as in the plain dataset test.
        mock_requests_post.assert_any_call(
            url=url,
            json={
                "data": {
                    "names": [
                        "SepalLengthCm",
                        "SepalWidthCm",
                        "PetalLengthCm",
                        "PetalWidthCm",
                        "Species",
                    ],
                    "ndarray": [
                        [5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
                        [4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
                        [4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
                        [4.6, 3.1, 1.5, 0.2, "Iris-setosa"],
                    ],
                }
            },
        )
| [
"fastapi.testclient.TestClient",
"tests.util.create_mocks",
"unittest.mock.patch",
"tests.util.delete_mocks"
] | [((354, 369), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (364, 369), False, 'from fastapi.testclient import TestClient\n'), ((3238, 3341), 'unittest.mock.patch', 'mock.patch', (['"""projects.controllers.predictions.load_dataset"""'], {'side_effect': 'util.FILE_NOT_FOUND_ERROR'}), "('projects.controllers.predictions.load_dataset', side_effect=\n util.FILE_NOT_FOUND_ERROR)\n", (3248, 3341), True, 'import unittest.mock as mock\n'), ((4119, 4217), 'unittest.mock.patch', 'mock.patch', (['"""projects.controllers.predictions.load_dataset"""'], {'return_value': 'util.IRIS_DATAFRAME'}), "('projects.controllers.predictions.load_dataset', return_value=\n util.IRIS_DATAFRAME)\n", (4129, 4217), True, 'import unittest.mock as mock\n'), ((4241, 4308), 'unittest.mock.patch', 'mock.patch', (['"""requests.post"""'], {'return_value': 'util.MOCK_POST_PREDICTION'}), "('requests.post', return_value=util.MOCK_POST_PREDICTION)\n", (4251, 4308), True, 'import unittest.mock as mock\n'), ((5744, 5842), 'unittest.mock.patch', 'mock.patch', (['"""projects.controllers.predictions.load_dataset"""'], {'return_value': 'util.IRIS_DATAFRAME'}), "('projects.controllers.predictions.load_dataset', return_value=\n util.IRIS_DATAFRAME)\n", (5754, 5842), True, 'import unittest.mock as mock\n'), ((5866, 5933), 'unittest.mock.patch', 'mock.patch', (['"""requests.post"""'], {'return_value': 'util.MOCK_POST_PREDICTION'}), "('requests.post', return_value=util.MOCK_POST_PREDICTION)\n", (5876, 5933), True, 'import unittest.mock as mock\n'), ((531, 550), 'tests.util.create_mocks', 'util.create_mocks', ([], {}), '()\n', (548, 550), True, 'import tests.util as util\n'), ((656, 675), 'tests.util.delete_mocks', 'util.delete_mocks', ([], {}), '()\n', (673, 675), True, 'import tests.util as util\n')] |
from __future__ import absolute_import
import unittest
import sys
from testutils import ADMIN_CLIENT
from testutils import harbor_server
from library.project import Project
from library.user import User
from library.repository import Repository
from library.repository import push_image_to_project
from library.registry import Registry
from library.artifact import Artifact
from library.tag_immutability import Tag_Immutability
from library.repository import push_special_image_to_project
class TestTagImmutability(unittest.TestCase):
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named ``cls``; kept as ``self`` to leave the code byte-identical.
        # Build the API helpers and a dedicated test user + private project.
        self.url = ADMIN_CLIENT["endpoint"]
        self.user_password = "<PASSWORD>"
        self.project= Project()
        self.user= User()
        self.repo= Repository()
        self.registry = Registry()
        self.artifact = Artifact()
        self.tag_immutability = Tag_Immutability()
        self.project_id, self.project_name, self.user_id, self.user_name = [None] * 4
        self.user_id, self.user_name = self.user.create_user(user_password = self.user_password, **ADMIN_CLIENT)
        # client credentials for the newly created (non-admin) user
        self.USER_CLIENT = dict(with_signature = True, with_immutable_status = True, endpoint = self.url, username = self.user_name, password = self.user_password)
        # NOTE(review): "exsiting" is a typo for "existing"; the attribute name
        # is left unchanged here to avoid breaking any external references.
        self.exsiting_rule = dict(selector_repository="rel*", selector_tag="v2.*")
        self.project_id, self.project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
def check_tag_immutability(self, artifact, tag_name, status = True):
for tag in artifact.tags:
if tag.name == tag_name:
self.assertTrue(tag.immutable == status)
return
raise Exception("No tag {} found in artifact {}".format(tag, artifact))
    def test_disability_of_rules(self):
        """
        Test case:
            Test Disability Of Rules
        Test step and expected result:
            1. Create a new project;
            2. Push image A to the project with 2 tags A and B;
            3. Create a disabled rule matched image A with tag A;
            4. Both tags of image A should not be immutable;
            5. Enable this rule;
            6. image A with tag A should be immutable.
        """
        image_a = dict(name="image_disability_a", tag1="latest", tag2="6.2.2")
        #1. Create a new project;
        project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
        #2. Push image A to the project with 2 tags;
        push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
        #3. Create a disabled rule matched image A;
        # the tag selector is the first two characters of tag1 plus a wildcard
        # (e.g. "la*"), so it matches tag1 but not tag2
        rule_id = self.tag_immutability.create_rule(project_id, disabled = True, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
        #4. Both tags of image A should not be immutable;
        artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
        print("[test_disability_of_rules] - artifact:{}".format(artifact_a))
        self.assertTrue(artifact_a)
        self.check_tag_immutability(artifact_a, image_a["tag1"], status = False)
        self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
        #5. Enable this rule;
        self.tag_immutability.update_tag_immutability_policy_rule(project_id, rule_id, disabled = False, **self.USER_CLIENT)
        #6. image A with tag A should be immutable.
        artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
        print("[test_disability_of_rules] - artifact:{}".format(artifact_a))
        self.assertTrue(artifact_a)
        # only tag1 matches the (now enabled) rule selector
        self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
        self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
def test_artifact_and_repo_is_undeletable(self):
"""
Test case:
Test Artifact And Repo is Undeleteable
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
3. Create a enabled rule matched image A with tag A;
4. Tag A should be immutable;
5. Artifact is undeletable;
6. Repository is undeletable.
"""
image_a = dict(name="image_repo_undeletable_a", tag1="latest", tag2="1.3.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags A and B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_artifact_and_repo_is_undeletable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Artifact is undeletable;
self.artifact.delete_artifact(project_name, image_a["name"], image_a["tag1"], expect_status_code = 412,expect_response_body = "configured as immutable, cannot be deleted", **self.USER_CLIENT)
#6. Repository is undeletable.
self.repo.delete_repoitory(project_name, image_a["name"], expect_status_code = 412, expect_response_body = "configured as immutable, cannot be deleted", **self.USER_CLIENT)
def test_tag_is_undeletable(self):
"""
Test case:
Test Tag is Undeleteable
Test step and expected result:
1. Push image A to the project with 2 tags A and B;
2. Create a enabled rule matched image A with tag A;
3. Tag A should be immutable;
4. Tag A is undeletable;
5. Tag B is deletable.
"""
image_a = dict(name="image_undeletable_a", tag1="latest", tag2="9.3.2")
#1. Push image A to the project with 2 tags A and B;
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#2. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(self.project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag2"])[0:2] + "*", **self.USER_CLIENT)
#3. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(self.project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_tag_is_undeletable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = True)
#4. Tag A is undeletable;
self.artifact.delete_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag2"], expect_status_code = 412, **self.USER_CLIENT)
#5. Tag B is deletable.
self.artifact.delete_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag1"], **self.USER_CLIENT)
def test_image_is_unpushable(self):
"""
Test case:
Test Image is Unpushable
Test step and expected result:
1. Create a new project;
2. Push image A to the project with 2 tags A and B;
3. Create a enabled rule matched image A with tag A;
4. Tag A should be immutable;
5. Can not push image with the same image name and with the same tag name.
"""
image_a = dict(name="image_unpushable_a", tag1="latest", tag2="1.3.2")
#1. Create a new project;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A to the project with 2 tags A and B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_image_is_unpushable] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Can not push image with the same image name and with the same tag name.
push_image_to_project(project_name, harbor_server, self.user_name, self.user_password, "<PASSWORD>", image_a["tag1"],
new_image = image_a["name"], expected_error_message = "configured as immutable")
def test_copy_disability(self):
"""
Test case:
Test Copy Disability
Test step and expected result:
1. Create 2 projects;
2. Push image A with tag A and B to project A, push image B which has the same image name and tag name to project B;
3. Create a enabled rule matched image A with tag A;
4. Tag A should be immutable;
5. Can not copy artifact from project A to project B with the same repository name.
"""
image_a = dict(name="image_copy_disability_a", tag1="latest", tag2="1.3.2")
#1. Create 2 projects;
project_id, project_name = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
_, project_name_src = self.project.create_project(metadata = {"public": "false"}, **self.USER_CLIENT)
#2. Push image A with tag A and B to project A, push image B which has the same image name and tag name to project B;
push_special_image_to_project(project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
push_special_image_to_project(project_name_src, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"], image_a["tag2"]])
#3. Create a enabled rule matched image A with tag A;
self.tag_immutability.create_rule(project_id, selector_repository=image_a["name"], selector_tag=str(image_a["tag1"])[0:2] + "*", **self.USER_CLIENT)
#4. Tag A should be immutable;
artifact_a = self.artifact.get_reference_info(project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_copy_disability] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = False)
#5. Can not copy artifact from project A to project B with the same repository name.
artifact_a_src = self.artifact.get_reference_info(project_name_src, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_copy_disability] - artifact_a_src:{}".format(artifact_a_src))
self.artifact.copy_artifact(project_name, image_a["name"], project_name_src+"/"+ image_a["name"] + "@" + artifact_a_src.digest, expect_status_code=412, expect_response_body = "configured as immutable, cannot be updated", **self.USER_CLIENT)
#def test_replication_disability(self):
# pass
def test_priority_of_rules(self):
"""
Test case:
Test Priority Of Rules(excluding rule will not affect matching rule)
Test step and expected result:
1. Push image A, B and C, image A has only 1 tag named tag1;
2. Create a matching rule that matches image A and tag named tag2 which is not exist;
3. Create a excluding rule to exlude image A and B;
4. Add a tag named tag2 to image A, tag2 should be immutable;
5. Tag2 should be immutable;
6. All tags in image B should be immutable;
7. All tags in image C should not be immutable;
8. Disable all rules.
"""
image_a = dict(name="image_priority_a", tag1="latest", tag2="6.3.2")
image_b = dict(name="image_priority_b", tag1="latest", tag2="0.12.0")
image_c = dict(name="image_priority_c", tag1="latest", tag2="3.12.0")
#1. Push image A, B and C, image A has only 1 tag named tag1;
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_a["name"], [image_a["tag1"]])
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_b["name"], [image_b["tag1"],image_b["tag2"]])
push_special_image_to_project(self.project_name, harbor_server, self.user_name, self.user_password, image_c["name"], [image_c["tag1"],image_c["tag2"]])
#2. Create a matching rule that matches image A and tag named tag2 which is not exist;
rule_id_1 = self.tag_immutability.create_rule(self.project_id, selector_repository=image_a["name"], selector_tag=image_a["tag2"], **self.USER_CLIENT)
#3. Create a excluding rule to exlude image A and B;
rule_id_2 = self.tag_immutability.create_rule(self.project_id, selector_repository_decoration = "repoExcludes",
selector_repository="{image_priority_a,image_priority_b}", selector_tag="**", **self.USER_CLIENT)
#4. Add a tag named tag2 to image A, tag2 should be immutable;
self.artifact.create_tag(self.project_name, image_a["name"], image_a["tag1"], image_a["tag2"], **self.USER_CLIENT)
#5. Tag2 should be immutable;
artifact_a = self.artifact.get_reference_info(self.project_name, image_a["name"], image_a["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_a))
self.assertTrue(artifact_a)
self.check_tag_immutability(artifact_a, image_a["tag2"], status = True)
self.check_tag_immutability(artifact_a, image_a["tag1"], status = False)
#6. All tags in image B should be immutable;
artifact_b = self.artifact.get_reference_info(self.project_name, image_b["name"], image_b["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_b))
self.assertTrue(artifact_b)
self.check_tag_immutability(artifact_b, image_b["tag2"], status = False)
self.check_tag_immutability(artifact_b, image_b["tag1"], status = False)
#7. All tags in image C should not be immutable;
artifact_c = self.artifact.get_reference_info(self.project_name, image_c["name"], image_c["tag2"], **self.USER_CLIENT)
print("[test_priority_of_rules] - artifact:{}".format(artifact_c))
self.assertTrue(artifact_c)
self.check_tag_immutability(artifact_c, image_c["tag2"], status = True)
self.check_tag_immutability(artifact_c, image_c["tag1"], status = True)
#8. Disable all rules.
self.tag_immutability.update_tag_immutability_policy_rule(self.project_id, rule_id_1, disabled = True, **self.USER_CLIENT)
self.tag_immutability.update_tag_immutability_policy_rule(self.project_id, rule_id_2, disabled = True, **self.USER_CLIENT)
def test_add_exsiting_rule(self):
"""
Test case:
Test Priority Of Rules(excluding rule will not affect matching rule)
Test step and expected result:
1. Push image A and B with no tag;
2. Create a immutability policy rule A;
3. Fail to create rule B which has the same config as rule A;
"""
self.tag_immutability.create_tag_immutability_policy_rule(self.project_id, **self.exsiting_rule, **self.USER_CLIENT)
self.tag_immutability.create_tag_immutability_policy_rule(self.project_id, **self.exsiting_rule, expect_status_code = 409, **self.USER_CLIENT)
@classmethod
def tearDownClass(self):
print("Case completed")
if __name__ == '__main__':
    # unittest.makeSuite() is deprecated and removed in Python 3.13; build the
    # suite through TestLoader instead (behavior is identical).
    suite = unittest.TestSuite(unittest.TestLoader().loadTestsFromTestCase(TestTagImmutability))
    # failfast stops at the first failing test; detailed results go to stdout.
    result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
    if not result.wasSuccessful():
        raise Exception(r"Tag immutability test failed: {}".format(result))
| [
"library.repository.push_special_image_to_project",
"unittest.makeSuite",
"library.registry.Registry",
"unittest.TextTestRunner",
"library.repository.push_image_to_project",
"library.artifact.Artifact",
"library.project.Project",
"library.user.User",
"library.tag_immutability.Tag_Immutability",
"l... | [((712, 721), 'library.project.Project', 'Project', ([], {}), '()\n', (719, 721), False, 'from library.project import Project\n'), ((742, 748), 'library.user.User', 'User', ([], {}), '()\n', (746, 748), False, 'from library.user import User\n'), ((769, 781), 'library.repository.Repository', 'Repository', ([], {}), '()\n', (779, 781), False, 'from library.repository import Repository\n'), ((807, 817), 'library.registry.Registry', 'Registry', ([], {}), '()\n', (815, 817), False, 'from library.registry import Registry\n'), ((843, 853), 'library.artifact.Artifact', 'Artifact', ([], {}), '()\n', (851, 853), False, 'from library.artifact import Artifact\n'), ((887, 905), 'library.tag_immutability.Tag_Immutability', 'Tag_Immutability', ([], {}), '()\n', (903, 905), False, 'from library.tag_immutability import Tag_Immutability\n'), ((2581, 2732), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1'], image_a['tag2']]"], {}), "(project_name, harbor_server, self.user_name,\n self.user_password, image_a['name'], [image_a['tag1'], image_a['tag2']])\n", (2610, 2732), False, 'from library.repository import push_special_image_to_project\n'), ((4849, 5000), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1'], image_a['tag2']]"], {}), "(project_name, harbor_server, self.user_name,\n self.user_password, image_a['name'], [image_a['tag1'], image_a['tag2']])\n", (4878, 5000), False, 'from library.repository import push_special_image_to_project\n'), ((6709, 6870), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['self.project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1'], image_a['tag2']]"], {}), 
"(self.project_name, harbor_server, self.\n user_name, self.user_password, image_a['name'], [image_a['tag1'],\n image_a['tag2']])\n", (6738, 6870), False, 'from library.repository import push_special_image_to_project\n'), ((8574, 8725), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1'], image_a['tag2']]"], {}), "(project_name, harbor_server, self.user_name,\n self.user_password, image_a['name'], [image_a['tag1'], image_a['tag2']])\n", (8603, 8725), False, 'from library.repository import push_special_image_to_project\n'), ((9484, 9687), 'library.repository.push_image_to_project', 'push_image_to_project', (['project_name', 'harbor_server', 'self.user_name', 'self.user_password', '"""<PASSWORD>"""', "image_a['tag1']"], {'new_image': "image_a['name']", 'expected_error_message': '"""configured as immutable"""'}), "(project_name, harbor_server, self.user_name, self.\n user_password, '<PASSWORD>', image_a['tag1'], new_image=image_a['name'],\n expected_error_message='configured as immutable')\n", (9505, 9687), False, 'from library.repository import push_image_to_project\n'), ((10728, 10879), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1'], image_a['tag2']]"], {}), "(project_name, harbor_server, self.user_name,\n self.user_password, image_a['name'], [image_a['tag1'], image_a['tag2']])\n", (10757, 10879), False, 'from library.repository import push_special_image_to_project\n'), ((10885, 11045), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['project_name_src', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1'], image_a['tag2']]"], {}), "(project_name_src, harbor_server, self.\n user_name, 
self.user_password, image_a['name'], [image_a['tag1'],\n image_a['tag2']])\n", (10914, 11045), False, 'from library.repository import push_special_image_to_project\n'), ((13357, 13497), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['self.project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_a['name']", "[image_a['tag1']]"], {}), "(self.project_name, harbor_server, self.\n user_name, self.user_password, image_a['name'], [image_a['tag1']])\n", (13386, 13497), False, 'from library.repository import push_special_image_to_project\n'), ((13502, 13663), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['self.project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_b['name']", "[image_b['tag1'], image_b['tag2']]"], {}), "(self.project_name, harbor_server, self.\n user_name, self.user_password, image_b['name'], [image_b['tag1'],\n image_b['tag2']])\n", (13531, 13663), False, 'from library.repository import push_special_image_to_project\n'), ((13663, 13824), 'library.repository.push_special_image_to_project', 'push_special_image_to_project', (['self.project_name', 'harbor_server', 'self.user_name', 'self.user_password', "image_c['name']", "[image_c['tag1'], image_c['tag2']]"], {}), "(self.project_name, harbor_server, self.\n user_name, self.user_password, image_c['name'], [image_c['tag1'],\n image_c['tag2']])\n", (13692, 13824), False, 'from library.repository import push_special_image_to_project\n'), ((17071, 17110), 'unittest.makeSuite', 'unittest.makeSuite', (['TestTagImmutability'], {}), '(TestTagImmutability)\n', (17089, 17110), False, 'import unittest\n'), ((17126, 17189), 'unittest.TextTestRunner', 'unittest.TextTestRunner', (['sys.stdout'], {'verbosity': '(2)', 'failfast': '(True)'}), '(sys.stdout, verbosity=2, failfast=True)\n', (17149, 17189), False, 'import unittest\n')] |
"""
Designed to be used in conjunction with xtream1101/humblebundle-downloader.
Takes the download directory of that script, then copies all file types of
each non-comic book to a chosen directory.
Each folder in the target directory will be one book, containing the
different file formats available for the book. They are not separated by
bundles, since this way you can import directories and subdirectories in
calibre, then choose to assume all e-book files in a directory are the same
book in different formats.
Renaming files after they are copied will not result in files being copied
again, as this script keeps a JSON in the given source directory recording
all files previously copied to a target folder.
"""
import sys
import os
import shutil
import json
def traverseBundles(source,target,hbdJSON,copiedJSON):
    """Walk every bundle folder under *source* and process its items.

    Non-directory entries are ignored, and any directory whose name contains
    "comic" is skipped — comic bundles are assumed to contain no regular books.
    """
    for entry in os.listdir(source):
        bundle_path = source + "/" + entry
        if not os.path.isdir(bundle_path):
            continue
        if "comic" in entry.lower():
            continue
        traverseBundleItems(bundle_path, target, hbdJSON, copiedJSON)
def traverseBundleItems(source,target,hbdJSON,copiedJSON):
    """Process every item in a bundle folder, skipping already-copied ones.

    Items whose name appears in *copiedJSON* were handled on a previous run
    and are not touched again.
    """
    pending = (name for name in os.listdir(source) if name not in copiedJSON)
    for item_name in pending:
        traverseFiles(source + "/" + item_name,
                      target + "/" + item_name,
                      hbdJSON, copiedJSON, item_name)
def traverseFiles(source,target,hbdJSON,copiedJSON,itemName):
    """Inspect one item's files and copy its e-book formats to *target*.

    An item is copied only when it has at least one .pdf/.epub/.mobi file,
    is not recognized as a comic (by item name or by the presence of a .cb*
    archive) and contains no extension-less (likely binary) file.
    Successfully copied items are recorded in *copiedJSON*.
    """
    isComic = False
    isBook = False
    copyList = []
    if "comic" in itemName.lower():
        isComic = True
    for fileName in os.listdir(source):
        extension = os.path.splitext(fileName)[1]
        # if the item is available as a .cb* file, it's most likely a comic book
        # if there is no extension, it's probably a binary
        if extension == "":
            break
        elif ".cb" in extension:
            isComic = True
            break
        # BUG FIX: the original tested substring membership in ".pdf.epub.mobi",
        # which also matched partial extensions such as ".epu" or ".pdf.epub";
        # use an explicit tuple of accepted extensions instead.
        elif extension in (".pdf", ".epub", ".mobi"):
            isBook = True
            copyList.append((source+"/"+fileName,target+"/"+fileName))
    if (isBook) and (not isComic) and (copyList):
        os.makedirs(target, exist_ok=True)
        copyFiles(itemName,copyList)
        # BUG FIX: record the copy in the ``copiedJSON`` parameter rather than
        # in the module-global ``copiedDict`` the original reached for; relying
        # on the global made the function unusable outside the __main__ wiring.
        copiedJSON.update({itemName:"book"})
def copyFiles(itemName,copyList):
    """Copy every (source, destination) pair in *copyList*, logging each copy."""
    for src_path, dst_path in copyList:
        shutil.copyfile(src_path, dst_path)
        print("{}\n-->{}\n".format(src_path, dst_path))
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("\nInvalid parameters.\nUsage: ", sys.argv[0], " <path to source> <path to target>\n")
        # BUG FIX: use sys.exit() instead of the site-provided exit(), which is
        # not guaranteed to exist (e.g. under ``python -S`` or in frozen apps).
        sys.exit(1)
    else:
        source = sys.argv[1]
        target = sys.argv[2]
        # .cache.json is written by humblebundle-downloader and lists the bundles.
        with open(source+"/.cache.json") as hbdJsonFile:
            hbdDict = json.load(hbdJsonFile)
        # .book-copier.json records items copied by previous runs, so files
        # renamed in the target directory are not copied again.
        if os.path.exists(source+"/.book-copier.json"):
            with open(source+"/.book-copier.json") as copiedJsonFile:
                copiedDict = json.load(copiedJsonFile)
        else:
            copiedDict = {}
        traverseBundles(source,target,hbdDict,copiedDict)
        # Persist the updated copy record for the next run.
        with open(source+"/.book-copier.json","w") as copiedJsonFile:
            copiedJsonFile.write(json.dumps(copiedDict, sort_keys=True, indent=4))
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"json.dumps",
"os.path.splitext",
"shutil.copyfile",
"os.path.isdir",
"json.load"
] | [((866, 884), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (876, 884), False, 'import os\n'), ((1276, 1294), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (1286, 1294), False, 'import os\n'), ((1700, 1718), 'os.listdir', 'os.listdir', (['source'], {}), '(source)\n', (1710, 1718), False, 'import os\n'), ((2899, 2944), 'os.path.exists', 'os.path.exists', (["(source + '/.book-copier.json')"], {}), "(source + '/.book-copier.json')\n", (2913, 2944), False, 'import os\n'), ((2239, 2273), 'os.makedirs', 'os.makedirs', (['target'], {'exist_ok': '(True)'}), '(target, exist_ok=True)\n', (2250, 2273), False, 'import os\n'), ((2864, 2886), 'json.load', 'json.load', (['hbdJsonFile'], {}), '(hbdJsonFile)\n', (2873, 2886), False, 'import json\n'), ((1051, 1078), 'os.path.isdir', 'os.path.isdir', (['bundleSource'], {}), '(bundleSource)\n', (1064, 1078), False, 'import os\n'), ((1740, 1766), 'os.path.splitext', 'os.path.splitext', (['fileName'], {}), '(fileName)\n', (1756, 1766), False, 'import os\n'), ((2454, 2493), 'shutil.copyfile', 'shutil.copyfile', (['copyJob[0]', 'copyJob[1]'], {}), '(copyJob[0], copyJob[1])\n', (2469, 2493), False, 'import shutil\n'), ((3035, 3060), 'json.load', 'json.load', (['copiedJsonFile'], {}), '(copiedJsonFile)\n', (3044, 3060), False, 'import json\n'), ((3246, 3294), 'json.dumps', 'json.dumps', (['copiedDict'], {'sort_keys': '(True)', 'indent': '(4)'}), '(copiedDict, sort_keys=True, indent=4)\n', (3256, 3294), False, 'import json\n')] |
"""
info_routes.py - Handle the routes for basic information pages.
This module provides the views for the following routes:
/about
/privacy
/terms_and_conditions
Copyright (c) 2019 by <NAME>. All Rights Reserved.
"""
from flask import Blueprint, render_template
# Blueprint grouping the static informational pages (about/privacy/terms);
# templates are looked up in this blueprint's local "templates" folder.
info_routes = Blueprint("info_routes", __name__, template_folder="templates")
@info_routes.route("/about", methods=["GET"])
def about():
    """Render the static About page."""
    return render_template("about.html")
@info_routes.route('/privacy/', methods=['GET'])
def privacy():
    """Render the static Privacy Policy page."""
    return render_template('privacy.html')
@info_routes.route('/terms_and_conditions', methods=['GET'])
def terms_and_conditions():
    """Render the static Terms and Conditions page."""
    return render_template('terms_and_conditions.html')
| [
"flask.render_template",
"flask.Blueprint"
] | [((282, 345), 'flask.Blueprint', 'Blueprint', (['"""info_routes"""', '__name__'], {'template_folder': '"""templates"""'}), "('info_routes', __name__, template_folder='templates')\n", (291, 345), False, 'from flask import Blueprint, render_template\n'), ((418, 447), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (433, 447), False, 'from flask import Blueprint, render_template\n'), ((525, 556), 'flask.render_template', 'render_template', (['"""privacy.html"""'], {}), "('privacy.html')\n", (540, 556), False, 'from flask import Blueprint, render_template\n'), ((659, 703), 'flask.render_template', 'render_template', (['"""terms_and_conditions.html"""'], {}), "('terms_and_conditions.html')\n", (674, 703), False, 'from flask import Blueprint, render_template\n')] |
import os
import logging
import tempfile
log = logging.getLogger(__name__)


class PidFile(object):
    """ A small helper class for pidfiles. """

    # Default directory for pid files when the caller does not supply one.
    PID_DIR = '/var/run/rollbard'

    def __init__(self, program, pid_dir=None):
        """Remember the pidfile name for *program* and the directory to use."""
        self.pid_file = "%s.pid" % program
        self.pid_dir = pid_dir or self.get_default_pid_dir()
        self.pid_path = os.path.join(self.pid_dir, self.pid_file)

    def get_default_pid_dir(self):
        """Return the class-wide default pid directory."""
        return PidFile.PID_DIR

    def get_path(self):
        """Return a writable pidfile path, falling back to the temp directory.

        Raises:
            Exception: when neither the configured directory nor the system
                temp directory is writable.
        """
        # Can we write to the directory
        try:
            if os.access(self.pid_dir, os.W_OK):
                log.info("Pid file is: %s" % self.pid_path)
                return self.pid_path
        except Exception:
            # log.warn is a deprecated alias; use log.warning
            log.warning("Cannot locate pid file, trying to use: %s" % tempfile.gettempdir())
        # if all else fails
        if os.access(tempfile.gettempdir(), os.W_OK):
            tmp_path = os.path.join(tempfile.gettempdir(), self.pid_file)
            log.debug("Using temporary pid file: %s" % tmp_path)
            return tmp_path
        else:
            # Can't save pid file, bail out
            log.error("Cannot save pid file anywhere")
            raise Exception("Cannot save pid file anywhere")

    def clean(self):
        """Delete the pidfile; return True on success, False otherwise."""
        try:
            path = self.get_path()
            log.debug("Cleaning up pid file %s" % path)
            os.remove(path)
            return True
        except Exception:
            log.warning("Could not clean up pid file")
            return False

    def get_pid(self):
        """Return the integer pid stored in the pidfile, or None on any error."""
        try:
            # BUG FIX: use a context manager so the file handle is closed even
            # when read()/int() raises (the original leaked it on error).
            with open(self.get_path()) as pf:
                pid_s = pf.read()
            return int(pid_s.strip())
        except Exception:
            return None
| [
"logging.getLogger",
"os.access",
"os.path.join",
"tempfile.gettempdir",
"os.remove"
] | [((48, 75), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (65, 75), False, 'import logging\n'), ((358, 399), 'os.path.join', 'os.path.join', (['self.pid_dir', 'self.pid_file'], {}), '(self.pid_dir, self.pid_file)\n', (370, 399), False, 'import os\n'), ((560, 592), 'os.access', 'os.access', (['self.pid_dir', 'os.W_OK'], {}), '(self.pid_dir, os.W_OK)\n', (569, 592), False, 'import os\n'), ((857, 878), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (876, 878), False, 'import tempfile\n'), ((1369, 1384), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (1378, 1384), False, 'import os\n'), ((926, 947), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (945, 947), False, 'import tempfile\n'), ((784, 805), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (803, 805), False, 'import tempfile\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Split per-update status history out of ``UpdateHistory``.

    Moves the ``recorded``/``status`` columns into the new
    ``UpdateHistoryItem`` model (linked back via a foreign key) and enforces
    uniqueness of (machine, name, version) on ``UpdateHistory``.
    """

    dependencies = [
        ('server', '0018_auto_20151124_1654'),
    ]

    operations = [
        migrations.CreateModel(
            name='UpdateHistoryItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('recorded', models.DateTimeField()),
                ('status', models.CharField(max_length=255, verbose_name=b'Status', choices=[(b'pending', b'Pending'), (b'error', b'Error'), (b'success', b'Success')])),
            ],
        ),
        migrations.AlterModelOptions(
            name='updatehistory',
            options={'ordering': ['name']},
        ),
        migrations.AlterUniqueTogether(
            name='updatehistory',
            # Idiom fix: set literal instead of set([...]) (flake8-comprehensions C405).
            unique_together={('machine', 'name', 'version')},
        ),
        migrations.AddField(
            model_name='updatehistoryitem',
            name='update_history',
            field=models.ForeignKey(to='server.UpdateHistory'),
        ),
        # The old columns now live on UpdateHistoryItem, so drop them here.
        migrations.RemoveField(
            model_name='updatehistory',
            name='recorded',
        ),
        migrations.RemoveField(
            model_name='updatehistory',
            name='status',
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.migrations.AlterModelOptions",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((706, 793), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""updatehistory"""', 'options': "{'ordering': ['name']}"}), "(name='updatehistory', options={'ordering': [\n 'name']})\n", (734, 793), False, 'from django.db import models, migrations\n'), ((1168, 1235), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""updatehistory"""', 'name': '"""recorded"""'}), "(model_name='updatehistory', name='recorded')\n", (1190, 1235), False, 'from django.db import models, migrations\n'), ((1280, 1345), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""updatehistory"""', 'name': '"""status"""'}), "(model_name='updatehistory', name='status')\n", (1302, 1345), False, 'from django.db import models, migrations\n'), ((1103, 1147), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""server.UpdateHistory"""'}), "(to='server.UpdateHistory')\n", (1120, 1147), False, 'from django.db import models, migrations\n'), ((356, 449), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (372, 449), False, 'from django.db import models, migrations\n'), ((477, 499), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (497, 499), False, 'from django.db import models, migrations\n'), ((529, 674), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': "b'Status'", 'choices': "[(b'pending', b'Pending'), (b'error', b'Error'), (b'success', b'Success')]"}), "(max_length=255, verbose_name=b'Status', choices=[(\n b'pending', b'Pending'), (b'error', b'Error'), (b'success', b'Success')])\n", (545, 674), False, 'from django.db import models, migrations\n')] |
""" batch iterator"""
from __future__ import absolute_import
import ctypes
from ddls.base import check_call, LIB, c_str, c_array
from ddls.hpps.tensor import Tensor
class Batch(object):
    """Wrapper around a native HPPS batch handle produced by an iterator."""

    def __init__(self, handle):
        """Store the opaque native batch handle."""
        self.handle = handle

    def __del__(self):
        """Release the native batch when the Python wrapper is collected."""
        check_call(LIB.HPPS_BatchDestroy(self.handle))

    def get_tensor(self, name):
        """Look up and return the tensor stored under *name*."""
        tensor_handle = ctypes.c_void_p()
        check_call(LIB.HPPS_BatchGetTensorFromKey(self.handle,
                                                   c_str(name),
                                                   ctypes.byref(tensor_handle)))
        return Tensor(handle=tensor_handle, shape=None, type=None)

    def names(self):
        """Return a tuple with the names of all tensors in this batch."""
        keys = ctypes.POINTER(ctypes.c_char_p)()
        count = ctypes.c_int()
        check_call(LIB.HPPS_BatchGetKeys(self.handle,
                                           ctypes.byref(count),
                                           ctypes.byref(keys)))
        return tuple(keys[:count.value])

    def add_indices_tensor(self, names_array):
        """Register the tensors in *names_array* as indices tensors."""
        name_ptrs = c_array(ctypes.c_char_p, names_array)
        check_call(LIB.HPPS_AddIndicesTensor(self.handle,
                                              len(names_array),
                                              name_ptrs))

    def add_uniqid_tensor(self, names_array):
        """Register the tensors in *names_array* as unique-id tensors."""
        name_ptrs = c_array(ctypes.c_char_p, names_array)
        check_call(LIB.HPPS_AddUniqIdTensor(self.handle,
                                             len(names_array),
                                             name_ptrs))
| [
"ddls.base.c_array",
"ctypes.byref",
"ctypes.POINTER",
"ddls.base.c_str",
"ddls.base.LIB.HPPS_BatchDestroy",
"ddls.hpps.tensor.Tensor",
"ctypes.c_int",
"ctypes.c_void_p"
] | [((556, 573), 'ctypes.c_void_p', 'ctypes.c_void_p', ([], {}), '()\n', (571, 573), False, 'import ctypes\n'), ((785, 826), 'ddls.hpps.tensor.Tensor', 'Tensor', ([], {'handle': 'out', 'shape': 'None', 'type': 'None'}), '(handle=out, shape=None, type=None)\n', (791, 826), False, 'from ddls.hpps.tensor import Tensor\n'), ((973, 987), 'ctypes.c_int', 'ctypes.c_int', ([], {}), '()\n', (985, 987), False, 'import ctypes\n'), ((413, 447), 'ddls.base.LIB.HPPS_BatchDestroy', 'LIB.HPPS_BatchDestroy', (['self.handle'], {}), '(self.handle)\n', (434, 447), False, 'from ddls.base import check_call, LIB, c_str, c_array\n'), ((920, 951), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_char_p'], {}), '(ctypes.c_char_p)\n', (934, 951), False, 'import ctypes\n'), ((687, 698), 'ddls.base.c_str', 'c_str', (['name'], {}), '(name)\n', (692, 698), False, 'from ddls.base import check_call, LIB, c_str, c_array\n'), ((750, 767), 'ctypes.byref', 'ctypes.byref', (['out'], {}), '(out)\n', (762, 767), False, 'import ctypes\n'), ((1083, 1105), 'ctypes.byref', 'ctypes.byref', (['out_size'], {}), '(out_size)\n', (1095, 1105), False, 'import ctypes\n'), ((1148, 1171), 'ctypes.byref', 'ctypes.byref', (['out_names'], {}), '(out_names)\n', (1160, 1171), False, 'import ctypes\n'), ((1481, 1518), 'ddls.base.c_array', 'c_array', (['ctypes.c_char_p', 'names_array'], {}), '(ctypes.c_char_p, names_array)\n', (1488, 1518), False, 'from ddls.base import check_call, LIB, c_str, c_array\n'), ((1773, 1810), 'ddls.base.c_array', 'c_array', (['ctypes.c_char_p', 'names_array'], {}), '(ctypes.c_char_p, names_array)\n', (1780, 1810), False, 'from ddls.base import check_call, LIB, c_str, c_array\n')] |
import autograd.numpy as anp
import numpy as np
from autograd import value_and_grad
from pymoo.factory import normalize
from pymoo.util.ref_dirs.energy import squared_dist
from pymoo.util.ref_dirs.optimizer import Adam
from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions
class LayerwiseRieszEnergyReferenceDirectionFactory(ReferenceDirectionFactory):
    """Build reference directions layer by layer and optimize per-layer scalings.

    Each entry of ``partitions`` produces one "layer" of points on the unit
    simplex; a scalar scaling factor per layer is then tuned by gradient
    descent (Adam) to minimize the pairwise energy computed by
    ``calc_potential_energy``.
    """

    def __init__(self,
                 n_dim,
                 partitions,
                 return_as_tuple=False,
                 n_max_iter=1000,
                 verbose=False,
                 X=None,
                 **kwargs):
        # n_dim: dimensionality of the points (handled by the superclass).
        # partitions: one entry per layer; controls how many points the layer gets.
        # n_max_iter: upper bound on gradient-descent iterations in _solve().
        # X: optional pre-set points (stored; do() builds its own X from partitions).
        super().__init__(n_dim, **kwargs)
        self.scalings = None
        self.n_max_iter = n_max_iter
        self.verbose = verbose
        self.return_as_tuple = return_as_tuple
        self.X = X
        self.partitions = partitions

    def _step(self, optimizer, X, scalings):
        """Do one Adam step on the layer scalings; return (new_scalings, objective)."""
        # Objective value and its gradient w.r.t. the scalings, via autograd.
        obj, grad = value_and_grad(calc_potential_energy)(scalings, X)
        scalings = optimizer.next(scalings, np.array(grad))
        # Rescale so the largest scaling maps to 1 (keeps values in [0, 1]).
        scalings = normalize(scalings, xl=0, xu=scalings.max())
        return scalings, obj

    def _solve(self, X, scalings):
        """Run gradient descent on the scalings and return the final point set."""
        # initialize the optimizer for the run
        optimizer = Adam()
        # for each iteration of gradient descent
        for i in range(self.n_max_iter):
            # execute one optimization step
            _scalings, _obj = self._step(optimizer, X, scalings)
            # evaluate how much the points have moved
            delta = np.abs(_scalings - scalings).sum()
            if self.verbose:
                print(i, "objective", _obj, "delta", delta)
            # if there was only a little delta during the last iteration -> terminate
            if delta < 1e-5:
                scalings = _scalings
                break
            # otherwise use the new points for the next iteration
            scalings = _scalings
        self.scalings = scalings
        return get_points(X, scalings)

    def do(self):
        """Construct one point layer per partition entry, then optimize the scalings."""
        X = []
        scalings = []
        for k, p in enumerate(self.partitions):
            if p > 1:
                # Interior break points of [0, 1] (end points excluded).
                val = np.linspace(0, 1, p + 1)[1:-1]
                _X = []
                for i in range(self.n_dim):
                    for j in range(i + 1, self.n_dim):
                        # Points on the simplex edge between axes i and j.
                        x = np.zeros((len(val), self.n_dim))
                        x[:, i] = val
                        x[:, j] = 1 - val
                        _X.append(x)
                # Add the simplex corners (identity rows) to every layer.
                X.append(np.row_stack(_X + [np.eye(self.n_dim)]))
            elif p == 1:
                # Only the corner points.
                X.append(np.eye(self.n_dim))
            else:
                # p == 0: a single centroid point.
                X.append(np.full(self.n_dim, 1 / self.n_dim)[None, :])
            # Initial scaling shrinks linearly for later (inner) layers.
            scalings.append(1 - k / len(self.partitions))
        scalings = np.array(scalings)
        X = self._solve(X, scalings)
        return X
# ---------------------------------------------------------------------------------------------------------
# Energy Functions
# ---------------------------------------------------------------------------------------------------------
def get_points(X, scalings):
    """Scale each layer of directions by its factor and stack all rows.

    ``X`` is a list of 2-D arrays (one per layer); ``scalings[i]`` is applied
    to ``X[i]`` via ``scale_reference_directions``.
    """
    scaled_layers = [scale_reference_directions(X[idx], scalings[idx])
                     for idx in range(len(X))]
    return anp.row_stack(scaled_layers)
def calc_potential_energy(scalings, X):
    """Mean inverse squared pairwise distance of the scaled point set.

    Returns ``(nan, nan)`` when two points (nearly) coincide, since the
    energy would blow up.
    """
    points = get_points(X, scalings)
    rows, cols = anp.triu_indices(len(points), 1)
    pairwise_sq = squared_dist(points, points)[rows, cols]
    # Guard against (near-)duplicate points before dividing.
    if np.any(pairwise_sq < 1e-12):
        return np.nan, np.nan
    return (1 / pairwise_sq).mean()
| [
"numpy.abs",
"numpy.eye",
"autograd.numpy.row_stack",
"pymoo.util.ref_dirs.optimizer.Adam",
"numpy.any",
"numpy.array",
"pymoo.util.reference_direction.scale_reference_directions",
"numpy.linspace",
"pymoo.util.ref_dirs.energy.squared_dist",
"autograd.value_and_grad",
"numpy.full"
] | [((3256, 3275), 'autograd.numpy.row_stack', 'anp.row_stack', (['vals'], {}), '(vals)\n', (3269, 3275), True, 'import autograd.numpy as anp\n'), ((3444, 3461), 'numpy.any', 'np.any', (['(D < 1e-12)'], {}), '(D < 1e-12)\n', (3450, 3461), True, 'import numpy as np\n'), ((1252, 1258), 'pymoo.util.ref_dirs.optimizer.Adam', 'Adam', ([], {}), '()\n', (1256, 1258), False, 'from pymoo.util.ref_dirs.optimizer import Adam\n'), ((2797, 2815), 'numpy.array', 'np.array', (['scalings'], {}), '(scalings)\n', (2805, 2815), True, 'import numpy as np\n'), ((3411, 3429), 'pymoo.util.ref_dirs.energy.squared_dist', 'squared_dist', (['X', 'X'], {}), '(X, X)\n', (3423, 3429), False, 'from pymoo.util.ref_dirs.energy import squared_dist\n'), ((944, 981), 'autograd.value_and_grad', 'value_and_grad', (['calc_potential_energy'], {}), '(calc_potential_energy)\n', (958, 981), False, 'from autograd import value_and_grad\n'), ((1039, 1053), 'numpy.array', 'np.array', (['grad'], {}), '(grad)\n', (1047, 1053), True, 'import numpy as np\n'), ((3201, 3246), 'pymoo.util.reference_direction.scale_reference_directions', 'scale_reference_directions', (['X[i]', 'scalings[i]'], {}), '(X[i], scalings[i])\n', (3227, 3246), False, 'from pymoo.util.reference_direction import ReferenceDirectionFactory, scale_reference_directions\n'), ((1535, 1563), 'numpy.abs', 'np.abs', (['(_scalings - scalings)'], {}), '(_scalings - scalings)\n', (1541, 1563), True, 'import numpy as np\n'), ((2159, 2183), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(p + 1)'], {}), '(0, 1, p + 1)\n', (2170, 2183), True, 'import numpy as np\n'), ((2609, 2627), 'numpy.eye', 'np.eye', (['self.n_dim'], {}), '(self.n_dim)\n', (2615, 2627), True, 'import numpy as np\n'), ((2672, 2707), 'numpy.full', 'np.full', (['self.n_dim', '(1 / self.n_dim)'], {}), '(self.n_dim, 1 / self.n_dim)\n', (2679, 2707), True, 'import numpy as np\n'), ((2536, 2554), 'numpy.eye', 'np.eye', (['self.n_dim'], {}), '(self.n_dim)\n', (2542, 2554), True, 'import numpy 
as np\n')] |
import numpy as np
import math
import fatpack
import matplotlib.pyplot as plt
import pandas as pd
#Create a function that reutrns the Goodman correction:
def Goodman_method_correction(M_a, M_m, M_max):
    """Return the Goodman-corrected moment amplitude.

    The ultimate moment is taken as ``1.5 * M_max``; the amplitude ``M_a``
    is amplified to the equivalent zero-mean amplitude:

        M_ar = M_a / (1 - M_m / M_u)

    Works element-wise for numpy arrays as well as for scalars.
    """
    ultimate_moment = 1.5 * M_max
    mean_ratio = M_m / ultimate_moment
    return M_a / (1.0 - mean_ratio)
def Equivalent_bending_moment(M_ar, Neq, m):
    """Return the damage-equivalent bending moment.

    Folds the corrected moment ranges into one constant-amplitude moment that
    causes the same damage over ``Neq`` cycles for an S-N exponent ``m``:

        M_eq = (sum(M_ar ** m) / Neq) ** (1 / m)

    Parameters
    ----------
    M_ar : array_like (1-D)
        Corrected moment ranges. Previously only numpy arrays were accepted
        (the code read ``M_ar.shape``); any 1-D sequence now works.
    Neq : float
        Equivalent number of cycles.
    m : float
        S-N curve exponent.

    Returns
    -------
    float
        The equivalent bending moment.
    """
    ranges = np.asarray(M_ar, dtype=float)
    # Vectorized replacement of the original per-element math.pow() loop.
    total = np.sum(ranges ** m)
    return float((total / Neq) ** (1.0 / m))
def get_DEL(y, Neq, m):
    """Return the damage-equivalent load of the signal ``y``.

    Extracts rainflow ranges and means from the flattened signal (fatpack),
    applies the Goodman mean-stress correction with ``M_max = max(S)``, and
    folds the corrected ranges into a single equivalent moment for ``Neq``
    cycles and S-N exponent ``m``.

    Changes from the previous revision: the leftover debug
    ``print(sum(M_ar.shape))`` was removed, and the ranges/means are passed
    to the correction directly instead of round-tripping through a stacked
    ``np.array([Sm, S]).T`` that was immediately sliced apart again.
    """
    S, Sm = fatpack.find_rainflow_ranges(y.flatten(), return_means=True, k=256)
    # M_a = ranges, M_m = means, M_max = largest observed range.
    M_ar = Goodman_method_correction(S, Sm, np.max(S))
    M_eq = Equivalent_bending_moment(M_ar, Neq, m)
    return M_eq
| [
"math.pow",
"numpy.array",
"numpy.max"
] | [((433, 461), 'math.pow', 'math.pow', (['(M_sum / Neq)', '(1 / m)'], {}), '(M_sum / Neq, 1 / m)\n', (441, 461), False, 'import math\n'), ((596, 613), 'numpy.array', 'np.array', (['[Sm, S]'], {}), '([Sm, S])\n', (604, 613), True, 'import numpy as np\n'), ((683, 692), 'numpy.max', 'np.max', (['S'], {}), '(S)\n', (689, 692), True, 'import numpy as np\n'), ((394, 414), 'math.pow', 'math.pow', (['M_ar[i]', 'm'], {}), '(M_ar[i], m)\n', (402, 414), False, 'import math\n')] |
# Programmer friendly subprocess wrapper.
#
# Author: <NAME> <<EMAIL>>
# Last Change: March 2, 2020
# URL: https://executor.readthedocs.io
"""
Portable process control functionality for the `executor` package.
The :mod:`executor.process` module defines the :class:`ControllableProcess`
abstract base class which enables process control features like waiting for a
process to end, gracefully terminating it and forcefully killing it. The
process control functionality in :class:`ControllableProcess` is separated from
the command execution functionality in :class:`~executor.ExternalCommand` to
make it possible to re-use the process control functionality in other Python
packages, see for example the :class:`proc.core.Process` class.
"""
# Standard library modules.
import logging
# External dependencies.
from humanfriendly import Timer
from humanfriendly.terminal.spinners import Spinner
from property_manager import PropertyManager, mutable_property, required_property
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
"""The default timeout used to wait for process termination (number of seconds)."""
class ControllableProcess(PropertyManager):
    """
    Abstract, portable process control functionality.
    By defining a subclass of :class:`ControllableProcess` and implementing the
    :attr:`pid`, :attr:`command_line` and :attr:`is_running` properties and the
    :func:`terminate_helper()` and :func:`kill_helper()` methods you get the
    :func:`wait_for_process()`, :func:`terminate()` and :func:`kill()` methods
    for free. This decoupling has enabled me to share a lot of code between two
    Python projects of mine with similar goals but very different
    requirements:
    1. The `executor` package builds on top of the :mod:`subprocess` module
       in the Python standard library and strives to be as cross platform
       as possible. This means things like UNIX signals are not an option
       (although signals exist on Windows they are hardly usable). The package
       mostly deals with :class:`subprocess.Popen` objects internally (to hide
       platform specific details as much as possible).
    2. The proc_ package exposes process information available in the Linux
       process information pseudo-file system available at ``/proc``. The
       package mostly deals with process IDs internally. Because this is
       completely specialized to a UNIX environment the use of things
       like UNIX signals is not a problem at all.
    .. _proc: http://proc.readthedocs.org/en/latest/
    """

    @mutable_property
    def command_line(self):
        """
        A list of strings with the command line used to start the process.
        This property may be set or implemented by subclasses to enable
        :func:`__str__()` to render a human friendly representation of a
        :class:`ControllableProcess` object.
        """
        return []

    @property
    def is_running(self):
        """
        :data:`True` if the process is running, :data:`False` otherwise.
        This property must be implemented by subclasses to enable
        :func:`wait_for_process()`, :func:`terminate()` and :func:`kill()` to
        work properly.
        """
        raise NotImplementedError("You need to implement the `is_running' property!")

    @mutable_property
    def logger(self):
        """
        The :class:`logging.Logger` object to use (defaults to the :mod:`executor.process` logger).
        If you are using Python's :mod:`logging` module and you find it
        confusing that command manipulation is logged under the
        :mod:`executor.process` name space instead of the name space of the
        application or library using :mod:`executor` you can set this
        attribute to inject a custom (and more appropriate) logger.
        """
        return logger

    @mutable_property
    def pid(self):
        """
        The process ID (a number) or :data:`None`.
        This property must be set or implemented by subclasses:
        - It provides :func:`wait_for_process()` with a short and unique
          representation of a process that most users will understand.
        - It enables :func:`__str__()` to render a human friendly
          representation of a :class:`ControllableProcess` object.
        """

    def wait_for_process(self, timeout=0, use_spinner=None):
        """
        Wait until the process ends or the timeout expires.
        :param timeout: The number of seconds to wait for the process to
                        terminate after we've asked it nicely (defaults
                        to zero which means we wait indefinitely).
        :param use_spinner: Whether or not to display an interactive spinner
                            on the terminal (using :class:`~humanfriendly.Spinner`)
                            to explain to the user what they are waiting for:
                            - :data:`True` enables the spinner,
                            - :data:`False` disables the spinner,
                            - :data:`None` (the default) means the spinner is
                              enabled when the program is connected to an
                              interactive terminal, otherwise it's disabled.
        :returns: A :class:`~humanfriendly.Timer` object telling you how long
                  it took to wait for the process.
        """
        with Timer(resumable=True) as timer:
            with Spinner(interactive=use_spinner, timer=timer) as spinner:
                # Poll is_running until the process ends or the timeout expires;
                # the spinner's sleep() provides the polling delay.
                while self.is_running:
                    if timeout and timer.elapsed_time >= timeout:
                        break
                    spinner.step(label="Waiting for process %i to terminate" % self.pid)
                    spinner.sleep()
        return timer

    def terminate(self, wait=True, timeout=DEFAULT_TIMEOUT, use_spinner=None):
        """
        Gracefully terminate the process.
        :param wait: Whether to wait for the process to end (a boolean,
                     defaults to :data:`True`).
        :param timeout: The number of seconds to wait for the process to
                        terminate after we've signaled it (defaults to
                        :data:`DEFAULT_TIMEOUT`). Zero means to wait
                        indefinitely.
        :param use_spinner: See the :func:`wait_for_process()` documentation.
        :returns: :data:`True` if the process was terminated, :data:`False`
                  otherwise.
        :raises: Any exceptions raised by :func:`terminate_helper()`
                 implementations of subclasses or :func:`kill()`.
        This method works as follows:
        1. Signal the process to gracefully terminate itself. Processes can
           choose to intercept termination signals to allow for graceful
           termination (many UNIX daemons work like this) however the default
           action is to simply exit immediately.
        2. If `wait` is :data:`True` and we've signaled the process, we wait
           for it to terminate gracefully or `timeout` seconds have passed
           (whichever comes first).
        3. If `wait` is :data:`True` and the process is still running after
           `timeout` seconds have passed, it will be forcefully terminated
           using :func:`kill()` (the value of `timeout` that was given to
           :func:`terminate()` will be passed on to :func:`kill()`).
        This method does nothing when :attr:`is_running` is :data:`False`.
        """
        if self.is_running:
            self.logger.info("Gracefully terminating process %s ..", self)
            self.terminate_helper()
            if wait:
                timer = self.wait_for_process(timeout=timeout, use_spinner=use_spinner)
                if self.is_running:
                    # Escalate: graceful termination didn't work, so fall
                    # back to a forceful kill with the same timeout.
                    self.logger.warning("Failed to gracefully terminate process! (waited %s)", timer)
                    return self.kill(wait=True, timeout=timeout)
                else:
                    self.logger.info("Successfully terminated process in %s.", timer)
                    return True
            return not self.is_running
        else:
            return False

    def terminate_helper(self):
        """Request the process to gracefully terminate itself (needs to be implemented by subclasses)."""
        raise NotImplementedError("You need to implement the terminate_helper() method!")

    def kill(self, wait=True, timeout=DEFAULT_TIMEOUT, use_spinner=None):
        """
        Forcefully kill the process.
        :param wait: Whether to wait for the process to end (a boolean,
                     defaults to :data:`True`).
        :param timeout: The number of seconds to wait for the process to
                        terminate after we've signaled it (defaults to
                        :data:`DEFAULT_TIMEOUT`). Zero means to wait
                        indefinitely.
        :param use_spinner: See the :func:`wait_for_process()` documentation.
        :returns: :data:`True` if the process was killed, :data:`False`
                  otherwise.
        :raises: - Any exceptions raised by :func:`kill_helper()`
                   implementations of subclasses.
                 - :exc:`ProcessTerminationFailed` if the process is still
                   running after :func:`kill_helper()` and
                   :func:`wait_for_process()` have been called.
        This method does nothing when :attr:`is_running` is :data:`False`.
        """
        if self.is_running:
            self.logger.info("Forcefully killing process %s ..", self)
            self.kill_helper()
            if wait:
                timer = self.wait_for_process(timeout=timeout, use_spinner=use_spinner)
                if self.is_running:
                    # Unlike terminate(), there is nothing left to escalate
                    # to, so a surviving process is reported as an error.
                    self.logger.warning("Failed to forcefully kill process! (waited %s)", timer)
                    raise ProcessTerminationFailed(process=self, message="Failed to kill process! (%s)" % self)
                else:
                    self.logger.info("Successfully killed process in %s.", timer)
                    return True
            return not self.is_running
        else:
            return False

    def kill_helper(self):
        """Forcefully kill the process (needs to be implemented by subclasses)."""
        raise NotImplementedError("You need to implement the kill_helper() method!")

    def __str__(self):
        """
        Render a human friendly representation of a :class:`ControllableProcess` object.
        :returns: A string describing the process. Includes the process ID and the
                  command line (when available).
        """
        text = []
        # Include the process ID? (only when it's available)
        if self.pid is not None:
            text.append(str(self.pid))
        # Include the command line? (again, only when it's available)
        if self.command_line:
            # We import here to avoid circular imports.
            from executor import quote
            text.append("(%s)" % quote(self.command_line))
        if not text:
            # If all else fails we fall back to the super class.
            text.append(object.__str__(self))
        return " ".join(text)
class ProcessTerminationFailed(PropertyManager, Exception):
    """Raised when process termination fails."""

    def __init__(self, *args, **kw):
        """
        Initialize a :class:`ProcessTerminationFailed` object.
        This method's signature is the same as the initializer of the
        :class:`~property_manager.PropertyManager` class.
        """
        # PropertyManager consumes the keyword arguments (setting `process`
        # and `message`) before Exception is initialized with the resulting
        # message text.
        PropertyManager.__init__(self, *args, **kw)
        Exception.__init__(self, self.message)

    @required_property(usage_notes=False)
    def process(self):
        """The :class:`ControllableProcess` object that triggered the exception."""

    @required_property(usage_notes=False)
    def message(self):
        """An error message that explains how the process termination failed."""
| [
"logging.getLogger",
"humanfriendly.Timer",
"property_manager.required_property",
"property_manager.PropertyManager.__init__",
"executor.quote",
"humanfriendly.terminal.spinners.Spinner"
] | [((1026, 1053), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1043, 1053), False, 'import logging\n'), ((11729, 11765), 'property_manager.required_property', 'required_property', ([], {'usage_notes': '(False)'}), '(usage_notes=False)\n', (11746, 11765), False, 'from property_manager import PropertyManager, mutable_property, required_property\n'), ((11879, 11915), 'property_manager.required_property', 'required_property', ([], {'usage_notes': '(False)'}), '(usage_notes=False)\n', (11896, 11915), False, 'from property_manager import PropertyManager, mutable_property, required_property\n'), ((11632, 11675), 'property_manager.PropertyManager.__init__', 'PropertyManager.__init__', (['self', '*args'], {}), '(self, *args, **kw)\n', (11656, 11675), False, 'from property_manager import PropertyManager, mutable_property, required_property\n'), ((5448, 5469), 'humanfriendly.Timer', 'Timer', ([], {'resumable': '(True)'}), '(resumable=True)\n', (5453, 5469), False, 'from humanfriendly import Timer\n'), ((5497, 5542), 'humanfriendly.terminal.spinners.Spinner', 'Spinner', ([], {'interactive': 'use_spinner', 'timer': 'timer'}), '(interactive=use_spinner, timer=timer)\n', (5504, 5542), False, 'from humanfriendly.terminal.spinners import Spinner\n'), ((11070, 11094), 'executor.quote', 'quote', (['self.command_line'], {}), '(self.command_line)\n', (11075, 11094), False, 'from executor import quote\n')] |
from logging import basicConfig, getLogger, INFO
from connect_to_ledger import create_qldb_driver
from amazon.ion.simpleion import dumps, loads
logger = getLogger(__name__)
basicConfig(level=INFO)
from constants import Constants
from register_person import get_scentityid_from_personid,get_scentity_contact
from sampledata.sample_data import get_value_from_documentid,document_exist,update_document
from check_container_safety import isContainerSafe
from create_purchase_order_to_manufacturer import get_sub_details
from export_transport_product import create_lorry_reciept, update_document_in_container
from export_customs_approval import document_already_approved_by_customs
import copy
def deliver_product_to_distributor(transaction_executor, pick_up_request_id, pick_up_person_id):
    """Accept an import pick-up request and release its containers to the carrier.

    Looks up the purchase order behind ``pick_up_request_id``, then for every
    container on the order: verifies customs import approval (certificate of
    origin + packing list), checks container safety, checks that the person
    picking up belongs to the carrier company named on the request, records
    receiver approval on the airway bill / bill of lading and issues (or
    updates) a lorry receipt.

    Returns a dict with 'statusCode' (200 or 400) and a 'body' payload.

    Fixes over the previous revision:
    - the "new carrier" branch returned an unbound ``return_statement``
      (NameError); it now returns its own message,
    - the unsafe-container countdown was re-initialised on every loop pass so
      it could never reach zero for multi-container orders; the counters are
      now set up once before the loop,
    - a leftover debug ``print(transport_type)`` was removed,
    - the function no longer falls off the end (implicit ``None``) when every
      container is skipped.
    """
    if document_exist(transaction_executor, Constants.PICK_UP_REQUESTS_TABLE, pick_up_request_id):
        update_document(transaction_executor, Constants.PICK_UP_REQUESTS_TABLE, "isAccepted", pick_up_request_id, True)
        purchase_order_id = get_value_from_documentid(transaction_executor, Constants.PICK_UP_REQUESTS_TABLE, pick_up_request_id, "PurchaseOrderId")
        purchase_order_id = purchase_order_id[0]
        container_ids = get_value_from_documentid(transaction_executor, Constants.PURCHASE_ORDER_TABLE_NAME, purchase_order_id, "HighestPackagingLevelIds")
        # Initialise the counters once: the countdown below tracks how many
        # containers may still turn out unsafe before we give up entirely.
        total_containers_ordered = len(container_ids[0])
        total_safe_containers = copy.copy(total_containers_ordered)
        for container_id in container_ids[0]:
            if document_exist(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id):
                # Container exists; gather its customs paperwork.
                certificate_of_origin_id = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "CertificateOfOriginId")
                packing_list_id = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "PackingListId")
                import_custom_approved = (
                    document_already_approved_by_customs(transaction_executor, "ImportApproval", Constants.CERTIFICATE_OF_ORIGIN_TABLE_NAME, certificate_of_origin_id[0])
                    and document_already_approved_by_customs(transaction_executor, "ImportApproval", Constants.PACKING_LIST_TABLE_NAME, packing_list_id[0]))
                if import_custom_approved:
                    logger.info("Approved by Import!")
                    if isContainerSafe(transaction_executor, container_id):
                        actual_sc_entity_id = get_scentityid_from_personid(transaction_executor, pick_up_person_id)
                        transport_type = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "TransportType")
                        # Transport type 1 = air (airway bill), 2 = sea (bill of lading).
                        if transport_type[0] == 1:
                            table = Constants.AIRWAY_BILL_TABLE_NAME
                            airway_bills = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "AirwayBillIds")
                        elif transport_type[0] == 2:
                            table = Constants.BILL_OF_LADING_TABLE_NAME
                            bill_of_lading = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "BillOfLadingIds")
                        pick_up_scentity_id = get_value_from_documentid(transaction_executor, Constants.PICK_UP_REQUESTS_TABLE, pick_up_request_id, "CarrierCompanyId")
                        if actual_sc_entity_id == pick_up_scentity_id[0]:
                            logger.info("Authorized!")
                            if transport_type[0] == 1:
                                is_picked_up = get_sub_details(transaction_executor, table, "RecieverApproval", airway_bills[0][-1], "isApproved")
                                if is_picked_up[0] == 0:
                                    # Record receiver approval on the latest airway bill.
                                    update_document(transaction_executor, Constants.AIRWAY_BILL_TABLE_NAME, "RecieverApproval.isApproved", airway_bills[0][-1], True)
                                    update_document(transaction_executor, Constants.AIRWAY_BILL_TABLE_NAME, "RecieverApproval.ApproverId", airway_bills[0][-1], pick_up_person_id)
                                    pick_up_location = get_value_from_documentid(transaction_executor, Constants.AIRWAY_BILL_TABLE_NAME, airway_bills[0][-1], "ImportAirportName")
                                    consignee_id = get_value_from_documentid(transaction_executor, Constants.AIRWAY_BILL_TABLE_NAME, airway_bills[0][-1], "SenderScEntityId")
                                else:
                                    return {
                                        'statusCode': 400,
                                        'body': "container already picked up"
                                    }
                            elif transport_type[0] == 2:
                                is_picked_up = get_sub_details(transaction_executor, table, "RecieverApproval", bill_of_lading[0][-1], "isApproved")
                                if is_picked_up[0] == 0:
                                    # Record receiver approval on the latest bill of lading.
                                    update_document(transaction_executor, Constants.BILL_OF_LADING_TABLE_NAME, "RecieverApproval.isApproved", bill_of_lading[0][-1], True)
                                    update_document(transaction_executor, Constants.BILL_OF_LADING_TABLE_NAME, "RecieverApproval.ApproverId", bill_of_lading[0][-1], pick_up_person_id)
                                    pick_up_location = get_value_from_documentid(transaction_executor, Constants.BILL_OF_LADING_TABLE_NAME, bill_of_lading[0][-1], "ImportPortName")
                                    consignee_id = get_value_from_documentid(transaction_executor, Constants.BILL_OF_LADING_TABLE_NAME, bill_of_lading[0][-1], "SenderScEntityId")
                                else:
                                    return {
                                        'statusCode': 400,
                                        'body': "Containers Already picked up"
                                    }
                            consignee_name = get_value_from_documentid(transaction_executor, Constants.SCENTITY_TABLE_NAME, consignee_id[0], "ScEntityName")
                            delivery_location = get_scentity_contact(transaction_executor, actual_sc_entity_id[0], "Address")
                            lorry_reciepts = get_value_from_documentid(transaction_executor, Constants.CONTAINER_TABLE_NAME, container_id, "LorryRecieptIds")
                            carrier_id = get_value_from_documentid(transaction_executor, Constants.LORRY_RECEIPT_TABLE_NAME, lorry_reciepts[0][-1], "CarrierId")
                            if carrier_id[0] == pick_up_scentity_id[0]:
                                message = "No request was made by buyer to pickup. Creating a new L/R to initiate import delivery."
                                lorry_reciept_id = create_lorry_reciept(transaction_executor, actual_sc_entity_id, pick_up_person_id, pick_up_location[0], delivery_location, consignee_id, consignee_name, True)
                                update_document_in_container(transaction_executor, container_id, "LorryRecieptIds", lorry_reciept_id[0])
                                return {
                                    'statusCode': 200,
                                    'body': {
                                        "Message": message,
                                        "LorryRecieptId": lorry_reciept_id[0]
                                    }
                                }
                            else:
                                message = "Pick up request was made by new carrier assigned by buyer."
                                update_document(transaction_executor, Constants.LORRY_RECEIPT_TABLE_NAME, "isPickedUp", lorry_reciepts[0][-1], True)
                                # FIX: previously returned `return_statement`, which is
                                # unbound in this branch and raised a NameError.
                                return {
                                    'statusCode': 200,
                                    'body': {
                                        "Message": message,
                                        "LorryRecieptId": lorry_reciepts[0][-1]
                                    }
                                }
                        else:
                            return {
                                'statusCode': 400,
                                'body': "Not Authorized!"
                            }
                    else:
                        # Container is customs-approved but unsafe; skip it
                        # unless every container turned out unsafe.
                        total_safe_containers = total_safe_containers - 1
                        if total_safe_containers > 0:
                            continue
                        return {
                            'statusCode': 400,
                            'body': "Not Container was safe. Pick up can't be made for any container"
                        }
                elif not isContainerSafe(transaction_executor, container_id):
                    # Neither customs-approved nor safe: skip this container.
                    continue
                else:
                    return {
                        'statusCode': 400,
                        'body': "Illegal=====Container was safe but not approved by customs"
                    }
            else:
                return {
                    'statusCode': 400,
                    'body': "Not Authorized!"
                }
        # FIX: every container was skipped via `continue`; previously the
        # function fell off the end and returned None here.
        return {
            'statusCode': 400,
            'body': "No container could be picked up"
        }
    else:
        return {
            'statusCode': 400,
            'body': "Check Request Id"
        }
def pick_up_for_import(event):
    """Entry point for an import pick up.

    Opens a QLDB driver session and executes the delivery transaction for
    the pick-up request / person identified in *event*.  Any failure is
    mapped to a generic 400-style response.
    """
    try:
        with create_qldb_driver() as qldb_driver:
            request_id = event["PickUpRequestId"]  # e.g. 96EioNwSjcq8HKLBRjcvPp
            person_id = event["PersonId"]
            txn_fn = lambda txn: deliver_product_to_distributor(txn, request_id, person_id)
            return qldb_driver.execute_lambda(txn_fn)
    except Exception:
        # NOTE(review): the exception is swallowed without logging; consider
        # logging it before returning the generic error response.
        return {
            'statusCode': 400,
            'body': 'Error in import pick up'
        }
"logging.getLogger",
"logging.basicConfig",
"sampledata.sample_data.get_value_from_documentid",
"register_person.get_scentity_contact",
"check_container_safety.isContainerSafe",
"connect_to_ledger.create_qldb_driver",
"register_person.get_scentityid_from_personid",
"sampledata.sample_data.update_docum... | [((153, 172), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (162, 172), False, 'from logging import basicConfig, getLogger, INFO\n'), ((173, 196), 'logging.basicConfig', 'basicConfig', ([], {'level': 'INFO'}), '(level=INFO)\n', (184, 196), False, 'from logging import basicConfig, getLogger, INFO\n'), ((793, 887), 'sampledata.sample_data.document_exist', 'document_exist', (['transaction_executor', 'Constants.PICK_UP_REQUESTS_TABLE', 'pick_up_request_id'], {}), '(transaction_executor, Constants.PICK_UP_REQUESTS_TABLE,\n pick_up_request_id)\n', (807, 887), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((891, 1006), 'sampledata.sample_data.update_document', 'update_document', (['transaction_executor', 'Constants.PICK_UP_REQUESTS_TABLE', '"""isAccepted"""', 'pick_up_request_id', '(True)'], {}), "(transaction_executor, Constants.PICK_UP_REQUESTS_TABLE,\n 'isAccepted', pick_up_request_id, True)\n", (906, 1006), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((1027, 1152), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.PICK_UP_REQUESTS_TABLE', 'pick_up_request_id', '"""PurchaseOrderId"""'], {}), "(transaction_executor, Constants.\n PICK_UP_REQUESTS_TABLE, pick_up_request_id, 'PurchaseOrderId')\n", (1052, 1152), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((1220, 1356), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.PURCHASE_ORDER_TABLE_NAME', 'purchase_order_id', '"""HighestPackagingLevelIds"""'], {}), "(transaction_executor, Constants.\n PURCHASE_ORDER_TABLE_NAME, purchase_order_id, 'HighestPackagingLevelIds')\n", (1245, 1356), False, 'from sampledata.sample_data import 
get_value_from_documentid, document_exist, update_document\n'), ((1488, 1574), 'sampledata.sample_data.document_exist', 'document_exist', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id'], {}), '(transaction_executor, Constants.CONTAINER_TABLE_NAME,\n container_id)\n', (1502, 1574), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((10481, 10501), 'connect_to_ledger.create_qldb_driver', 'create_qldb_driver', ([], {}), '()\n', (10499, 10501), False, 'from connect_to_ledger import create_qldb_driver\n'), ((1651, 1774), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id', '"""CertificateOfOriginId"""'], {}), "(transaction_executor, Constants.\n CONTAINER_TABLE_NAME, container_id, 'CertificateOfOriginId')\n", (1676, 1774), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((1801, 1916), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id', '"""PackingListId"""'], {}), "(transaction_executor, Constants.\n CONTAINER_TABLE_NAME, container_id, 'PackingListId')\n", (1826, 1916), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((1952, 2105), 'export_customs_approval.document_already_approved_by_customs', 'document_already_approved_by_customs', (['transaction_executor', '"""ImportApproval"""', 'Constants.CERTIFICATE_OF_ORIGIN_TABLE_NAME', 'certificate_of_origin_id[0]'], {}), "(transaction_executor, 'ImportApproval',\n Constants.CERTIFICATE_OF_ORIGIN_TABLE_NAME, certificate_of_origin_id[0])\n", (1988, 2105), False, 'from export_customs_approval import document_already_approved_by_customs\n'), ((2119, 2254), 'export_customs_approval.document_already_approved_by_customs', 
'document_already_approved_by_customs', (['transaction_executor', '"""ImportApproval"""', 'Constants.PACKING_LIST_TABLE_NAME', 'packing_list_id[0]'], {}), "(transaction_executor, 'ImportApproval',\n Constants.PACKING_LIST_TABLE_NAME, packing_list_id[0])\n", (2155, 2254), False, 'from export_customs_approval import document_already_approved_by_customs\n'), ((2394, 2429), 'copy.copy', 'copy.copy', (['total_containers_ordered'], {}), '(total_containers_ordered)\n', (2403, 2429), False, 'import copy\n'), ((2453, 2504), 'check_container_safety.isContainerSafe', 'isContainerSafe', (['transaction_executor', 'container_id'], {}), '(transaction_executor, container_id)\n', (2468, 2504), False, 'from check_container_safety import isContainerSafe\n'), ((2551, 2620), 'register_person.get_scentityid_from_personid', 'get_scentityid_from_personid', (['transaction_executor', 'pick_up_person_id'], {}), '(transaction_executor, pick_up_person_id)\n', (2579, 2620), False, 'from register_person import get_scentityid_from_personid, get_scentity_contact\n'), ((2661, 2776), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id', '"""TransportType"""'], {}), "(transaction_executor, Constants.\n CONTAINER_TABLE_NAME, container_id, 'TransportType')\n", (2686, 2776), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((3627, 3753), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.PICK_UP_REQUESTS_TABLE', 'pick_up_request_id', '"""CarrierCompanyId"""'], {}), "(transaction_executor, Constants.\n PICK_UP_REQUESTS_TABLE, pick_up_request_id, 'CarrierCompanyId')\n", (3652, 3753), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((9711, 9762), 'check_container_safety.isContainerSafe', 'isContainerSafe', 
(['transaction_executor', 'container_id'], {}), '(transaction_executor, container_id)\n', (9726, 9762), False, 'from check_container_safety import isContainerSafe\n'), ((3034, 3149), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id', '"""AirwayBillIds"""'], {}), "(transaction_executor, Constants.\n CONTAINER_TABLE_NAME, container_id, 'AirwayBillIds')\n", (3059, 3149), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((6613, 6729), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.SCENTITY_TABLE_NAME', 'consignee_id[0]', '"""ScEntityName"""'], {}), "(transaction_executor, Constants.\n SCENTITY_TABLE_NAME, consignee_id[0], 'ScEntityName')\n", (6638, 6729), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((6770, 6847), 'register_person.get_scentity_contact', 'get_scentity_contact', (['transaction_executor', 'actual_sc_entity_id[0]', '"""Address"""'], {}), "(transaction_executor, actual_sc_entity_id[0], 'Address')\n", (6790, 6847), False, 'from register_person import get_scentityid_from_personid, get_scentity_contact\n'), ((6891, 7008), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id', '"""LorryRecieptIds"""'], {}), "(transaction_executor, Constants.\n CONTAINER_TABLE_NAME, container_id, 'LorryRecieptIds')\n", (6916, 7008), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((7042, 7166), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.LORRY_RECEIPT_TABLE_NAME', 'lorry_reciepts[0][-1]', '"""CarrierId"""'], {}), "(transaction_executor, Constants.\n 
LORRY_RECEIPT_TABLE_NAME, lorry_reciepts[0][-1], 'CarrierId')\n", (7067, 7166), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((3370, 3487), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.CONTAINER_TABLE_NAME', 'container_id', '"""BillOfLadingIds"""'], {}), "(transaction_executor, Constants.\n CONTAINER_TABLE_NAME, container_id, 'BillOfLadingIds')\n", (3395, 3487), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((4020, 4123), 'create_purchase_order_to_manufacturer.get_sub_details', 'get_sub_details', (['transaction_executor', 'table', '"""RecieverApproval"""', 'airway_bills[0][-1]', '"""isApproved"""'], {}), "(transaction_executor, table, 'RecieverApproval',\n airway_bills[0][-1], 'isApproved')\n", (4035, 4123), False, 'from create_purchase_order_to_manufacturer import get_sub_details\n'), ((7423, 7589), 'export_transport_product.create_lorry_reciept', 'create_lorry_reciept', (['transaction_executor', 'actual_sc_entity_id', 'pick_up_person_id', 'pick_up_location[0]', 'delivery_location', 'consignee_id', 'consignee_name', '(True)'], {}), '(transaction_executor, actual_sc_entity_id,\n pick_up_person_id, pick_up_location[0], delivery_location, consignee_id,\n consignee_name, True)\n', (7443, 7589), False, 'from export_transport_product import create_lorry_reciept, update_document_in_container\n'), ((7607, 7715), 'export_transport_product.update_document_in_container', 'update_document_in_container', (['transaction_executor', 'container_id', '"""LorryRecieptIds"""', 'lorry_reciept_id[0]'], {}), "(transaction_executor, container_id,\n 'LorryRecieptIds', lorry_reciept_id[0])\n", (7635, 7715), False, 'from export_transport_product import create_lorry_reciept, update_document_in_container\n'), ((8318, 8438), 'sampledata.sample_data.update_document', 'update_document', 
(['transaction_executor', 'Constants.LORRY_RECEIPT_TABLE_NAME', '"""isPickedUp"""', 'lorry_reciepts[0][-1]', '(True)'], {}), "(transaction_executor, Constants.LORRY_RECEIPT_TABLE_NAME,\n 'isPickedUp', lorry_reciepts[0][-1], True)\n", (8333, 8438), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((4210, 4343), 'sampledata.sample_data.update_document', 'update_document', (['transaction_executor', 'Constants.AIRWAY_BILL_TABLE_NAME', '"""RecieverApproval.isApproved"""', 'airway_bills[0][-1]', '(True)'], {}), "(transaction_executor, Constants.AIRWAY_BILL_TABLE_NAME,\n 'RecieverApproval.isApproved', airway_bills[0][-1], True)\n", (4225, 4343), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((4372, 4518), 'sampledata.sample_data.update_document', 'update_document', (['transaction_executor', 'Constants.AIRWAY_BILL_TABLE_NAME', '"""RecieverApproval.ApproverId"""', 'airway_bills[0][-1]', 'pick_up_person_id'], {}), "(transaction_executor, Constants.AIRWAY_BILL_TABLE_NAME,\n 'RecieverApproval.ApproverId', airway_bills[0][-1], pick_up_person_id)\n", (4387, 4518), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((4567, 4695), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.AIRWAY_BILL_TABLE_NAME', 'airway_bills[0][-1]', '"""ImportAirportName"""'], {}), "(transaction_executor, Constants.\n AIRWAY_BILL_TABLE_NAME, airway_bills[0][-1], 'ImportAirportName')\n", (4592, 4695), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((4739, 4866), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.AIRWAY_BILL_TABLE_NAME', 'airway_bills[0][-1]', '"""SenderScEntityId"""'], {}), "(transaction_executor, Constants.\n 
AIRWAY_BILL_TABLE_NAME, airway_bills[0][-1], 'SenderScEntityId')\n", (4764, 4866), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((5297, 5402), 'create_purchase_order_to_manufacturer.get_sub_details', 'get_sub_details', (['transaction_executor', 'table', '"""RecieverApproval"""', 'bill_of_lading[0][-1]', '"""isApproved"""'], {}), "(transaction_executor, table, 'RecieverApproval',\n bill_of_lading[0][-1], 'isApproved')\n", (5312, 5402), False, 'from create_purchase_order_to_manufacturer import get_sub_details\n'), ((5488, 5626), 'sampledata.sample_data.update_document', 'update_document', (['transaction_executor', 'Constants.BILL_OF_LADING_TABLE_NAME', '"""RecieverApproval.isApproved"""', 'bill_of_lading[0][-1]', '(True)'], {}), "(transaction_executor, Constants.BILL_OF_LADING_TABLE_NAME,\n 'RecieverApproval.isApproved', bill_of_lading[0][-1], True)\n", (5503, 5626), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((5655, 5806), 'sampledata.sample_data.update_document', 'update_document', (['transaction_executor', 'Constants.BILL_OF_LADING_TABLE_NAME', '"""RecieverApproval.ApproverId"""', 'bill_of_lading[0][-1]', 'pick_up_person_id'], {}), "(transaction_executor, Constants.BILL_OF_LADING_TABLE_NAME,\n 'RecieverApproval.ApproverId', bill_of_lading[0][-1], pick_up_person_id)\n", (5670, 5806), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((5855, 5985), 'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.BILL_OF_LADING_TABLE_NAME', 'bill_of_lading[0][-1]', '"""ImportPortName"""'], {}), "(transaction_executor, Constants.\n BILL_OF_LADING_TABLE_NAME, bill_of_lading[0][-1], 'ImportPortName')\n", (5880, 5985), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n'), ((6029, 6161), 
'sampledata.sample_data.get_value_from_documentid', 'get_value_from_documentid', (['transaction_executor', 'Constants.BILL_OF_LADING_TABLE_NAME', 'bill_of_lading[0][-1]', '"""SenderScEntityId"""'], {}), "(transaction_executor, Constants.\n BILL_OF_LADING_TABLE_NAME, bill_of_lading[0][-1], 'SenderScEntityId')\n", (6054, 6161), False, 'from sampledata.sample_data import get_value_from_documentid, document_exist, update_document\n')] |
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Regression test for iscpy.py
Make sure you are running this against a database that can be destroyed.
DO NOT EVER RUN THIS TEST AGAINST A PRODUCTION DATABASE.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import unittest
import os
import iscpy
NAMED_FILE = 'test_data/named.example.conf'
class TestNamedImport(unittest.TestCase):
  """Round-trip tests for iscpy's parsing and serialization of ISC-style
  (BIND named.conf) configuration text."""
  def setUp(self):
    # Minimal named.conf fragment exercising include, options and controls
    # clauses (both with and without whitespace around braces).
    self.named_file = (
        'include "/home/jcollins/roster-dns-management/test/test_data/rndc.key";'
        'options { pid-file "test_data/named.pid";};\n'
        'controls { inet 127.0.0.1 port 35638 allow{localhost;} keys {rndc-key;};};')
    self.maxDiff = None
  def testSingleOption(self):
    """Serialize followed by Deserialize normalizes statement formatting
    (note the reordered/normalized output is the expected behavior)."""
    test_string = (
        u'single-option;\n'
        u'boolean-option yes;\n'
        u'list-option { a; b; };\n'
        u'options {};\n')
    self.assertEqual(iscpy.Deserialize(iscpy.Serialize(test_string)),
        u'single-option ;\n'
        u'boolean-option yes;\n'
        u'options { };\n'
        u'list-option { a;\n'
        u'b; };')
  def testParse(self):
    """Explode / ParseISCString / MakeISC round-trip on the fixture."""
    # Tokenization: comments scrubbed, then split into tokens.
    self.assertEqual(iscpy.Explode(iscpy.ScrubComments(self.named_file)),
        ['include "/home/jcollins/roster-dns-management/test/test_data/rndc.key"',
        ';', 'options', '{', 'pid-file "test_data/named.pid"', ';', '}', ';',
        'controls', '{', 'inet 127.0.0.1 port 35638 allow', '{', 'localhost',
        ';', '}', 'keys', '{', 'rndc-key', ';', '}', ';', '}', ';'])
    # Structured parse into nested dicts/lists.
    self.assertEqual(iscpy.ParseISCString(self.named_file),
        {'include': '"/home/jcollins/roster-dns-management/test/test_data/rndc.key"',
         'options': {'pid-file': '"test_data/named.pid"'},
         'controls': [{'inet 127.0.0.1 port 35638 allow': {'localhost': True}},
                      {'keys': {'rndc-key': True}}]})
    # Re-serialization of the parsed structure.
    self.assertEqual(iscpy.MakeISC(iscpy.ParseISCString(self.named_file)),
        'include "/home/jcollins/roster-dns-management/test/test_data/rndc.key";\n'
        'options { pid-file "test_data/named.pid"; };\n'
        'controls { inet 127.0.0.1 port 35638 allow { localhost; } keys { rndc-key; }; };')
# Run the test suite when this file is executed directly.
if( __name__ == '__main__' ):
  unittest.main()
| [
"unittest.main",
"iscpy.ParseISCString",
"iscpy.ScrubComments",
"iscpy.Serialize"
] | [((3717, 3732), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3730, 3732), False, 'import unittest\n'), ((3058, 3095), 'iscpy.ParseISCString', 'iscpy.ParseISCString', (['self.named_file'], {}), '(self.named_file)\n', (3078, 3095), False, 'import iscpy\n'), ((2460, 2488), 'iscpy.Serialize', 'iscpy.Serialize', (['test_string'], {}), '(test_string)\n', (2475, 2488), False, 'import iscpy\n'), ((2687, 2723), 'iscpy.ScrubComments', 'iscpy.ScrubComments', (['self.named_file'], {}), '(self.named_file)\n', (2706, 2723), False, 'import iscpy\n'), ((3411, 3448), 'iscpy.ParseISCString', 'iscpy.ParseISCString', (['self.named_file'], {}), '(self.named_file)\n', (3431, 3448), False, 'import iscpy\n')] |
import functools
import logging
from typing import Callable
from logging_context.context.base import BaseContext
from .context import get_logging_context
def context_logging_factory(record_factory: Callable, context: BaseContext) -> Callable:
    """Return a log-record factory that stamps each record with *context*.

    The returned callable delegates record creation to *record_factory* and
    then attaches ``str(context)`` as the record's ``context`` attribute.
    ``str(context)`` is evaluated per record, so a live context object is
    re-rendered on every log call.
    """
    @functools.wraps(record_factory)
    def factory_with_context(*factory_args, **factory_kwargs):
        log_record = record_factory(*factory_args, **factory_kwargs)
        log_record.context = str(context)
        return log_record
    return factory_with_context
def setup_logging_context(context: BaseContext = None) -> None:
    """Install a log-record factory that stamps records with *context*.

    Falls back to the process-wide context from ``get_logging_context()``
    when no context is supplied.  Installation is idempotent: a probe record
    is created first and the factory is only wrapped if records do not
    already carry a ``context`` attribute, so repeated calls do not stack
    wrappers.

    :param context: context object to attach to log records; ``None`` means
        "use the global logging context".
    """
    if context is None:
        # Explicit ``is None`` (the original used truthiness): a falsy but
        # valid context object (e.g. one whose __str__ is empty) must not be
        # silently replaced by the global context.
        context = get_logging_context()
    current_factory = logging.getLogRecordFactory()
    # Probe record used only to detect whether the factory is already wrapped.
    probe = current_factory(__name__, logging.DEBUG, __file__, 0, "", [], None)
    if not hasattr(probe, "context"):
        logging.setLogRecordFactory(
            context_logging_factory(current_factory, context=context))
| [
"logging.getLogRecordFactory",
"functools.wraps"
] | [((252, 283), 'functools.wraps', 'functools.wraps', (['record_factory'], {}), '(record_factory)\n', (267, 283), False, 'import functools\n'), ((595, 624), 'logging.getLogRecordFactory', 'logging.getLogRecordFactory', ([], {}), '()\n', (622, 624), False, 'import logging\n')] |
# %% [Algorithm 1c Loop]
# # MUSHROOMS
# %% [markdown]
# ## Binary Classification
# %% [markdown]
# ### Imports
# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# %% [markdown]
# ### Load Data
# Load the UCI mushroom dataset from a hard-coded local path.
# NOTE(review): machine-specific Windows path — consider a relative path or CLI argument.
dataset = pd.read_csv(r"C:\Users\yxie367\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
#dataset = pd.read_csv(r"C:\Users\xieya\Documents\GitHub\Mushrooms\DATA\mushrooms.csv")
# %% [markdown]
# ### View Data and Informations
# %%
dataset.head()
# %%
dataset.info()
# %%
edible, poisonous = dataset['class'].value_counts()
# print("Edible:\t  ", edible,"\nPoisonous:", poisonous)
# %%
# Categorical to numerical: encode the target label ('e' edible -> 0, 'p' poisonous -> 1).
labels = {'e': 0, 'p': 1}
dataset['class'].replace(labels, inplace=True)
edible, poisonous = dataset['class'].value_counts()
#print("0 - Edible:   ", edible,"\n1 - Poisonous:", poisonous)
# %% [markdown]
# # NN1 Stalk Root - Rooted (r)
# %% [markdown]
# ### Split Dataset
# %% [markdown]
# #### Get the Labels
# %%
# Features X (22 categorical columns) and binary target y.
X, y = dataset.drop('class', axis=1), dataset['class'].copy()
#print("X:",X.shape,"\ny:",y.shape)
# %% [markdown]
# #### Train Set and Test Set
# Accumulators for the repeated-trial experiment below.
total_error_1 = 0     # cumulative test errors of NN1 across trials
total_error_2 = 0     # cumulative test errors of NN2 across trials
total_error_comb = 0  # cumulative errors of the confidence-combined picker
randnum = np.arange(2,44,4)   # one random seed per trial
num_trials = len(randnum)
record = ""        # per-run summary lines
wrong_record = ""  # log of wrong combined predictions
run = 1            # 1-based trial counter
# %% Data cleaning
from sklearn.model_selection import train_test_split
# Partition the dataset on stalk-root == "r" with a vectorized boolean mask.
# The original appended one row at a time with DataFrame.append, which is
# O(n^2) and was deprecated and then removed in pandas 2.0.  Row order is
# unchanged, and all downstream consumers (train_test_split / shuffle /
# keras fit) are positional, so the differing index labels are harmless.
_is_rooted = X["stalk-root"] == "r"
X_white = X[_is_rooted].copy()
y_white = y[_is_rooted].copy()
X_not_white = X[~_is_rooted].copy()
y_not_white = y[~_is_rooted].copy()
# %% Data cleaning pt2
# Partition the dataset on odor == "a" (almond) with a vectorized boolean
# mask, replacing the original O(n^2) row-by-row DataFrame.append loop
# (DataFrame.append was removed in pandas 2.0).  Row order is preserved and
# downstream use is positional, so index-label differences do not matter.
_is_almond = X["odor"] == "a"
X_green = X[_is_almond].copy()
y_green = y[_is_almond].copy()
X_not_green = X[~_is_almond].copy()
y_not_green = y[~_is_almond].copy()
# %%
# One independent trial per seed j: split the data, train NN1 (on rows with
# stalk-root != "r") and NN2 (on rows with odor != "a"), fit a linear
# confidence model per network from its entropy ("KL_div") bins, then combine
# the two networks by picking, per test sample, the prediction of the network
# with the higher confidence (argmax over C_t,i).
for j in randnum:
    # Hold out enough rows so each training pool has 6905 samples before the
    # validation split below.
    X_train_not_white, X_test_not_white, y_train_not_white, y_test_not_white = train_test_split(X_not_white, y_not_white, test_size=1-(6905/(8124-len(X_white))), random_state=j)
    X_train_not_green, X_test_not_green, y_train_not_green, y_test_not_green = train_test_split(X_not_green, y_not_green, test_size=1-(6905/(8124-len(X_green))), random_state=j)
    X_train_green = (X_train_not_green)
    y_train_green = (y_train_not_green)
    X_train_white = (X_train_not_white)
    y_train_white = (y_train_not_white)
    # %%
    from sklearn.utils import shuffle
    X_train_full1 = shuffle(X_train_white, random_state=j)
    # Both networks are evaluated on the same 4000-row slice of the full data.
    X_test = shuffle(X, random_state=j).iloc[4000:8000]
    y_train_full1 = shuffle(y_train_white, random_state=j)
    y_test = shuffle(y, random_state=j).iloc[4000:8000]
    # %% [markdown]
    # #### Validation Set
    # %%
    # First 500 shuffled rows become the validation set.
    X_valid1, X_train1 = X_train_full1[:500], X_train_full1[500:]
    y_valid1, y_train1 = y_train_full1[:500], y_train_full1[500:]
    # print("X_train:", X_train1.shape[0], "y_train", y_train1.shape[0])
    # print("X_valid: ", X_valid1.shape[0], "y_valid ", y_valid1.shape[0])
    # print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
    # %% [markdown]
    # ### Prepare the Data
    # %% [markdown]
    # #### Data Transformation
    # %%
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OrdinalEncoder
    from sklearn.compose import ColumnTransformer
    # Ordinal-encode every (categorical) column, selected by name from X.
    cat_attr_pipeline = Pipeline([
        ('encoder', OrdinalEncoder())
    ])
    cols = list(X)
    pipeline = ColumnTransformer([
        ('cat_attr_pipeline', cat_attr_pipeline, cols)
    ])
    # NOTE(review): fit_transform is re-fit on each subset independently;
    # category-to-integer mappings may differ between train/valid/test.
    X_train1 = pipeline.fit_transform(X_train1)
    X_valid1 = pipeline.fit_transform(X_valid1)
    X_test1 = pipeline.fit_transform(X_test)
    # %% [markdown]
    # ### Neural Network
    # %% [markdown]
    # #### Model
    # %%
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import InputLayer, Dense
    # %%
    # tf.random.set_seed(j)
    tf.random.set_random_seed(j)  # TF1.x API; tf.random.set_seed in TF2
    # %%
    model1 = Sequential([
        InputLayer(input_shape=(22,)), # input layer
        Dense(45, activation='relu'), # hidden layer
        Dense(1, activation='sigmoid') # output layer
    ])
    # %%
    #model1.summary()
    # %% [markdown]
    # #### Compile the Model
    # %%
    model1.compile(loss='binary_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])
    # %% [markdown]
    # #### Prepare Callbacks
    # %%
    from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
                                    save_best_only=True)
    early_stopping_cb = EarlyStopping(patience=3,
                                    restore_best_weights=True)
    # %% [markdown]
    # ### Training
    # %%
    train_model1 = model1.fit(X_train1, y_train1,
                        epochs=100,
                        validation_data=(X_valid1, y_valid1),
                        callbacks=[checkpoint_cb, early_stopping_cb])
    # %% [markdown]
    # ### Evaluate the Best Model on Test Set
    # %%
    results1 = model1.evaluate(X_test1, y_test)
    # print("test loss, test acc:", results1)
    # %% [markdown]
    # ### Make Some Predictions
    # %%
    X_new1 = X_test1[:5]
    y_prob1 = model1.predict(X_new1)
    # print(y_prob.round(3))
    # %%
    y_pred1 = (model1.predict(X_new1) > 0.5).astype("int32")
    # print(y_pred)
    y_test_pred = (model1.predict(X_test1) > 0.5).astype("int32")
    # %% [markdown]
    # ## KL Divergence
    # %%
    # Assemble per-sample table: predicted probability, hard prediction, truth.
    # X_new = X_test[:5]
    X_df1 = pd.DataFrame(model1.predict(X_test1))
    y_test_pred1 = pd.DataFrame(y_test_pred).reset_index(drop=True)
    X_df1 = pd.concat([X_df1, y_test_pred1], axis=1)
    y_test1 = y_test.reset_index(drop=True)
    X_df1 = pd.concat([X_df1, y_test1], axis=1)
    X_df1.columns = ["X_pred","y_pred","y_actual"]
    #print(X_df1)
    # %%
    import math
    table1 = pd.DataFrame(columns=["KL_div","abs_distance","correctness"])
    for i in range(0,len(X_df1)):
        # "KL divergence": actually the binary entropy of p; 0 when the
        # network is certain, ln(2) when p == 0.5.
        p = X_df1.loc[i,"X_pred"]
        try:
            kl = -(p*math.log(p) + (1-p)*math.log(1-p))
        except:
            kl = 0  # log(0) at p in {0, 1}: treat as fully confident
        table1.loc[i,"KL_div"] = kl
        # absolute distance of p from the 0.5 decision boundary, scaled to [0, 1]
        abs_dist = 2*abs(0.5-p)
        table1.loc[i,"abs_distance"] = abs_dist
        # correctness
        y_pred1 = X_df1.loc[i,"y_pred"]
        y_act1 = X_df1.loc[i,"y_actual"]
        if y_pred1 == y_act1:
            table1.loc[i,"correctness"] = 1 # correct prediction
        else:
            table1.loc[i,"correctness"] = 0 # wrong prediction
        table1.loc[i,"y_pred"] = y_pred1
    #print(table1)
    # %%
    # Bin samples by entropy and compute per-bin accuracy.
    table1["count"] = 1
    correctness1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
    correctness1["percent"] = 100*(correctness1["correctness"]/correctness1["count"])
    #print(correctness1)
    # %%
    index = []
    for i in (correctness1.index):
        index.append(str(i))
    plt.bar(index,correctness1["percent"], width=0.7)
    for index,data in enumerate(correctness1["percent"]):
        plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
    plt.ylim(0,120)
    plt.xlabel("KL Divergence")
    plt.ylabel("% correct")
    # %% [markdown]
    # ### Confidence
    # %%
    kl1 = table1[["correctness","count"]].groupby(pd.cut(table1["KL_div"], np.arange(0, 0.80, 0.1))).apply(sum)
    kl1["percent"] = (kl1["correctness"]/kl1["count"])
    kl1.dropna(inplace=True)
    # NOTE(review): x positions assume no bin was dropped by dropna above.
    plt.scatter(np.arange(0, 0.70, 0.1), kl1["percent"])
    plt.xlabel("KL Divergence")
    plt.ylabel("% correct")
    # %%
    # Linear Regression: accuracy as a linear function of entropy.
    from sklearn.linear_model import LinearRegression
    x_reg1 = np.arange(0, 0.70, 0.1).reshape((-1, 1))
    y_reg1 = kl1["percent"]
    reg_model1 = LinearRegression().fit(x_reg1,y_reg1)
    # %%
    # print('intercept(alpha):', reg_model1.intercept_)
    # print('slope(theta):', reg_model1.coef_)
    # %% [markdown]
    # # NN2 Odor - Almond (a)
    # %% [markdown]
    # #### Train Set and Test Set
    # %%
    from sklearn.utils import shuffle
    X_train_full2 = shuffle(X_train_green, random_state=j)
    # X_test2 = shuffle(X_test_green, random_state=j)
    y_train_full2 = shuffle(y_train_green, random_state=j)
    # y_test2 = shuffle(y_test_green, random_state=j)
    # %% [markdown]
    # #### Validation Set
    # %%
    X_valid2, X_train2 = X_train_full2[:500], X_train_full2[500:]
    y_valid2, y_train2 = y_train_full2[:500], y_train_full2[500:]
    # print("X_train:", X_train2.shape[0], "y_train", y_train2.shape[0])
    # print("X_valid: ", X_valid2.shape[0], "y_valid ", y_valid2.shape[0])
    # print("X_test: ", X_test.shape[0], "y_test ", X_test.shape[0])
    # %% [markdown]
    # ### Prepare the Data
    # %% [markdown]
    # #### Data Transformation
    # %%
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OrdinalEncoder
    from sklearn.compose import ColumnTransformer
    cat_attr_pipeline = Pipeline([
        ('encoder', OrdinalEncoder())
    ])
    cols = list(X)
    pipeline = ColumnTransformer([
        ('cat_attr_pipeline', cat_attr_pipeline, cols)
    ])
    X_train2 = pipeline.fit_transform(X_train2)
    X_valid2 = pipeline.fit_transform(X_valid2)
    X_test2 = pipeline.fit_transform(X_test)
    y_test2 = y_test
    # %% [markdown]
    # ### Neural Network
    # %% [markdown]
    # #### Model
    # %%
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import InputLayer, Dense
    tf.random.set_random_seed(j)
    # %%
    # Same architecture as model1, trained on the odor-based pool.
    model2 = Sequential([
        InputLayer(input_shape=(22,)), # input layer
        Dense(45, activation='relu'), # hidden layer
        Dense(1, activation='sigmoid') # output layer
    ])
    # %%
    #model2.summary()
    # %% [markdown]
    # #### Compile the Model
    # %%
    model2.compile(loss='binary_crossentropy',
                optimizer='sgd',
                metrics=['accuracy'])
    # %% [markdown]
    # #### Prepare Callbacks
    # %%
    from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
    checkpoint_cb = ModelCheckpoint('../SavedModels/best_model.h5',
                                    save_best_only=True)
    early_stopping_cb = EarlyStopping(patience=3,
                                    restore_best_weights=True)
    # %% [markdown]
    # ### Training
    # %%
    train_model2 = model2.fit(X_train2, y_train2,
                        epochs=100,
                        validation_data=(X_valid2, y_valid2),
                        callbacks=[checkpoint_cb, early_stopping_cb])
    # %% [markdown]
    # ### Evaluate the Best Model on Test Set
    # %%
    results2 = model2.evaluate(X_test2, y_test2)
    # print("test loss, test acc:", results2)
    # %% [markdown]
    # ### Make Some Predictions
    # %%
    # y_pred2 = (model2.predict(X_new2) > 0.5).astype("int32")
    # print(y_pred2)
    y_test_pred2 = (model2.predict(X_test2) > 0.5).astype("int32")
    # %% [markdown]
    # ## KL Divergence
    # %%
    # X_new = X_test[:5]
    X_df2 = pd.DataFrame(model2.predict(X_test2))
    y_test_pred2 = pd.DataFrame(y_test_pred2).reset_index(drop=True)
    X_df2 = pd.concat([X_df2, y_test_pred2], axis=1)
    y_test2 = y_test2.reset_index(drop=True)
    X_df2 = pd.concat([X_df2, y_test2], axis=1)
    X_df2.columns = ["X_pred","y_pred","y_actual"]
    #print(X_df2)
    # %%
    import math
    table2 = pd.DataFrame(columns=["KL_div","abs_distance","y_pred","correctness"])
    for i in range(0,len(X_df2)):
        # Binary entropy of p, as for table1.
        p = X_df2.loc[i,"X_pred"]
        # NOTE(review): unlike table1's try/except, this guard only covers
        # p == 0; p == 1 would raise on math.log(1-p). Also kl = 1 here vs
        # kl = 0 in table1 — presumably unintentional asymmetry; verify.
        if p > 0:
            kl = -(p*math.log(p) + (1-p)*math.log(1-p))
        else:
            kl = 1
        table2.loc[i,"KL_div"] = kl
        # absolute distance from the decision boundary
        abs_dist = 2*abs(0.5-p)
        table2.loc[i,"abs_distance"] = abs_dist
        # correctness
        y_pred = X_df2.loc[i,"y_pred"]
        y_act = X_df2.loc[i,"y_actual"]
        if y_pred == y_act:
            table2.loc[i,"correctness"] = 1 # correct prediction
        else:
            table2.loc[i,"correctness"] = 0 # wrong prediction
        table2.loc[i,"y_pred"] = y_pred
    #print(table2)
    # %%
    table2["count"] = 1
    correctness2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
    correctness2["percent"] = 100*(correctness2["correctness"]/correctness2["count"])
    #print(correctness2)
    # %%
    index = []
    for i in (correctness2.index):
        index.append(str(i))
    plt.bar(index,correctness2["percent"], width=0.7)
    for index,data in enumerate(correctness2["percent"]):
        plt.text(x=index , y =data+1 , s=f"{round(data,2)}" , fontdict=dict(fontsize=15),ha='center')
    plt.ylim(0,120)
    plt.xlabel("KL Divergence")
    plt.ylabel("% correct")
    # %% [markdown]
    # ### Confidence
    # %%
    kl2 = table2[["correctness","count"]].groupby(pd.cut(table2["KL_div"], np.arange(0, 0.8, 0.1))).apply(sum)
    kl2["percent"] = (kl2["correctness"]/kl2["count"])
    kl2.dropna(inplace=True)
    plt.scatter(np.arange(0, 0.70, 0.1), kl2["percent"])
    # print(kl)
    # print(np.arange(0, 0.7, 0.05))
    # %%
    # Linear Regression
    from sklearn.linear_model import LinearRegression
    x_reg2 = np.arange(0, 0.7, 0.1).reshape((-1, 1))
    y_reg2 = kl2["percent"]
    reg_model2 = LinearRegression().fit(x_reg2,y_reg2)
    # %%
    # print('intercept(alpha):', reg_model2.intercept_)
    # print('slope(theta):', reg_model2.coef_)
    # %% [markdown]
    # ## Algorithm C: It = argmax(Ct,i)
    # %%
    # Correct answer
    ans = pd.DataFrame(X_df2["y_actual"])
    # NN1 regression coefficients (alpha1/alpha2 are unused below)
    alpha1 = reg_model1.intercept_
    theta1 = reg_model1.coef_
    # NN2
    alpha2 = reg_model2.intercept_
    theta2 = reg_model2.coef_
    # %%
    # Creating NN tables: confidence is a linear function of entropy,
    # conf = 1 + theta * KL_div (theta is negative when accuracy falls
    # with entropy, so higher entropy -> lower confidence).
    nn1 = table1.drop(["abs_distance","correctness"], axis=1)
    nn1["conf"] = 1 + theta1 * nn1["KL_div"]
    nn2 = table2.drop(["abs_distance","correctness"], axis=1)
    nn2["conf"] = 1 + theta2 * nn2["KL_div"]
    # nn2
    # %%
    # Determing higher confidence NN and choosing that arm
    for i in range(0,len(nn1)):
        if nn1.loc[i,"conf"] > nn2.loc[i,"conf"]:
            ans.loc[i,"y_pred"] = nn1.loc[i,"y_pred"]
            ans.loc[i,"NN"] = 1
            ans.loc[i,"conf"] = nn1.loc[i,"conf"]
        else:
            ans.loc[i,"y_pred"] = nn2.loc[i,"y_pred"]
            ans.loc[i,"NN"] = 2
            ans.loc[i,"conf"] = nn2.loc[i,"conf"]
    # ans
    # %% [markdown]
    # #### Comparing performance
    # %%
    # NN1 performance: count of wrong test predictions
    cost1 = 0
    for i in range(0,len(nn1)):
        if nn1.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
            cost1 += 1
        else:
            pass
    # NN2 performance
    cost2 = 0
    for i in range(0,len(nn2)):
        if nn2.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
            cost2 += 1
        else:
            pass
    # Combined performance: errors of the confidence-picked arm, with a log
    # line recording which arm was wrong and at what confidence.
    cost3 = 0
    for i in range(0,len(nn1)):
        nn = ans.loc[i,"NN"]
        nn_conf = ans.loc[i,"conf"]
        if ans.loc[i,"y_pred"] != ans.loc[i,"y_actual"]:
            cost3 += 1
            wrong_record = wrong_record + (f"Run:{run} - Wrong NN:{nn}, Conf:{nn_conf}") + "\n"
        else:
            pass
    # %%
    # Accumulate per-trial error counts into the experiment totals.
    record = record+(f"Run:{run} - Error count for NN1:{cost1}, NN2:{cost2}, Combined:{cost3}") + "\n"
    total_error_1 += cost1
    total_error_2 += cost2
    total_error_comb += cost3
    print(f"Run {run} complete!")
    run+=1
print(record)
print(f"Average error count for NN1:{total_error_1/num_trials}, NN2:{total_error_2/num_trials}, Combined:{total_error_comb/num_trials}")
#%%
# print(wrong_record)
# %%
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"math.log",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"sklearn.compose.ColumnTransformer",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.InputLay... | [((299, 388), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\yxie367\\\\Documents\\\\GitHub\\\\Mushrooms\\\\DATA\\\\mushrooms.csv"""'], {}), "(\n 'C:\\\\Users\\\\yxie367\\\\Documents\\\\GitHub\\\\Mushrooms\\\\DATA\\\\mushrooms.csv')\n", (310, 388), True, 'import pandas as pd\n'), ((1241, 1260), 'numpy.arange', 'np.arange', (['(2)', '(44)', '(4)'], {}), '(2, 44, 4)\n', (1250, 1260), True, 'import numpy as np\n'), ((1406, 1420), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1418, 1420), True, 'import pandas as pd\n'), ((1435, 1449), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1447, 1449), True, 'import pandas as pd\n'), ((1460, 1486), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1469, 1486), True, 'import pandas as pd\n'), ((1501, 1527), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1510, 1527), True, 'import pandas as pd\n'), ((1854, 1868), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1866, 1868), True, 'import pandas as pd\n'), ((1883, 1897), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1895, 1897), True, 'import pandas as pd\n'), ((1908, 1934), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1917, 1934), True, 'import pandas as pd\n'), ((1949, 1975), 'pandas.Series', 'pd.Series', ([], {'dtype': '"""float64"""'}), "(dtype='float64')\n", (1958, 1975), True, 'import pandas as pd\n'), ((2871, 2909), 'sklearn.utils.shuffle', 'shuffle', (['X_train_white'], {'random_state': 'j'}), '(X_train_white, random_state=j)\n', (2878, 2909), False, 'from sklearn.utils import shuffle\n'), ((2986, 3024), 'sklearn.utils.shuffle', 'shuffle', (['y_train_white'], {'random_state': 'j'}), '(y_train_white, random_state=j)\n', (2993, 3024), False, 'from sklearn.utils import shuffle\n'), ((3900, 3967), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('cat_attr_pipeline', 
cat_attr_pipeline, cols)]"], {}), "([('cat_attr_pipeline', cat_attr_pipeline, cols)])\n", (3917, 3967), False, 'from sklearn.compose import ColumnTransformer\n'), ((4395, 4423), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['j'], {}), '(j)\n', (4420, 4423), True, 'import tensorflow as tf\n'), ((5003, 5071), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""../SavedModels/best_model.h5"""'], {'save_best_only': '(True)'}), "('../SavedModels/best_model.h5', save_best_only=True)\n", (5018, 5071), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((5133, 5185), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)', 'restore_best_weights': '(True)'}), '(patience=3, restore_best_weights=True)\n', (5146, 5185), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((6195, 6235), 'pandas.concat', 'pd.concat', (['[X_df1, y_test_pred1]'], {'axis': '(1)'}), '([X_df1, y_test_pred1], axis=1)\n', (6204, 6235), True, 'import pandas as pd\n'), ((6292, 6327), 'pandas.concat', 'pd.concat', (['[X_df1, y_test1]'], {'axis': '(1)'}), '([X_df1, y_test1], axis=1)\n', (6301, 6327), True, 'import pandas as pd\n'), ((6436, 6499), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['KL_div', 'abs_distance', 'correctness']"}), "(columns=['KL_div', 'abs_distance', 'correctness'])\n", (6448, 6499), True, 'import pandas as pd\n'), ((7532, 7582), 'matplotlib.pyplot.bar', 'plt.bar', (['index', "correctness1['percent']"], {'width': '(0.7)'}), "(index, correctness1['percent'], width=0.7)\n", (7539, 7582), True, 'import matplotlib.pyplot as plt\n'), ((7746, 7762), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(120)'], {}), '(0, 120)\n', (7754, 7762), True, 'import matplotlib.pyplot as plt\n'), ((7766, 7793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL Divergence"""'], {}), "('KL Divergence')\n", (7776, 7793), True, 'import matplotlib.pyplot as plt\n'), 
((7798, 7821), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% correct"""'], {}), "('% correct')\n", (7808, 7821), True, 'import matplotlib.pyplot as plt\n'), ((8131, 8158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL Divergence"""'], {}), "('KL Divergence')\n", (8141, 8158), True, 'import matplotlib.pyplot as plt\n'), ((8163, 8186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% correct"""'], {}), "('% correct')\n", (8173, 8186), True, 'import matplotlib.pyplot as plt\n'), ((8704, 8742), 'sklearn.utils.shuffle', 'shuffle', (['X_train_green'], {'random_state': 'j'}), '(X_train_green, random_state=j)\n', (8711, 8742), False, 'from sklearn.utils import shuffle\n'), ((8817, 8855), 'sklearn.utils.shuffle', 'shuffle', (['y_train_green'], {'random_state': 'j'}), '(y_train_green, random_state=j)\n', (8824, 8855), False, 'from sklearn.utils import shuffle\n'), ((9729, 9796), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('cat_attr_pipeline', cat_attr_pipeline, cols)]"], {}), "([('cat_attr_pipeline', cat_attr_pipeline, cols)])\n", (9746, 9796), False, 'from sklearn.compose import ColumnTransformer\n'), ((10208, 10236), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['j'], {}), '(j)\n', (10233, 10236), True, 'import tensorflow as tf\n'), ((10816, 10884), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""../SavedModels/best_model.h5"""'], {'save_best_only': '(True)'}), "('../SavedModels/best_model.h5', save_best_only=True)\n", (10831, 10884), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((10946, 10998), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)', 'restore_best_weights': '(True)'}), '(patience=3, restore_best_weights=True)\n', (10959, 10998), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((11914, 11954), 'pandas.concat', 'pd.concat', (['[X_df2, y_test_pred2]'], {'axis': '(1)'}), '([X_df2, 
y_test_pred2], axis=1)\n', (11923, 11954), True, 'import pandas as pd\n'), ((12012, 12047), 'pandas.concat', 'pd.concat', (['[X_df2, y_test2]'], {'axis': '(1)'}), '([X_df2, y_test2], axis=1)\n', (12021, 12047), True, 'import pandas as pd\n'), ((12156, 12229), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['KL_div', 'abs_distance', 'y_pred', 'correctness']"}), "(columns=['KL_div', 'abs_distance', 'y_pred', 'correctness'])\n", (12168, 12229), True, 'import pandas as pd\n'), ((13259, 13309), 'matplotlib.pyplot.bar', 'plt.bar', (['index', "correctness2['percent']"], {'width': '(0.7)'}), "(index, correctness2['percent'], width=0.7)\n", (13266, 13309), True, 'import matplotlib.pyplot as plt\n'), ((13473, 13489), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(120)'], {}), '(0, 120)\n', (13481, 13489), True, 'import matplotlib.pyplot as plt\n'), ((13493, 13520), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""KL Divergence"""'], {}), "('KL Divergence')\n", (13503, 13520), True, 'import matplotlib.pyplot as plt\n'), ((13525, 13548), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% correct"""'], {}), "('% correct')\n", (13535, 13548), True, 'import matplotlib.pyplot as plt\n'), ((14346, 14377), 'pandas.DataFrame', 'pd.DataFrame', (["X_df2['y_actual']"], {}), "(X_df2['y_actual'])\n", (14358, 14377), True, 'import pandas as pd\n'), ((8086, 8108), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (8095, 8108), True, 'import numpy as np\n'), ((13812, 13834), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (13821, 13834), True, 'import numpy as np\n'), ((1670, 1690), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (1679, 1690), True, 'import pandas as pd\n'), ((1797, 1817), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (1806, 1817), True, 'import pandas as pd\n'), ((2112, 2132), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (2121, 2132), True, 
'import pandas as pd\n'), ((2239, 2259), 'pandas.Series', 'pd.Series', (['y.iloc[i]'], {}), '(y.iloc[i])\n', (2248, 2259), True, 'import pandas as pd\n'), ((2923, 2949), 'sklearn.utils.shuffle', 'shuffle', (['X'], {'random_state': 'j'}), '(X, random_state=j)\n', (2930, 2949), False, 'from sklearn.utils import shuffle\n'), ((3038, 3064), 'sklearn.utils.shuffle', 'shuffle', (['y'], {'random_state': 'j'}), '(y, random_state=j)\n', (3045, 3064), False, 'from sklearn.utils import shuffle\n'), ((4468, 4497), 'tensorflow.keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(22,)'}), '(input_shape=(22,))\n', (4478, 4497), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((4525, 4553), 'tensorflow.keras.layers.Dense', 'Dense', (['(45)'], {'activation': '"""relu"""'}), "(45, activation='relu')\n", (4530, 4553), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((4582, 4612), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (4587, 4612), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((6134, 6159), 'pandas.DataFrame', 'pd.DataFrame', (['y_test_pred'], {}), '(y_test_pred)\n', (6146, 6159), True, 'import pandas as pd\n'), ((8289, 8311), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (8298, 8311), True, 'import numpy as np\n'), ((8375, 8393), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (8391, 8393), False, 'from sklearn.linear_model import LinearRegression\n'), ((10281, 10310), 'tensorflow.keras.layers.InputLayer', 'InputLayer', ([], {'input_shape': '(22,)'}), '(input_shape=(22,))\n', (10291, 10310), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((10338, 10366), 'tensorflow.keras.layers.Dense', 'Dense', (['(45)'], {'activation': '"""relu"""'}), "(45, activation='relu')\n", (10343, 10366), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), 
((10395, 10425), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (10400, 10425), False, 'from tensorflow.keras.layers import InputLayer, Dense\n'), ((11852, 11878), 'pandas.DataFrame', 'pd.DataFrame', (['y_test_pred2'], {}), '(y_test_pred2)\n', (11864, 11878), True, 'import pandas as pd\n'), ((14008, 14030), 'numpy.arange', 'np.arange', (['(0)', '(0.7)', '(0.1)'], {}), '(0, 0.7, 0.1)\n', (14017, 14030), True, 'import numpy as np\n'), ((14093, 14111), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (14109, 14111), False, 'from sklearn.linear_model import LinearRegression\n'), ((3820, 3836), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (3834, 3836), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((9649, 9665), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {}), '()\n', (9663, 9665), False, 'from sklearn.preprocessing import OrdinalEncoder\n'), ((7292, 7314), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (7301, 7314), True, 'import numpy as np\n'), ((7949, 7971), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (7958, 7971), True, 'import numpy as np\n'), ((13019, 13041), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (13028, 13041), True, 'import numpy as np\n'), ((13676, 13698), 'numpy.arange', 'np.arange', (['(0)', '(0.8)', '(0.1)'], {}), '(0, 0.8, 0.1)\n', (13685, 13698), True, 'import numpy as np\n'), ((6624, 6635), 'math.log', 'math.log', (['p'], {}), '(p)\n', (6632, 6635), False, 'import math\n'), ((6644, 6659), 'math.log', 'math.log', (['(1 - p)'], {}), '(1 - p)\n', (6652, 6659), False, 'import math\n'), ((12358, 12369), 'math.log', 'math.log', (['p'], {}), '(p)\n', (12366, 12369), False, 'import math\n'), ((12378, 12393), 'math.log', 'math.log', (['(1 - p)'], {}), '(1 - p)\n', (12386, 12393), False, 
'import math\n')] |
import collections
import contextlib
import os.path
import typing
from contextlib import ExitStack
from pathlib import Path
from typing import BinaryIO, Dict, Optional, Generator, Iterator, Set
from mercury_engine_data_structures import formats, dread_data
from mercury_engine_data_structures.formats.base_resource import AssetId, BaseResource, NameOrAssetId, resolve_asset_id
from mercury_engine_data_structures.formats.pkg import PKGHeader, Pkg
from mercury_engine_data_structures.game_check import Game
class PkgEditor:
    """
    Manages efficiently reading all PKGs in the game and writing out modifications to a new path.
    _files_for_asset_id: mapping of asset id to all pkgs it can be found at
    _ensured_asset_ids: mapping of pkg name to assets we'll copy into it when saving
    _modified_resources: mapping of asset id to bytes. When saving, these asset ids are replaced
    """
    _files_for_asset_id: Dict[AssetId, Set[str]]
    _ensured_asset_ids: Dict[str, Set[AssetId]]
    _modified_resources: Dict[AssetId, bytes]
    def __init__(self, root: Path, target_game: Game = Game.DREAD):
        """Recursively finds all *.pkg files under `root` and indexes their headers.

        Only the PKG headers are parsed up front; asset payloads are read
        lazily from disk in get_raw_asset().
        """
        all_pkgs = root.rglob("*.pkg")
        self.files = {}
        self.root = root
        self.target_game = target_game
        self.headers = {}
        self._files_for_asset_id = collections.defaultdict(set)
        self._ensured_asset_ids = {}
        self._modified_resources = {}
        for pkg_path in all_pkgs:
            # Pkgs are keyed by their posix-style path relative to root.
            name = pkg_path.relative_to(root).as_posix()
            self.files[name] = pkg_path
            with pkg_path.open("rb") as f:
                self.headers[name] = PKGHeader.parse_stream(f, target_game=target_game)
            self._ensured_asset_ids[name] = set()
            # Record every pkg that contains each asset id.
            for entry in self.headers[name].file_entries:
                self._files_for_asset_id[entry.asset_id].add(name)
    def all_asset_ids(self) -> Iterator[AssetId]:
        """
        Returns an iterator of all asset ids in the available pkgs.
        """
        yield from self._files_for_asset_id.keys()
    def all_asset_names(self) -> Iterator[str]:
        """
        Returns an iterator of all known names of the present asset ids.
        """
        for asset_id in self.all_asset_ids():
            # Asset ids without a known human-readable name are skipped.
            name = dread_data.name_for_asset_id(asset_id)
            if name is not None:
                yield name
    def find_pkgs(self, asset_id: NameOrAssetId) -> Iterator[str]:
        """Yields the names of every pkg that contains the given asset."""
        yield from self._files_for_asset_id[resolve_asset_id(asset_id)]
    def get_raw_asset(self, asset_id: NameOrAssetId, in_pkg: Optional[str] = None) -> bytes:
        """Returns the bytes for the given asset, reading from disk on demand.

        A pending modification (from replace_asset) takes precedence over the
        on-disk contents. If `in_pkg` is given, only that pkg is searched.

        :raises ValueError: if the asset is not found in any (matching) pkg.
        """
        asset_id = resolve_asset_id(asset_id)
        if asset_id in self._modified_resources:
            return self._modified_resources[asset_id]
        for name, header in self.headers.items():
            if in_pkg is not None and name != in_pkg:
                continue
            for entry in header.file_entries:
                if entry.asset_id == asset_id:
                    # Read just this asset's byte range from the pkg file.
                    with self.files[name].open("rb") as f:
                        f.seek(entry.start_offset)
                        return f.read(entry.end_offset - entry.start_offset)
        raise ValueError(f"Unknown asset_id: {asset_id:0x}")
    def get_parsed_asset(self, name: str, in_pkg: Optional[str] = None) -> BaseResource:
        """Returns the asset parsed into its format object, chosen by file extension."""
        data = self.get_raw_asset(name, in_pkg)
        # File extension (without the leading dot) selects the format parser.
        file_format = os.path.splitext(name)[1][1:]
        return formats.format_for(file_format).parse(data, target_game=self.target_game)
    def replace_asset(self, asset_id: NameOrAssetId, new_data: typing.Union[bytes, BaseResource]):
        """Queues a replacement for the given asset; applied by save_modified_pkgs().

        Accepts either raw bytes or a BaseResource (which is built to bytes).
        """
        if not isinstance(new_data, bytes):
            new_data = new_data.build()
        self._modified_resources[resolve_asset_id(asset_id)] = new_data
    def ensure_present(self, pkg_name: str, asset_id: NameOrAssetId):
        """
        Ensures the given pkg has the give assets, collecting from other pkgs if needed.
        """
        if pkg_name not in self._ensured_asset_ids:
            raise ValueError(f"Unknown pkg_name: {pkg_name}")
        asset_id = resolve_asset_id(asset_id)
        # If the pkg already has the given asset, do nothing
        if pkg_name not in self._files_for_asset_id[asset_id]:
            self._ensured_asset_ids[pkg_name].add(asset_id)
    def save_modified_pkgs(self):
        """Rewrites every pkg that has at least one modified asset, in place.

        NOTE(review): pkgs that only have *ensured* assets (ensure_present)
        but no modified resources are never rewritten here, and their ensured
        set is not cleared — confirm this is intended.
        """
        # Pkgs needing a rewrite: those containing any modified asset.
        modified_pkgs = set()
        for asset_id in self._modified_resources.keys():
            modified_pkgs.update(self._files_for_asset_id[asset_id])
        # Read all asset ids we need to copy somewhere else
        asset_ids_to_copy = {}
        for asset_ids in self._ensured_asset_ids.values():
            for asset_id in asset_ids:
                if asset_id not in asset_ids_to_copy:
                    asset_ids_to_copy[asset_id] = self.get_raw_asset(asset_id)
        for pkg_name in modified_pkgs:
            # Parse the full pkg, apply replacements/additions, write it back.
            with self.files[pkg_name].open("rb") as f:
                pkg = Pkg.parse_stream(f, target_game=self.target_game)
            for asset_id, data in self._modified_resources.items():
                if pkg_name in self._files_for_asset_id[asset_id]:
                    pkg.replace_asset(asset_id, data)
            for asset_id in self._ensured_asset_ids[pkg_name]:
                pkg.add_asset(asset_id, asset_ids_to_copy[asset_id])
                # The asset now also lives in this pkg; keep the index current.
                self._files_for_asset_id[asset_id].add(pkg_name)
            with self.files[pkg_name].open("wb") as f:
                pkg.build_stream(f)
            # Clear the ensured asset ids, since we've written these
            self._ensured_asset_ids[pkg_name] = set()
        self._modified_resources = {}
| [
"mercury_engine_data_structures.formats.format_for",
"mercury_engine_data_structures.formats.pkg.Pkg.parse_stream",
"mercury_engine_data_structures.dread_data.name_for_asset_id",
"collections.defaultdict",
"mercury_engine_data_structures.formats.base_resource.resolve_asset_id",
"mercury_engine_data_struct... | [((1300, 1328), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (1323, 1328), False, 'import collections\n'), ((2600, 2626), 'mercury_engine_data_structures.formats.base_resource.resolve_asset_id', 'resolve_asset_id', (['asset_id'], {}), '(asset_id)\n', (2616, 2626), False, 'from mercury_engine_data_structures.formats.base_resource import AssetId, BaseResource, NameOrAssetId, resolve_asset_id\n'), ((4056, 4082), 'mercury_engine_data_structures.formats.base_resource.resolve_asset_id', 'resolve_asset_id', (['asset_id'], {}), '(asset_id)\n', (4072, 4082), False, 'from mercury_engine_data_structures.formats.base_resource import AssetId, BaseResource, NameOrAssetId, resolve_asset_id\n'), ((2248, 2286), 'mercury_engine_data_structures.dread_data.name_for_asset_id', 'dread_data.name_for_asset_id', (['asset_id'], {}), '(asset_id)\n', (2276, 2286), False, 'from mercury_engine_data_structures import formats, dread_data\n'), ((3700, 3726), 'mercury_engine_data_structures.formats.base_resource.resolve_asset_id', 'resolve_asset_id', (['asset_id'], {}), '(asset_id)\n', (3716, 3726), False, 'from mercury_engine_data_structures.formats.base_resource import AssetId, BaseResource, NameOrAssetId, resolve_asset_id\n'), ((1616, 1666), 'mercury_engine_data_structures.formats.pkg.PKGHeader.parse_stream', 'PKGHeader.parse_stream', (['f'], {'target_game': 'target_game'}), '(f, target_game=target_game)\n', (1638, 1666), False, 'from mercury_engine_data_structures.formats.pkg import PKGHeader, Pkg\n'), ((2459, 2485), 'mercury_engine_data_structures.formats.base_resource.resolve_asset_id', 'resolve_asset_id', (['asset_id'], {}), '(asset_id)\n', (2475, 2485), False, 'from mercury_engine_data_structures.formats.base_resource import AssetId, BaseResource, NameOrAssetId, resolve_asset_id\n'), ((3409, 3440), 'mercury_engine_data_structures.formats.format_for', 'formats.format_for', (['file_format'], {}), '(file_format)\n', (3427, 
3440), False, 'from mercury_engine_data_structures import formats, dread_data\n'), ((4899, 4948), 'mercury_engine_data_structures.formats.pkg.Pkg.parse_stream', 'Pkg.parse_stream', (['f'], {'target_game': 'self.target_game'}), '(f, target_game=self.target_game)\n', (4915, 4948), False, 'from mercury_engine_data_structures.formats.pkg import PKGHeader, Pkg\n')] |
#!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from conversion import read_imgs_masks
from os.path import isfile, basename
# Styling constants shared by the plt.errorbar call in harm_plot().
XERR=0.1  # half-width of the horizontal error bar
ELINEWIDTH=3  # error-bar line width
CAPSIZE=5  # error-bar cap length (points)
CAPTHICK=3  # error-bar cap thickness
FMT='cD'  # marker format for the mean (cyan diamond)
def harm_plot(ydata, labels, outPrefix, bshell_b):
    '''
    Plot per-site meanFA distributions side by side and save the figure.

    Each series is shown as red stars (one per subject) at a fixed x
    position, with its mean +/- standard deviation overlaid as an error bar.

    :param ydata: list of series [y1, y2, y3, ...] where each yi is a list of
        per-subject values; all series must have the same length
    :param labels: one x-axis label per series in ydata
    :param outPrefix: output file prefix; '_b<bshell_b>' is appended to it
    :param bshell_b: b-shell value used to tag the output file name
    :return: path of the saved errorbar plot PNG
    '''
    outPrefix += f'_b{bshell_b}'
    labels = list(labels)
    num_series, num_sub = np.shape(ydata)
    positions = list(range(num_series))

    # errorbar plot
    plt.figure(1)
    plt.grid(True)
    for i in positions:
        y = ydata[i]
        # all subjects of one series share the same x position
        plt.plot([i] * num_sub, y, 'r*')
        plt.errorbar([i], np.mean(y), xerr=XERR, yerr=np.std(y),
                     ecolor='k', capsize=CAPSIZE, capthick=CAPTHICK, elinewidth=ELINEWIDTH, fmt=FMT)

    plt.xticks(positions, labels)
    plt.title('Comparison of meanFA before and after harmonization')
    plt.ylabel('meanFA over IIT_mean_FA_skeleton')
    plt.savefig(outPrefix + '_ebarplot.png')

    # A boxplot variant used to live here; it was dead (commented-out) code
    # and has been removed. Only the errorbar plot path is returned.
    return outPrefix + '_ebarplot.png'
def generate_csv(imgs, site_means, outPrefix, bshell_b):
    '''
    Create or extend <outPrefix>_stat.csv with a per-subject meanFA column.

    :param imgs: either a list of image paths or a caselist understood by
        read_imgs_masks (the latter is unpacked into (imgs, masks) first)
    :param site_means: one meanFA value per image, same order as imgs
    :param outPrefix: output prefix; '_stat.csv' is appended
    :param bshell_b: b-shell value used in the new column name
    '''
    try:
        imgs, _ = read_imgs_masks(imgs)
    except Exception:
        # A plain list of image paths is also accepted. The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit; catching
        # Exception keeps the best-effort behavior without hiding those.
        pass

    statFile = outPrefix + '_stat.csv'
    if isfile(statFile):
        # The stat file exists: append one more meanFA column for this b-shell.
        df = pd.read_csv(statFile)
        df = df.assign(**{f'meanFA b{bshell_b}': site_means})
    else:
        # First call: create the table with subject names and this column.
        stat = {'subject': [basename(f) for f in imgs], f'meanFA b{bshell_b}': site_means}
        df = pd.DataFrame(stat)

    df.to_csv(statFile, index=False)
if __name__=='__main__':
    # Smoke test with hard-coded demo values for one b-shell.
    subjects = ['hi', 'hello', 'go', 'come']
    reference = [0.46, 0.49, 0.44, 0.40]
    before = [0.42, 0.58, 0.43, 0.66]
    after = [0.5, 0.45, 0.40, 0.55]
    series_labels = ['Reference', 'Target before', 'Target after']
    shell = 2000

    harm_plot([reference, before, after], series_labels, '/tmp/abc', shell)
    generate_csv(subjects, reference, '/tmp/abc', shell)
| [
"conversion.read_imgs_masks",
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"os.path.isfile",
"matplotlib.pyplot.figure",
"os.path.basen... | [((60, 81), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (74, 81), False, 'import matplotlib\n'), ((553, 568), 'numpy.shape', 'np.shape', (['ydata'], {}), '(ydata)\n', (561, 568), True, 'import numpy as np\n'), ((640, 653), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (650, 653), True, 'import matplotlib.pyplot as plt\n'), ((658, 672), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (666, 672), True, 'import matplotlib.pyplot as plt\n'), ((956, 984), 'matplotlib.pyplot.xticks', 'plt.xticks', (['iter_obj', 'labels'], {}), '(iter_obj, labels)\n', (966, 984), True, 'import matplotlib.pyplot as plt\n'), ((989, 1053), 'matplotlib.pyplot.title', 'plt.title', (['"""Comparison of meanFA before and after harmonization"""'], {}), "('Comparison of meanFA before and after harmonization')\n", (998, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1104), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""meanFA over IIT_mean_FA_skeleton"""'], {}), "('meanFA over IIT_mean_FA_skeleton')\n", (1068, 1104), True, 'import matplotlib.pyplot as plt\n'), ((1109, 1149), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outPrefix + '_ebarplot.png')"], {}), "(outPrefix + '_ebarplot.png')\n", (1120, 1149), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2077), 'os.path.isfile', 'isfile', (['statFile'], {}), '(statFile)\n', (2067, 2077), False, 'from os.path import isfile, basename\n'), ((763, 783), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r*"""'], {}), "(x, y, 'r*')\n", (771, 783), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1983), 'conversion.read_imgs_masks', 'read_imgs_masks', (['imgs'], {}), '(imgs)\n', (1977, 1983), False, 'from conversion import read_imgs_masks\n'), ((2091, 2112), 'pandas.read_csv', 'pd.read_csv', (['statFile'], {}), '(statFile)\n', (2102, 2112), True, 'import pandas as pd\n'), ((2287, 2305), 'pandas.DataFrame', 'pd.DataFrame', (['stat'], {}), '(stat)\n', 
(2299, 2305), True, 'import pandas as pd\n'), ((811, 821), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (818, 821), True, 'import numpy as np\n'), ((714, 733), 'numpy.ones', 'np.ones', (['(num_sub,)'], {}), '((num_sub,))\n', (721, 733), True, 'import numpy as np\n'), ((839, 848), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (845, 848), True, 'import numpy as np\n'), ((2211, 2222), 'os.path.basename', 'basename', (['f'], {}), '(f)\n', (2219, 2222), False, 'from os.path import isfile, basename\n')] |
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pandas as pd
import numpy as np
import warnings
import rpy2.robjects as robjects
from rpy2.robjects import numpy2ri, pandas2ri, Formula
from rpy2.robjects.packages import importr
# Enable automatic pandas/numpy <-> R object conversion for all rpy2 calls.
pandas2ri.activate()
numpy2ri.activate()
# import R libraries
# NOTE: these R packages must already be installed in the local R
# environment; importr raises if any of them is missing.
DESeq2 = importr('DESeq2')
edgeR = importr('edgeR')
Limma = importr('limma')
stats = importr('stats')
# R helper that coerces any R object (e.g. a DESeq2 results set) into an R
# data.frame, which pandas2ri can then convert to a pandas DataFrame.
to_dataframe = robjects.r('function(x) data.frame(x)')
class DE_rpy2:
"""
Running DESeq2, edgeR, limma through rpy2
input:
count_matrix: a pandas dataframe with each column as count
(float values in FPKM/RPKM are also acceptable as internal rounding will be done)
, and a id column for gene id
example:
id sampleA sampleB
geneA 5.1 1
geneB 4.2 5
geneC 1 2
design_matrix: a pandas dataframe with each column as a condition, and one row for one sample
Note that the sample name must be the index not a column
condition
sampleA1 treated
sampleA2 treated
sampleB1 untreated
sampleB2 untreated
design_formula: default to be the column name of design matrix, example: "~ condition""
If it contains multiple conditions, this formula must be customised,
or the DESeq2 will only consider the first condition.
gene_column: column name of gene id columns in count_matrix, default = 'id'
"""
def __init__(self, count_matrix, design_matrix, design_formula=None, gene_column='id'):
assert gene_column in count_matrix, \
'column: \'%s\', not found in count matrix' % gene_column
assert count_matrix.shape[1] - 1 == design_matrix.shape[0], \
'The number of rows in design matrix must ' \
'be equal to the number of samples in count matrix'
assert all(pd.isna(count_matrix)), \
'Null values are found in count matrix' \
'Please check it'
assert len(design_matrix.columns), \
'Columns names are needed in design matrix'
if 'float' in count_matrix.drop(gene_column, axis=1):
warnings.warn('DESeq2 and edgeR only accept integer counts\n'
'The values in count matrix are automatically rounded\n'
'In fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n')
# parameters used in DESeq2
self.count_matrix = pandas2ri.py2ri(count_matrix.drop(gene_column, axis=1).astype('int'))
self.design_matrix = pandas2ri.py2ri(design_matrix)
self.gene_ids = count_matrix[gene_column]
self.gene_column = gene_column
self.deseq2_result = None
self.deseq2_label = None
if design_formula is None:
condition = design_matrix.columns[0]
if len(design_matrix.columns) > 1:
warnings.warn('Multiple conditions are set in design matrix,\n'
'you\'d better customise the design formula.\n'
'Here it only considers the first condition\n')
self.design_formula = Formula('~ ' + condition)
else:
self.design_formula = Formula(design_formula)
# parameters used in edgeR
self.edgeR_group = numpy2ri.py2ri(design_matrix.iloc[:, 0].values)
self.edgeR_gene_names = numpy2ri.py2ri(count_matrix[gene_column].values)
self.edgeR_result = None
self.edgeR_label = None
# parameters used in limma
self.limma_result = None
self.limma_label = None
self.final_label = None
def deseq2(self, threshold=0.05, **kwargs):
"""
Run the standard DESeq2 workflow.
Get the DESeq2 results as DataFrame.
Return the label of each gene: 0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the adjusted p-value.
default = 0.05.
:param kwargs: parameters of DESeq2 functions.
See official instructions for details:
http://www.bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# Run DESeq2 workflow
dds = DESeq2.DESeqDataSetFromMatrix(countData=self.count_matrix,
colData=self.design_matrix,
design=self.design_formula)
dds = DESeq2.DESeq(dds, **kwargs)
res = DESeq2.results(dds, **kwargs)
# Store the output matrix as DataFrame
self.deseq2_result = pandas2ri.ri2py(to_dataframe(res))
self.deseq2_result[self.gene_column] = self.gene_ids
# The adjusted p-value in the DESeq2 results
# may contain NAN
if any(pd.isna(self.deseq2_result['padj'].values)):
warnings.warn('There exist NAN in the adjusted p-value\n'
'see https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/'
'inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n')
# Reject the H0 hypothesis if p-value < threshold
labels = [int(x) for x in (self.deseq2_result['padj'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.deseq2_label = label
return label
def edger(self, threshold=0.05):
"""
Run the standard edgeR workflow.
Get the edgR results as DataFrame.
Return the label of each gene:
0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the p-value.
default = 0.05.
See official instructions for details:
https://www.bioconductor.org/packages/release/bioc/vignettes/edgeR/inst/doc/edgeRUsersGuide.pdf
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# run edgeR workflow
# Create the DGEList object
dgList = edgeR.DGEList(counts=self.count_matrix, group=self.edgeR_group, genes=self.edgeR_gene_names)
# Normalize
dgList = edgeR.calcNormFactors(dgList, method="TMM")
# Setting up the model
robjects.r.assign('edgeR_group', self.edgeR_group)
designMat = stats.model_matrix(Formula('~ edgeR_group'))
# Estimating Dispersions
dgList = edgeR.estimateGLMCommonDisp(dgList, design=designMat)
dgList = edgeR.estimateGLMTrendedDisp(dgList, design=designMat)
dgList = edgeR.estimateGLMTagwiseDisp(dgList, design=designMat)
# Differential Expression
fit = edgeR.glmQLFit(dgList, designMat)
test = edgeR.glmQLFTest(fit)
res = edgeR.topTags(test, n=self.count_matrix.nrow)
res_df = pandas2ri.ri2py(to_dataframe(res))
# Sort the result on gene ids
gene_df = pd.DataFrame({'genes': self.gene_ids})
self.edgeR_result = pd.merge(gene_df, res_df, how='left')
# Reject the H0 hypothesis
labels = [int(x) for x in (self.edgeR_result['PValue'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.edgeR_label = label
return label
def limma(self, threshold=0.05):
"""
Run the standard limma workflow.
Get the limma results as DataFrame.
Return the label of each gene:
0 for not differentially expressed,
1 for differentially expressed.
:param threshold: threshold for the p-value.
default = 0.05.
See official instructions for details:
https://ucdavis-bioinformatics-training.github.io/2018-June-RNA-Seq-Workshop/thursday/DE.html
:return:
label: pandas.DataFrame format with 2 columns: gene ids and labels
"""
# Create the DGEList object
dgList = edgeR.DGEList(counts=self.count_matrix, group=self.edgeR_group, genes=self.edgeR_gene_names)
# Normalize
dgList = edgeR.calcNormFactors(dgList, method="TMM")
# Setting up the model
robjects.r.assign('edgeR_group', self.edgeR_group)
designMat = stats.model_matrix(Formula('~ edgeR_group'))
# voom
v = Limma.voom(dgList, designMat)
# fitting
fit = Limma.lmFit(v, designMat)
fit = Limma.eBayes(fit)
res = Limma.topTable(fit, n=self.count_matrix.nrow)
res_df = pandas2ri.ri2py(to_dataframe(res))
# Sort the result on gene ids
gene_df = pd.DataFrame({'genes': self.gene_ids})
self.limma_result = pd.merge(gene_df, res_df, how='left')
# Reject the H0 hypothesis
labels = [int(x) for x in (self.limma_result['adj.P.Val'] < threshold)]
label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': labels})
self.limma_label = label
return label
def plot_label_difference(self):
"""
Plot the Venn diagram of the 3 label output.
Since we only interest in the differentially expressed genes.
The number on Venn diagram shows the number of samples labeled as 1.
Say differentially expressed genes.
"""
if self.limma_label is None:
warnings.warn('Seems you haven\'t get limma label\n'
'Automatically running limma...')
self.limma_label = self.limma()
if self.deseq2_label is None:
warnings.warn('Seems you haven\'t get DESeq2 label\n'
'Automatically running DESeq2...')
self.deseq2_label = self.deseq2()
if self.edgeR_label is None:
warnings.warn('Seems you haven\'t get edgeR label\n'
'Automatically running edgeR...')
self.edgeR_label = self.edger()
# Import the plot package
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
labels = np.array([self.deseq2_label['label'].values, self.edgeR_label['label'].values,
self.limma_label['label'].values]).T
names = ['DESeq2', 'edgeR', 'limma']
venn_df = pd.DataFrame(data=labels, columns=names)
sets = {'000': 0, '001': 0, '010': 0, '011': 0, '100': 0, '101': 0, '110': 0, '111': 0}
for i in range(venn_df.shape[0]):
loc = [str(num) for num in venn_df.iloc[i, :]]
loc = loc[0] + loc[1] + loc[2]
sets[loc] += 1
venn3(sets, set_labels=names)
plt.show()
return sets
def get_final_label(self, method='inner'):
"""
There are 2 methods availabel:
inner: set those genes as differentially expressed,
say label 1, if all 3 tools agreed
vote: set those genes as differentially expressed,
say label 1, if all 2 out of the 3 tools agreed
union: set those genes as differentially expressed,
say label 1, as long as 1 tool agreed
"""
label = None
menu = ['inner', 'vote', 'union']
assert method in menu, \
'Please choose the correct method'
if self.limma_label is None:
warnings.warn('Seems you haven\'t get limma label\n'
'Automatically running limma...')
self.limma_label = self.limma()
if self.deseq2_label is None:
warnings.warn('Seems you haven\'t get DESeq2 label\n'
'Automatically running DESeq2...')
self.deseq2_label = self.deseq2()
if self.edgeR_label is None:
warnings.warn('Seems you haven\'t get edgeR label\n'
'Automatically running edgeR...')
self.edgeR_label = self.edger()
labels = self.deseq2_label['label'].values + self.edgeR_label['label'].values + self.limma_label['label'].values
if method == 'inner':
label = [int(x) for x in (labels == 3)]
if method == 'vote':
label = [int(x) for x in (labels >= 2)]
if method == 'union':
label = [int(x) for x in (labels >= 1)]
self.final_label = pd.DataFrame({self.gene_column: self.gene_ids, 'label': label})
return self.final_label
| [
"rpy2.robjects.pandas2ri.activate",
"rpy2.robjects.pandas2ri.py2ri",
"rpy2.robjects.r.assign",
"rpy2.robjects.numpy2ri.py2ri",
"pandas.merge",
"matplotlib_venn.venn3",
"rpy2.robjects.Formula",
"rpy2.robjects.packages.importr",
"numpy.array",
"pandas.DataFrame",
"warnings.warn",
"pandas.isna",
... | [((1288, 1308), 'rpy2.robjects.pandas2ri.activate', 'pandas2ri.activate', ([], {}), '()\n', (1306, 1308), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((1310, 1329), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (1327, 1329), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((1364, 1381), 'rpy2.robjects.packages.importr', 'importr', (['"""DESeq2"""'], {}), "('DESeq2')\n", (1371, 1381), False, 'from rpy2.robjects.packages import importr\n'), ((1391, 1407), 'rpy2.robjects.packages.importr', 'importr', (['"""edgeR"""'], {}), "('edgeR')\n", (1398, 1407), False, 'from rpy2.robjects.packages import importr\n'), ((1417, 1433), 'rpy2.robjects.packages.importr', 'importr', (['"""limma"""'], {}), "('limma')\n", (1424, 1433), False, 'from rpy2.robjects.packages import importr\n'), ((1443, 1459), 'rpy2.robjects.packages.importr', 'importr', (['"""stats"""'], {}), "('stats')\n", (1450, 1459), False, 'from rpy2.robjects.packages import importr\n'), ((1478, 1517), 'rpy2.robjects.r', 'robjects.r', (['"""function(x) data.frame(x)"""'], {}), "('function(x) data.frame(x)')\n", (1488, 1517), True, 'import rpy2.robjects as robjects\n'), ((3682, 3712), 'rpy2.robjects.pandas2ri.py2ri', 'pandas2ri.py2ri', (['design_matrix'], {}), '(design_matrix)\n', (3697, 3712), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((4447, 4494), 'rpy2.robjects.numpy2ri.py2ri', 'numpy2ri.py2ri', (['design_matrix.iloc[:, 0].values'], {}), '(design_matrix.iloc[:, 0].values)\n', (4461, 4494), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((4528, 4576), 'rpy2.robjects.numpy2ri.py2ri', 'numpy2ri.py2ri', (['count_matrix[gene_column].values'], {}), '(count_matrix[gene_column].values)\n', (4542, 4576), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((6570, 6634), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': labels}"], {}), "({self.gene_column: 
self.gene_ids, 'label': labels})\n", (6582, 6634), True, 'import pandas as pd\n'), ((7618, 7668), 'rpy2.robjects.r.assign', 'robjects.r.assign', (['"""edgeR_group"""', 'self.edgeR_group'], {}), "('edgeR_group', self.edgeR_group)\n", (7635, 7668), True, 'import rpy2.robjects as robjects\n'), ((8281, 8319), 'pandas.DataFrame', 'pd.DataFrame', (["{'genes': self.gene_ids}"], {}), "({'genes': self.gene_ids})\n", (8293, 8319), True, 'import pandas as pd\n'), ((8349, 8386), 'pandas.merge', 'pd.merge', (['gene_df', 'res_df'], {'how': '"""left"""'}), "(gene_df, res_df, how='left')\n", (8357, 8386), True, 'import pandas as pd\n'), ((8520, 8584), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': labels}"], {}), "({self.gene_column: self.gene_ids, 'label': labels})\n", (8532, 8584), True, 'import pandas as pd\n'), ((9536, 9586), 'rpy2.robjects.r.assign', 'robjects.r.assign', (['"""edgeR_group"""', 'self.edgeR_group'], {}), "('edgeR_group', self.edgeR_group)\n", (9553, 9586), True, 'import rpy2.robjects as robjects\n'), ((9978, 10016), 'pandas.DataFrame', 'pd.DataFrame', (["{'genes': self.gene_ids}"], {}), "({'genes': self.gene_ids})\n", (9990, 10016), True, 'import pandas as pd\n'), ((10046, 10083), 'pandas.merge', 'pd.merge', (['gene_df', 'res_df'], {'how': '"""left"""'}), "(gene_df, res_df, how='left')\n", (10054, 10083), True, 'import pandas as pd\n'), ((10220, 10284), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': labels}"], {}), "({self.gene_column: self.gene_ids, 'label': labels})\n", (10232, 10284), True, 'import pandas as pd\n'), ((11640, 11680), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'labels', 'columns': 'names'}), '(data=labels, columns=names)\n', (11652, 11680), True, 'import pandas as pd\n'), ((11962, 11991), 'matplotlib_venn.venn3', 'venn3', (['sets'], {'set_labels': 'names'}), '(sets, set_labels=names)\n', (11967, 11991), False, 'from matplotlib_venn import venn3\n'), ((12001, 12011), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12009, 12011), True, 'import matplotlib.pyplot as plt\n'), ((13674, 13737), 'pandas.DataFrame', 'pd.DataFrame', (["{self.gene_column: self.gene_ids, 'label': label}"], {}), "({self.gene_column: self.gene_ids, 'label': label})\n", (13686, 13737), True, 'import pandas as pd\n'), ((2976, 2997), 'pandas.isna', 'pd.isna', (['count_matrix'], {}), '(count_matrix)\n', (2983, 2997), True, 'import pandas as pd\n'), ((3269, 3464), 'warnings.warn', 'warnings.warn', (['"""DESeq2 and edgeR only accept integer counts\nThe values in count matrix are automatically rounded\nIn fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n"""'], {}), '(\n """DESeq2 and edgeR only accept integer counts\nThe values in count matrix are automatically rounded\nIn fact the FPKM/RPKM input is not encouraged by DESeq2 officially\n"""\n )\n', (3282, 3464), False, 'import warnings\n'), ((4281, 4306), 'rpy2.robjects.Formula', 'Formula', (["('~ ' + condition)"], {}), "('~ ' + condition)\n", (4288, 4306), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((4357, 4380), 'rpy2.robjects.Formula', 'Formula', (['design_formula'], {}), '(design_formula)\n', (4364, 4380), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((6116, 6158), 'pandas.isna', 'pd.isna', (["self.deseq2_result['padj'].values"], {}), "(self.deseq2_result['padj'].values)\n", (6123, 6158), True, 'import pandas as pd\n'), ((6174, 6366), 'warnings.warn', 'warnings.warn', (['"""There exist NAN in the adjusted p-value\nsee https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n"""'], {}), '(\n """There exist NAN in the adjusted p-value\nsee https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na\n"""\n )\n', (6187, 6366), False, 'import warnings\n'), ((7709, 7733), 'rpy2.robjects.Formula', 'Formula', (['"""~ 
edgeR_group"""'], {}), "('~ edgeR_group')\n", (7716, 7733), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((9627, 9651), 'rpy2.robjects.Formula', 'Formula', (['"""~ edgeR_group"""'], {}), "('~ edgeR_group')\n", (9634, 9651), False, 'from rpy2.robjects import numpy2ri, pandas2ri, Formula\n'), ((10708, 10798), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get limma label\nAutomatically running limma..."""'], {}), '(\n """Seems you haven\'t get limma label\nAutomatically running limma...""")\n', (10721, 10798), False, 'import warnings\n'), ((10919, 11011), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2..."""'], {}), '(\n """Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2...""")\n', (10932, 11011), False, 'import warnings\n'), ((11133, 11223), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get edgeR label\nAutomatically running edgeR..."""'], {}), '(\n """Seems you haven\'t get edgeR label\nAutomatically running edgeR...""")\n', (11146, 11223), False, 'import warnings\n'), ((11431, 11549), 'numpy.array', 'np.array', (["[self.deseq2_label['label'].values, self.edgeR_label['label'].values, self.\n limma_label['label'].values]"], {}), "([self.deseq2_label['label'].values, self.edgeR_label['label'].\n values, self.limma_label['label'].values])\n", (11439, 11549), True, 'import numpy as np\n'), ((12679, 12769), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get limma label\nAutomatically running limma..."""'], {}), '(\n """Seems you haven\'t get limma label\nAutomatically running limma...""")\n', (12692, 12769), False, 'import warnings\n'), ((12890, 12982), 'warnings.warn', 'warnings.warn', (['"""Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2..."""'], {}), '(\n """Seems you haven\'t get DESeq2 label\nAutomatically running DESeq2...""")\n', (12903, 12982), False, 'import warnings\n'), ((13104, 13194), 'warnings.warn', 
'warnings.warn', (['"""Seems you haven\'t get edgeR label\nAutomatically running edgeR..."""'], {}), '(\n """Seems you haven\'t get edgeR label\nAutomatically running edgeR...""")\n', (13117, 13194), False, 'import warnings\n'), ((4024, 4187), 'warnings.warn', 'warnings.warn', (['"""Multiple conditions are set in design matrix,\nyou\'d better customise the design formula.\nHere it only considers the first condition\n"""'], {}), '(\n """Multiple conditions are set in design matrix,\nyou\'d better customise the design formula.\nHere it only considers the first condition\n"""\n )\n', (4037, 4187), False, 'import warnings\n')] |
from CGATReport.Tracker import *
from CGATReport.Utils import PARAMS as P
from IsoformReport import *
###############################################################################
# parse params
###############################################################################
# NOTE(review): the outer P.get() key is an empty string, which can never
# match a real configuration entry, so DATABASE always falls back to the
# inner 'sql_backend' lookup -- presumably the intended key name was lost;
# confirm the correct parameter against the project configuration.
DATABASE = P.get('', P.get('sql_backend', 'sqlite:///./csvdb'))
# Path/URL of the annotations database used by the trackers below.
ANNOTATIONS_DATABASE = P.get('annotations_database')
###############################################################################
# trackers
###############################################################################
class DeseqFeatureResultsGenes(IsoformTracker):
    """Report per-gene DESeq2 results computed on featureCounts output.

    ``pattern`` captures the comparison (track) name from the database
    table name; the tracker framework interpolates it back in below as
    ``%(track)s``.
    """

    pattern = "deseq2_featurecounts__(.*)_genes_results"

    def __call__(self, track, slice=None):
        # Significant genes first, then ascending log2 fold change.
        statement = '''
        SELECT A.control_name, A.treatment_name, A.control_mean,
        A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
        A.significant FROM deseq2_featurecounts__%(track)s_genes_results
        AS A ORDER BY A.significant DESC,
        A.l2fold ASC;
        '''
        return self.getAll(statement)
class EdgerFeatureResultsGenes(IsoformTracker):
    """Report per-gene edgeR results computed on featureCounts output.

    ``pattern`` captures the comparison (track) name from the database
    table name; the tracker framework interpolates it back in below as
    ``%(track)s``.
    """

    pattern = "edger_featurecounts__(.*)_genes_results"

    def __call__(self, track, slice=None):
        # Significant genes first, then ascending log2 fold change.
        statement = '''
        SELECT A.control_name, A.treatment_name, A.control_mean,
        A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
        A.significant FROM edger_featurecounts__%(track)s_genes_results
        AS A ORDER BY A.significant DESC,
        A.l2fold ASC
        '''
        return self.getAll(statement)
class DeseqKallistoResultsGenes(IsoformTracker):
    """Report per-gene DESeq2 results computed on kallisto quantification.

    ``pattern`` captures the comparison (track) name from the database
    table name; the tracker framework interpolates it back in below as
    ``%(track)s``.
    """

    pattern = "deseq2_kallisto__(.*)_genes_results"

    def __call__(self, track, slice=None):
        # Significant genes first, then ascending log2 fold change.
        statement = '''
        SELECT A.control_name, A.treatment_name, A.control_mean,
        A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
        A.significant FROM deseq2_kallisto__%(track)s_genes_results
        AS A ORDER BY A.significant DESC,
        A.l2fold ASC
        '''
        return self.getAll(statement)
class EdgerKallistoResultsGenes(IsoformTracker):
    """Report per-gene edgeR results computed on kallisto quantification.

    ``pattern`` captures the comparison (track) name from the database
    table name; the tracker framework interpolates it back in below as
    ``%(track)s``.
    """

    pattern = "edger_kallisto__(.*)_genes_results"

    def __call__(self, track, slice=None):
        # Significant genes first, then ascending log2 fold change.
        statement = '''
        SELECT A.control_name, A.treatment_name, A.control_mean,
        A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
        A.significant FROM edger_kallisto__%(track)s_genes_results AS
        A ORDER BY A.significant DESC,
        A.l2fold ASC
        '''
        return self.getAll(statement)
class SleuthKallistoResultsGenes(IsoformTracker):
    """Report per-gene sleuth results computed on kallisto quantification.

    ``pattern`` captures the comparison (track) name from the database
    table name; the tracker framework interpolates it back in below as
    ``%(track)s``.
    """

    pattern = "sleuth_kallisto__(.*)_genes_results"

    def __call__(self, track, slice=None):
        # Significant genes first, then ascending log2 fold change.
        statement = '''
        SELECT A.control_name, A.treatment_name, A.control_mean,
        A.treatment_mean, A.test_id, A.l2fold, A.p_value, A.p_value_adj,
        A.significant FROM sleuth_kallisto__%(track)s_genes_results AS
        A ORDER BY A.significant DESC,
        A.l2fold ASC
        '''
        return self.getAll(statement)
| [
"CGATReport.Utils.PARAMS.get"
] | [((367, 396), 'CGATReport.Utils.PARAMS.get', 'P.get', (['"""annotations_database"""'], {}), "('annotations_database')\n", (372, 396), True, 'from CGATReport.Utils import PARAMS as P\n'), ((301, 342), 'CGATReport.Utils.PARAMS.get', 'P.get', (['"""sql_backend"""', '"""sqlite:///./csvdb"""'], {}), "('sql_backend', 'sqlite:///./csvdb')\n", (306, 342), True, 'from CGATReport.Utils import PARAMS as P\n')] |
import torch
from torch import Tensor
from torch.utils.data import Dataset
from torchvision import io
from pathlib import Path
from typing import Tuple
from torchvision import transforms as T
class CelebAMaskHQ(Dataset):
    """CelebAMask-HQ face-parsing dataset.

    Image/label file stems are read from ``{split}_list.txt`` under ``root``;
    images are resized to 512x512 and labels are returned as a LongTensor of
    per-pixel class indices.
    """

    CLASSES = [
        'background', 'skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear',
        'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'
    ]

    PALETTE = torch.tensor([
        [0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0],
        [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]
    ])

    def __init__(self, root: str, split: str = 'train', transform = None) -> None:
        super().__init__()
        assert split in ['train', 'val', 'test']
        self.root = Path(root)
        self.transform = transform
        self.ignore_label = 255
        self.n_classes = len(self.CLASSES)
        self.resize = T.Resize((512, 512))

        # One file stem per line in the split list.
        split_file = self.root / f'{split}_list.txt'
        with open(split_file) as fh:
            self.files = fh.read().splitlines()
        if not self.files:
            raise Exception(f"No images found in {root}")
        print(f"Found {len(self.files)} {split} images.")

    def __len__(self) -> int:
        return len(self.files)

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
        img_file = self.root / 'CelebA-HQ-img' / f"{self.files[index]}.jpg"
        lbl_file = self.root / 'CelebAMask-HQ-label' / f"{self.files[index]}.png"
        # Only the image is resized; the label keeps its native resolution.
        image = self.resize(io.read_image(str(img_file)))
        label = io.read_image(str(lbl_file))
        if self.transform:
            image, label = self.transform(image, label)
        return image, label.squeeze().long()
if __name__ == '__main__':
    # Quick visual smoke test; NOTE the dataset root is hard-coded to the
    # original author's machine and must be adjusted locally.
    from semseg.utils.visualize import visualize_dataset_sample
    visualize_dataset_sample(CelebAMaskHQ, '/home/sithu/datasets/CelebAMask-HQ')
"torchvision.transforms.Resize",
"torch.tensor",
"semseg.utils.visualize.visualize_dataset_sample",
"pathlib.Path"
] | [((449, 745), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, \n 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0], [\n 102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153],\n [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]'], {}), '([[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51,\n 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255,\n 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51,\n 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]])\n', (461, 745), False, 'import torch\n'), ((1987, 2063), 'semseg.utils.visualize.visualize_dataset_sample', 'visualize_dataset_sample', (['CelebAMaskHQ', '"""/home/sithu/datasets/CelebAMask-HQ"""'], {}), "(CelebAMaskHQ, '/home/sithu/datasets/CelebAMask-HQ')\n", (2011, 2063), False, 'from semseg.utils.visualize import visualize_dataset_sample\n'), ((937, 947), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (941, 947), False, 'from pathlib import Path\n'), ((1080, 1100), 'torchvision.transforms.Resize', 'T.Resize', (['(512, 512)'], {}), '((512, 512))\n', (1088, 1100), True, 'from torchvision import transforms as T\n')] |
import sys
import table_gen
import doc_gen
import graph_gen
import array_gen
import os
def main(argv):
    """Generate the SF1 disaster dataset from the raw input directories.

    :param argv: command-line arguments; argv[1..6] are the directories for
        earthquake, shelter, gps, road-network, site and fine-dust data.
    """
    import shutil

    outdir = './sf1_dataset_output/'
    # Start from a clean output directory on every run.
    # shutil.rmtree replaces the former os.system("rm -rf ...") call:
    # it is portable and does not spawn a shell.
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.mkdir(outdir)

    earthquake_dirpath = argv[1]
    shelter_dirpath = argv[2]
    gps_dirpath = argv[3]
    roadnetwork_dirpath = argv[4]
    site_dirpath = argv[5]
    finedust_dirpath = argv[6]

    print("------ Disaster Data Generation ------")
    print("Start Earthquake Generation")
    table_gen.earthquake_gen(earthquake_dirpath, outdir)
    print("Start Gps Generation")
    table_gen.gps_gen(gps_dirpath, outdir)
    print("Start Road Network Generation")
    graph_gen.roadnetwork_gen(roadnetwork_dirpath, outdir)
    print("Start Site Generation")
    doc_gen.site_gen(site_dirpath, outdir)
    print("Start Shelter Generation")
    table_gen.shelter_gen(shelter_dirpath, outdir)
    print("Start Finedust Generation")
    array_gen.finedust_gen(finedust_dirpath, outdir)
    array_gen.finedust_idx_gen(finedust_dirpath, outdir)
if __name__ == '__main__':
    # Entry point: forward the raw command-line arguments to main().
    main(sys.argv)
"os.path.exists",
"table_gen.shelter_gen",
"array_gen.finedust_gen",
"table_gen.gps_gen",
"doc_gen.site_gen",
"os.mkdir",
"array_gen.finedust_idx_gen",
"graph_gen.roadnetwork_gen",
"os.system",
"table_gen.earthquake_gen"
] | [((148, 170), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (162, 170), False, 'import os\n'), ((212, 228), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (220, 228), False, 'import os\n'), ((510, 562), 'table_gen.earthquake_gen', 'table_gen.earthquake_gen', (['earthquake_dirpath', 'outdir'], {}), '(earthquake_dirpath, outdir)\n', (534, 562), False, 'import table_gen\n'), ((602, 640), 'table_gen.gps_gen', 'table_gen.gps_gen', (['gps_dirpath', 'outdir'], {}), '(gps_dirpath, outdir)\n', (619, 640), False, 'import table_gen\n'), ((689, 743), 'graph_gen.roadnetwork_gen', 'graph_gen.roadnetwork_gen', (['roadnetwork_dirpath', 'outdir'], {}), '(roadnetwork_dirpath, outdir)\n', (714, 743), False, 'import graph_gen\n'), ((784, 822), 'doc_gen.site_gen', 'doc_gen.site_gen', (['site_dirpath', 'outdir'], {}), '(site_dirpath, outdir)\n', (800, 822), False, 'import doc_gen\n'), ((866, 912), 'table_gen.shelter_gen', 'table_gen.shelter_gen', (['shelter_dirpath', 'outdir'], {}), '(shelter_dirpath, outdir)\n', (887, 912), False, 'import table_gen\n'), ((957, 1005), 'array_gen.finedust_gen', 'array_gen.finedust_gen', (['finedust_dirpath', 'outdir'], {}), '(finedust_dirpath, outdir)\n', (979, 1005), False, 'import array_gen\n'), ((1010, 1062), 'array_gen.finedust_idx_gen', 'array_gen.finedust_idx_gen', (['finedust_dirpath', 'outdir'], {}), '(finedust_dirpath, outdir)\n', (1036, 1062), False, 'import array_gen\n'), ((180, 209), 'os.system', 'os.system', (["('rm -rf ' + outdir)"], {}), "('rm -rf ' + outdir)\n", (189, 209), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'

# Demonstrates how bleach.clean() treats link protocols outside its default
# whitelist, and two ways to extend that whitelist.
# SOURCE: https://github.com/mozilla/bleach
# SOURCE: https://bleach.readthedocs.io/en/latest/clean.html#allowed-protocols-protocols

# pip install bleach
import bleach

# List of allowed protocols
print('List of allowed protocols:', bleach.sanitizer.ALLOWED_PROTOCOLS)
# ['http', 'https', 'mailto']

# 'smb' is not in the default whitelist, so the href attribute is stripped.
print(
    bleach.clean(
        '<a href="smb://more_text">allowed protocol</a>'
    )
)
# <a>allowed protocol</a>

# Passing an explicit protocols list keeps the smb:// link intact.
print(
    bleach.clean(
        '<a href="smb://more_text">allowed protocol</a>',
        protocols=['http', 'https', 'smb']
    )
)
# <a href="smb://more_text">allowed protocol</a>

# Equivalent: extend the default list instead of replacing it.
print(
    bleach.clean(
        '<a href="smb://more_text">allowed protocol</a>',
        protocols=bleach.ALLOWED_PROTOCOLS + ['smb']
    )
)
# <a href="smb://more_text">allowed protocol</a>
| [
"bleach.clean"
] | [((388, 450), 'bleach.clean', 'bleach.clean', (['"""<a href="smb://more_text">allowed protocol</a>"""'], {}), '(\'<a href="smb://more_text">allowed protocol</a>\')\n', (400, 450), False, 'import bleach\n'), ((505, 608), 'bleach.clean', 'bleach.clean', (['"""<a href="smb://more_text">allowed protocol</a>"""'], {'protocols': "['http', 'https', 'smb']"}), '(\'<a href="smb://more_text">allowed protocol</a>\', protocols=[\n \'http\', \'https\', \'smb\'])\n', (517, 608), False, 'import bleach\n'), ((689, 802), 'bleach.clean', 'bleach.clean', (['"""<a href="smb://more_text">allowed protocol</a>"""'], {'protocols': "(bleach.ALLOWED_PROTOCOLS + ['smb'])"}), '(\'<a href="smb://more_text">allowed protocol</a>\', protocols=\n bleach.ALLOWED_PROTOCOLS + [\'smb\'])\n', (701, 802), False, 'import bleach\n')] |
import symro
import symro.src.handlers.metaentitybuilder as eb
from symro.src.parsing.amplparser import AMPLParser
from symro.test.test_util import *
# Scripts
# ----------------------------------------------------------------------------------------------------------------------
FIXED_DIM_SCRIPT = """
set I = {1, 2, 3};
set J = {'A', 'B', 'C'};
set K = {(1, 'A'), (2, 'B'), (3, 'C')};
set L = {(1, 'A'), (2, 'B'), (3, 'C')};
var x {(i,'A') in K, (i,l) in L};
"""
SUB_SET_SCRIPT = """
set NUM_SET = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
set EVEN_SET = {0, 2, 4, 6, 8};
set LETTER_SET = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', "I", 'J'};
set VOWEL_SET = {'A', 'E', "I"};
set NUM_LETTER_SET = {NUM_SET, LETTER_SET};
set INDEXED_SET{i in NUM_SET} = 0..i;
set INDEXED_SET_2{i in NUM_SET} = {(i,j) in NUM_LETTER_SET};
var VAR_1{i in NUM_SET} >= 0;
var VAR_2{i in NUM_SET, j in LETTER_SET} >= 0;
var VAR_test{i in NUM_SET: 1 in union{i1 in NUM_SET}{1..1: i == 5}};
minimize OBJ: 0;
display {i in NUM_SET: 1 in union{i1 in NUM_SET}{1..1: i == 5}};
"""
# Tests
# ----------------------------------------------------------------------------------------------------------------------
def run_entity_builder_test_group():
    """Collect and execute the meta-entity builder test cases."""
    cases = []
    cases.append(("Build meta-entity with fixed dimensions", fixed_dimension_test))
    cases.append(("Build sub-meta-entities", sub_meta_entity_builder_test))
    return run_tests(cases)
def fixed_dimension_test():
    """Check dimension reduction of an entity indexed with fixed components."""
    problem = symro.read_ampl(script_literal=FIXED_DIM_SCRIPT,
                              working_dir_path=SCRIPT_DIR_PATH)

    meta_var = problem.get_meta_entity("x")

    return [
        check_str_result(meta_var.idx_set_reduced_dim, 2),
        check_str_result(meta_var.idx_set_reduced_dummy_element, ['i', 'l']),
    ]
def sub_meta_entity_builder_test():
    """Exercise eb.build_sub_meta_entity over several indexing scenarios.

    Each case builds a sub-meta-entity for VAR_1 or VAR_2 restricted to a
    given indexing subset and compares its string form to the expected
    declaration. The previous six copy-pasted stanzas are folded into one
    helper.
    """
    problem = symro.read_ampl(script_literal=SUB_SET_SCRIPT,
                              working_dir_path=SCRIPT_DIR_PATH)
    ampl_parser = AMPLParser(problem)

    results = []

    mv_1 = problem.get_meta_entity("VAR_1")
    mv_2 = problem.get_meta_entity("VAR_2")

    def run_case(meta_entity, idx_subset_node, index_literal, expected):
        # Build the sub-meta-entity and record the string comparison.
        # NOTE: callers parse the subset node *before* this call so that
        # auto-generated dummy symbols (i2, i3, j1, ...) keep the same
        # numbering order as the original test sequence.
        idx_node = ampl_parser.parse_entity_index(index_literal)
        sub_meta_entity = eb.build_sub_meta_entity(
            problem=problem,
            idx_subset_node=idx_subset_node,
            meta_entity=meta_entity,
            entity_idx_node=idx_node)
        results.append(check_str_result(sub_meta_entity, expected))

    # test 1: {i in NUM_SET} VAR_1[i]
    run_case(mv_1, mv_1.idx_set_node, "[i]", "var VAR_1{i in NUM_SET}")

    # test 2: {i in EVEN_SET} VAR_1[i]
    run_case(mv_1,
             ampl_parser.parse_indexing_set_definition("{i in EVEN_SET}"),
             "[i]",
             "var VAR_1{i in NUM_SET: i in {i2 in EVEN_SET}}")

    # test 3: {i in NUM_SET} VAR_1[5]
    run_case(mv_1, mv_1.idx_set_node, "[5]", "var VAR_1{i in NUM_SET: i == 5}")

    # test 4: {i in EVEN_SET, j in VOWEL_SET} VAR_2[i,j]
    run_case(mv_2,
             ampl_parser.parse_indexing_set_definition("{i in EVEN_SET, j in VOWEL_SET}"),
             "[i,j]",
             "var VAR_2{i in NUM_SET, j in LETTER_SET: (i,j) in {i3 in EVEN_SET, j1 in VOWEL_SET}}")

    # test 5: {i in NUM_SET, j in INDEXED_SET[i]} VAR_1[j]
    run_case(mv_1,
             ampl_parser.parse_indexing_set_definition("{i in NUM_SET, j in INDEXED_SET[i]}"),
             "[j]",
             "var VAR_1{i in NUM_SET: i in union{i4 in NUM_SET}{j2 in INDEXED_SET[i]}}")

    # test 6: {i in NUM_SET, j in INDEXED_SET_2[i]} VAR_2[i,j]
    run_case(mv_2,
             ampl_parser.parse_indexing_set_definition("{i in NUM_SET, j in INDEXED_SET_2[i]}"),
             "[j,k]",
             "var VAR_2{i in NUM_SET, j in LETTER_SET}")

    return results
| [
"symro.read_ampl",
"symro.src.parsing.amplparser.AMPLParser",
"symro.src.handlers.metaentitybuilder.build_sub_meta_entity"
] | [((1442, 1529), 'symro.read_ampl', 'symro.read_ampl', ([], {'script_literal': 'FIXED_DIM_SCRIPT', 'working_dir_path': 'SCRIPT_DIR_PATH'}), '(script_literal=FIXED_DIM_SCRIPT, working_dir_path=\n SCRIPT_DIR_PATH)\n', (1457, 1529), False, 'import symro\n'), ((1829, 1914), 'symro.read_ampl', 'symro.read_ampl', ([], {'script_literal': 'SUB_SET_SCRIPT', 'working_dir_path': 'SCRIPT_DIR_PATH'}), '(script_literal=SUB_SET_SCRIPT, working_dir_path=SCRIPT_DIR_PATH\n )\n', (1844, 1914), False, 'import symro\n'), ((1959, 1978), 'symro.src.parsing.amplparser.AMPLParser', 'AMPLParser', (['problem'], {}), '(problem)\n', (1969, 1978), False, 'from symro.src.parsing.amplparser import AMPLParser\n'), ((2200, 2324), 'symro.src.handlers.metaentitybuilder.build_sub_meta_entity', 'eb.build_sub_meta_entity', ([], {'problem': 'problem', 'idx_subset_node': 'mv_1.idx_set_node', 'meta_entity': 'mv_1', 'entity_idx_node': 'idx_node'}), '(problem=problem, idx_subset_node=mv_1.idx_set_node,\n meta_entity=mv_1, entity_idx_node=idx_node)\n', (2224, 2324), True, 'import symro.src.handlers.metaentitybuilder as eb\n'), ((2633, 2755), 'symro.src.handlers.metaentitybuilder.build_sub_meta_entity', 'eb.build_sub_meta_entity', ([], {'problem': 'problem', 'idx_subset_node': 'idx_subset_node', 'meta_entity': 'mv_1', 'entity_idx_node': 'idx_node'}), '(problem=problem, idx_subset_node=idx_subset_node,\n meta_entity=mv_1, entity_idx_node=idx_node)\n', (2657, 2755), True, 'import symro.src.handlers.metaentitybuilder as eb\n'), ((3003, 3127), 'symro.src.handlers.metaentitybuilder.build_sub_meta_entity', 'eb.build_sub_meta_entity', ([], {'problem': 'problem', 'idx_subset_node': 'mv_1.idx_set_node', 'meta_entity': 'mv_1', 'entity_idx_node': 'idx_node'}), '(problem=problem, idx_subset_node=mv_1.idx_set_node,\n meta_entity=mv_1, entity_idx_node=idx_node)\n', (3027, 3127), True, 'import symro.src.handlers.metaentitybuilder as eb\n'), ((3480, 3602), 'symro.src.handlers.metaentitybuilder.build_sub_meta_entity', 
'eb.build_sub_meta_entity', ([], {'problem': 'problem', 'idx_subset_node': 'idx_subset_node', 'meta_entity': 'mv_2', 'entity_idx_node': 'idx_node'}), '(problem=problem, idx_subset_node=idx_subset_node,\n meta_entity=mv_2, entity_idx_node=idx_node)\n', (3504, 3602), True, 'import symro.src.handlers.metaentitybuilder as eb\n'), ((4022, 4144), 'symro.src.handlers.metaentitybuilder.build_sub_meta_entity', 'eb.build_sub_meta_entity', ([], {'problem': 'problem', 'idx_subset_node': 'idx_subset_node', 'meta_entity': 'mv_1', 'entity_idx_node': 'idx_node'}), '(problem=problem, idx_subset_node=idx_subset_node,\n meta_entity=mv_1, entity_idx_node=idx_node)\n', (4046, 4144), True, 'import symro.src.handlers.metaentitybuilder as eb\n'), ((4560, 4682), 'symro.src.handlers.metaentitybuilder.build_sub_meta_entity', 'eb.build_sub_meta_entity', ([], {'problem': 'problem', 'idx_subset_node': 'idx_subset_node', 'meta_entity': 'mv_2', 'entity_idx_node': 'idx_node'}), '(problem=problem, idx_subset_node=idx_subset_node,\n meta_entity=mv_2, entity_idx_node=idx_node)\n', (4584, 4682), True, 'import symro.src.handlers.metaentitybuilder as eb\n')] |
import boto3
import uuid
import json
import os
def lambda_handler(event, context):
    """S3-triggered Lambda: create an Amazon Transcribe custom vocabulary
    from the uploaded file.

    The language is taken from the third dash-separated token of the object
    key's stem, e.g. "custom-vocab-EN.txt" -> "EN".
    """
    record = event['Records'][0]
    s3bucket = record['s3']['bucket']['name']
    s3object = record['s3']['object']['key']
    print(s3object.split(".")[0].split("-")[2])
    # NOTE(review): the EN and ES branches are copy-pasted except for the
    # LanguageCode; also 'VocabularybName' below looks like a typo of
    # 'VocabularyName' -- left unchanged because callers may depend on it.
    if s3object.split(".")[0].split("-")[2] == "EN":
        s3Path = "s3://" + s3bucket + "/" + s3object
        VocabName = "custom-vocab-EN-" + str(uuid.uuid4())  # unique vocabulary name
        client = boto3.client('transcribe')
        print("S3 Path:" + s3Path)
        response = client.create_vocabulary(
            VocabularyName=VocabName,
            LanguageCode='en-US',
            VocabularyFileUri = s3Path,
            )
        return {
            'VocabularybName': response['VocabularyName']
        }
    elif s3object.split(".")[0].split("-")[2] == "ES":
        s3Path = "s3://" + s3bucket + "/" + s3object
        VocabName = "custom-vocab-ES-" + str(uuid.uuid4())
        client = boto3.client('transcribe')
        print("S3 Path:" + s3Path)
        response = client.create_vocabulary(
            VocabularyName=VocabName,
            LanguageCode='es-ES',
            VocabularyFileUri = s3Path,
            )
        return {
            'VocabularybName': response['VocabularyName']
        }
    else:
        return {
            'ErrorCode': "Language not in filename, must end in EN or ES"
        } | [
"boto3.client",
"uuid.uuid4"
] | [((464, 490), 'boto3.client', 'boto3.client', (['"""transcribe"""'], {}), "('transcribe')\n", (476, 490), False, 'import boto3\n'), ((971, 997), 'boto3.client', 'boto3.client', (['"""transcribe"""'], {}), "('transcribe')\n", (983, 997), False, 'import boto3\n'), ((432, 444), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (442, 444), False, 'import uuid\n'), ((939, 951), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (949, 951), False, 'import uuid\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 10 14:12:28 2022

@author: 1517suj

Single-image YOLOv1 inference: rebuild the network, restore a trained
checkpoint, run the model on one image, and plot the predicted boxes
before and after non-max suppression.
"""
from model import Yolov1
import torch
import torch.optim as optim
import torchvision.transforms as T
import cv2

from utils import (
    non_max_suppression,
    mean_average_precision,
    intersection_over_union,
    cellboxes_to_boxes,  # converts boxes from cell-relative to image-relative coords
    get_bboxes,
    plot_image,
    load_checkpoint,
)

# inference configuration
DEVICE = 'cuda'
LOAD_MODEL_FILE = "overfit.pth.tar"
LEARNING_RATE = 2e-5
WEIGHT_DECAY = 0

# Rebuild the network and restore the trained weights.  The optimizer is only
# constructed because load_checkpoint restores its state alongside the model's.
model = Yolov1(split_size=7, num_boxes=2, num_classes=20)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
load_checkpoint(torch.load(LOAD_MODEL_FILE), model, optimizer)
model.to(DEVICE)
model.eval()

# %%
path = 'town_center.png'
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; the model expects RGB
print(img.shape)

# ToTensor must come first: it converts HxWxC uint8 [0, 255] to CxHxW float [0, 1].
transform = T.Compose([T.ToTensor(),
                       T.Resize((448, 448))])
inputs = transform(img)  # torch.Size([3, 448, 448])
print(inputs.shape)

# add the batch dimension expected by the network
inputs = torch.unsqueeze(inputs, 0)  # torch.Size([1, 3, 448, 448])
print(inputs.shape)

input_tensor = inputs.to(DEVICE)
output = model(input_tensor)
print(output.shape)

# %%
# Convert raw grid predictions to bounding boxes and visualise.
bboxes = cellboxes_to_boxes(output)
plot_image(img, bboxes[0])  # raw predictions, before NMS

bboxes = non_max_suppression(bboxes[0], iou_threshold=0.8, threshold=0.15, box_format="midpoint")
plot_image(img, bboxes)  # final detections drawn on the original-size image
| [
"utils.cellboxes_to_boxes",
"torch.unsqueeze",
"utils.plot_image",
"utils.non_max_suppression",
"torch.load",
"model.Yolov1",
"cv2.cvtColor",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"cv2.imread"
] | [((593, 642), 'model.Yolov1', 'Yolov1', ([], {'split_size': '(7)', 'num_boxes': '(2)', 'num_classes': '(20)'}), '(split_size=7, num_boxes=2, num_classes=20)\n', (599, 642), False, 'from model import Yolov1\n'), ((997, 1013), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1007, 1013), False, 'import cv2\n'), ((1020, 1056), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1032, 1056), False, 'import cv2\n'), ((1665, 1691), 'torch.unsqueeze', 'torch.unsqueeze', (['inputs', '(0)'], {}), '(inputs, 0)\n', (1680, 1691), False, 'import torch\n'), ((1863, 1889), 'utils.cellboxes_to_boxes', 'cellboxes_to_boxes', (['output'], {}), '(output)\n', (1881, 1889), False, 'from utils import non_max_suppression, mean_average_precision, intersection_over_union, cellboxes_to_boxes, get_bboxes, plot_image, load_checkpoint\n'), ((2020, 2046), 'utils.plot_image', 'plot_image', (['img', 'bboxes[0]'], {}), '(img, bboxes[0])\n', (2030, 2046), False, 'from utils import non_max_suppression, mean_average_precision, intersection_over_union, cellboxes_to_boxes, get_bboxes, plot_image, load_checkpoint\n'), ((2063, 2155), 'utils.non_max_suppression', 'non_max_suppression', (['bboxes[0]'], {'iou_threshold': '(0.8)', 'threshold': '(0.15)', 'box_format': '"""midpoint"""'}), "(bboxes[0], iou_threshold=0.8, threshold=0.15,\n box_format='midpoint')\n", (2082, 2155), False, 'from utils import non_max_suppression, mean_average_precision, intersection_over_union, cellboxes_to_boxes, get_bboxes, plot_image, load_checkpoint\n'), ((2318, 2341), 'utils.plot_image', 'plot_image', (['img', 'bboxes'], {}), '(img, bboxes)\n', (2328, 2341), False, 'from utils import non_max_suppression, mean_average_precision, intersection_over_union, cellboxes_to_boxes, get_bboxes, plot_image, load_checkpoint\n'), ((747, 774), 'torch.load', 'torch.load', (['LOAD_MODEL_FILE'], {}), '(LOAD_MODEL_FILE)\n', (757, 774), False, 'import torch\n'), ((1387, 1399), 
'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1397, 1399), True, 'import torchvision.transforms as T\n'), ((1449, 1469), 'torchvision.transforms.Resize', 'T.Resize', (['(448, 448)'], {}), '((448, 448))\n', (1457, 1469), True, 'import torchvision.transforms as T\n')] |
import pathlib
from setuptools import setup

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file, used as the PyPI long description
README = (HERE / "README.md").read_text()

setup(
    name="satisfy-calc",
    version="1.1.3",
    description="Command line crafting tree visualizer for Satisfactory Game by CoffeeStain Studios",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/sedatDemiriz/satisfy-calc",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    install_requires=["bs4", "jsonpickle", "requests"],
    packages=["satisfy_calc"],
    entry_points={
        # Installs the `satisfy_calc` console command.
        "console_scripts": [
            "satisfy_calc=satisfy_calc.__main__:main"
        ]
    },
)
| [
"setuptools.setup",
"pathlib.Path"
] | [((193, 849), 'setuptools.setup', 'setup', ([], {'name': '"""satisfy-calc"""', 'version': '"""1.1.3"""', 'description': '"""Command line crafting tree visualizer for Satisfactory Game by CoffeeStain Studios"""', 'long_description': 'README', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/sedatDemiriz/satisfy-calc"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7']", 'install_requires': "['bs4', 'jsonpickle', 'requests']", 'packages': "['satisfy_calc']", 'entry_points': "{'console_scripts': ['satisfy_calc=satisfy_calc.__main__:main']}"}), "(name='satisfy-calc', version='1.1.3', description=\n 'Command line crafting tree visualizer for Satisfactory Game by CoffeeStain Studios'\n , long_description=README, long_description_content_type=\n 'text/markdown', url='https://github.com/sedatDemiriz/satisfy-calc',\n author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7'], install_requires=['bs4',\n 'jsonpickle', 'requests'], packages=['satisfy_calc'], entry_points={\n 'console_scripts': ['satisfy_calc=satisfy_calc.__main__:main']})\n", (198, 849), False, 'from setuptools import setup\n'), ((89, 111), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (101, 111), False, 'import pathlib\n')] |
from TestOutput import LogManager
class Auth_Test:
    """Smoke test: constructing it emits a 'Test Started' event via LogManager."""

    def __init__(self):
        log = LogManager('AuthTest')
        log.writeTestEvent('AuthTest', 'Test Started')
        self.logger = log
if __name__ == "__main__":
    # Instantiating the class performs the test's only action (logging the event).
    Auth_Test() | [
"TestOutput.LogManager"
] | [((90, 112), 'TestOutput.LogManager', 'LogManager', (['"""AuthTest"""'], {}), "('AuthTest')\n", (100, 112), False, 'from TestOutput import LogManager\n')] |
"""
Takes the MNIST dataset as input (images and labels separated)
and creates a new dataset only with 0's and 1's
"""
import numpy as np
DATA_PATH = "data/raw/"
OUTPUT_PATH = "data/processed/mnist/"
X = np.loadtxt(DATA_PATH + "mnist2500_X.txt")
labels = np.loadtxt(DATA_PATH + "mnist2500_labels.txt")
X_new = []
labels_new = []
for i,label in enumerate(labels):
if label < 5:
labels_new.append(label)
X_new.append(X[i])
if i%100 == 0:
print(f"{i} labels passed")
np.savetxt(OUTPUT_PATH + "mnist2500_X_01234.txt",X_new)
np.savetxt(OUTPUT_PATH +"mnist2500_labels_01234.txt",labels_new) | [
"numpy.loadtxt",
"numpy.savetxt"
] | [((206, 247), 'numpy.loadtxt', 'np.loadtxt', (["(DATA_PATH + 'mnist2500_X.txt')"], {}), "(DATA_PATH + 'mnist2500_X.txt')\n", (216, 247), True, 'import numpy as np\n'), ((257, 303), 'numpy.loadtxt', 'np.loadtxt', (["(DATA_PATH + 'mnist2500_labels.txt')"], {}), "(DATA_PATH + 'mnist2500_labels.txt')\n", (267, 303), True, 'import numpy as np\n'), ((503, 559), 'numpy.savetxt', 'np.savetxt', (["(OUTPUT_PATH + 'mnist2500_X_01234.txt')", 'X_new'], {}), "(OUTPUT_PATH + 'mnist2500_X_01234.txt', X_new)\n", (513, 559), True, 'import numpy as np\n'), ((559, 625), 'numpy.savetxt', 'np.savetxt', (["(OUTPUT_PATH + 'mnist2500_labels_01234.txt')", 'labels_new'], {}), "(OUTPUT_PATH + 'mnist2500_labels_01234.txt', labels_new)\n", (569, 625), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import sys
import pcbnew
import numpy as np
import pprint
def inch_to_nanometer(value):
    """Convert inches to nanometres (1 inch = 25.4 mm = 25.4e6 nm)."""
    return value * 25.4 * 1e6
def nanometer_to_inch(value):
    """Convert nanometres to inches."""
    nm_per_inch = 25.4 * 1.e6
    return value / nm_per_inch
def nm_to_mm(value):
    """Convert nanometres to millimetres."""
    scale = 1.e-6
    return value * scale
def mm_to_nm(value):
    """Convert millimetres to nanometres."""
    scale = 1.e6
    return value * scale
def print_module_info(module):
    """Print a module's reference, position (in inches) and angle (in degrees)."""
    ref = module.GetReference()
    pos = module.GetPosition()
    fields = [
        ('R', ref),
        ('X', nanometer_to_inch(pos.x)),
        ('Y', nanometer_to_inch(pos.y)),
        # pcbnew reports orientation in tenths of a degree
        ('A', 0.1 * module.GetOrientation()),
    ]
    for label, value in fields:
        print('  {}: {}'.format(label, value))
    print()
def get_placement_data(param):
    """Build a {reference: {'angle', 'x', 'y'}} placement table.

    References are laid out in a row: x starts at param['x_start'] and
    advances by param['x_step'] per part; y and angle are constant.
    """
    x0 = param['x_start']
    dx = param['x_step']
    y = param['y_value']
    angle = param['angle']
    return {
        ref: {'angle': angle, 'x': x0 + i * dx, 'y': y}
        for i, ref in enumerate(param['ref_list'])
    }
def place_pcb_modules(filename, placement_data):
    """Load a KiCad board, reposition/rotate the modules named in
    placement_data (x/y in mm, angle in degrees), and save the result
    as 'mod_<basename>' alongside the input file.
    """
    print()
    print('loading pcb: {}'.format(filename))
    print()
    pcb = pcbnew.LoadBoard(filename)
    print()
    print('done')
    print()
    for module in pcb.GetModules():
        ref_str = str(module.GetReference())
        try:
            data = placement_data[ref_str]
        except KeyError:
            # Not listed in the placement table: leave this module untouched.
            continue
        print_module_info(module)
        # Move to new position
        pos = module.GetPosition()
        angle = 0.1*module.GetOrientation()  # NOTE(review): read but never used
        x_new = data['x']
        y_new = data['y']
        angle_new = data['angle']
        # pcbnew positions are in nanometres; orientations in tenths of a degree.
        pos.x = int(mm_to_nm(x_new))
        pos.y = int(mm_to_nm(y_new))
        module.SetPosition(pos)
        module.SetOrientation(10.0*angle_new)
        print_module_info(module)
    pathname, basename = os.path.split(filename)
    new_basename = 'mod_{}'.format(basename)
    new_filename = os.path.join(pathname,new_basename)
    pcb.Save(new_filename)
# ---------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Connectors J1..J18 in one row: fixed y, x stepping 13 mm per part.
    param = {
        'ref_list': ['J{}'.format(i) for i in range(1, 19)],
        'y_value': 56.0,
        'x_start': 50.0 + 39.5,
        'x_step': 13.0,
        'angle': 180.0,
    }
    placement_data = get_placement_data(param)

    # Was a dead `if 0:` debug branch; now an explicit toggle.
    DEBUG = False
    if DEBUG:
        print('led_data')
        print()
        pp = pprint.PrettyPrinter(indent=2)
        pp.pprint(placement_data)
        print()
        print('modules')
        print()

    filename = sys.argv[1]  # board file to modify
    place_pcb_modules(filename, placement_data)
| [
"pcbnew.LoadBoard",
"os.path.join",
"pprint.PrettyPrinter",
"os.path.split"
] | [((1101, 1127), 'pcbnew.LoadBoard', 'pcbnew.LoadBoard', (['filename'], {}), '(filename)\n', (1117, 1127), False, 'import pcbnew\n'), ((1804, 1827), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (1817, 1827), False, 'import os\n'), ((1892, 1928), 'os.path.join', 'os.path.join', (['pathname', 'new_basename'], {}), '(pathname, new_basename)\n', (1904, 1928), False, 'import os\n'), ((2438, 2468), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)'}), '(indent=2)\n', (2458, 2468), False, 'import pprint\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
import sys
import iotc
from iotc import IOTConnectType, IOTLogLevel
from random import randint
import base64
import hmac
import hashlib
gIsMicroPython = ('implementation' in dir(sys)) and ('name' in dir(sys.implementation)) and (sys.implementation.name == 'micropython')

def computeKey(secret, regId):
    """Derive the per-device key for DPS symmetric-key attestation.

    Base64-decodes the group master *secret*, HMAC-SHA256 signs the device
    *regId*, and returns the base64-encoded digest (bytes).
    """
    global gIsMicroPython
    try:
        secret = base64.b64decode(secret)
    except Exception:  # was a bare except; keep exit-on-bad-secret behaviour
        print("ERROR: broken base64 secret => `" + secret + "`")
        sys.exit()
    # MicroPython exposes sha256 under hashlib._sha256; CPython under hashlib.
    digestmod = hashlib._sha256.sha256 if gIsMicroPython else hashlib.sha256
    return base64.b64encode(hmac.new(secret, msg=regId.encode('utf8'), digestmod=digestmod).digest())
# Placeholder provisioning values; replace with real IoT Central identifiers.
deviceId = "DEVICE_ID"
scopeId = "SCOPE_ID"
masterKey = "PRIMARY/SECONDARY master Key"
# Per-device key derived from the group master key.
deviceKey = computeKey(masterKey, deviceId)
# NOTE(review): this rebinds the name `iotc` from the imported module to a
# Device instance; the module itself is unreachable from here on.
iotc = iotc.Device(scopeId, deviceKey, deviceId, IOTConnectType.IOTC_CONNECT_SYMM_KEY)
iotc.setLogLevel(IOTLogLevel.IOTC_LOGGING_API_ONLY)
gCanSend = False  # becomes True once the connection callback confirms we are online
gCounter = 0  # iteration counter used to throttle telemetry sends
def onconnect(info):
    # Connection status callback: allow sending after a successful connect (status 0).
    global gCanSend
    print("- [onconnect] => status:" + str(info.getStatusCode()))
    if info.getStatusCode() == 0:
        if iotc.isConnected():
            gCanSend = True
def onmessagesent(info):
    # Fired after each telemetry message is sent.
    print("\t- [onmessagesent] => " + str(info.getPayload()))
def oncommand(info):
    # Fired for cloud-to-device commands.
    print("- [oncommand] => " + info.getTag() + " => " + str(info.getPayload()))
def onsettingsupdated(info):
    # Fired when device settings (desired properties) change.
    print("- [onsettingsupdated] => " + info.getTag() + " => " + info.getPayload())
iotc.on("ConnectionStatus", onconnect)
iotc.on("MessageSent", onmessagesent)
iotc.on("Command", oncommand)
iotc.on("SettingsUpdated", onsettingsupdated)
iotc.connect()
# Pump MQTT and send random telemetry on every 20th ready iteration.
while iotc.isConnected():
  iotc.doNext() # do the async work needed to be done for MQTT
  if gCanSend == True:
    if gCounter % 20 == 0:
      gCounter = 0
      print("Sending telemetry..")
      iotc.sendTelemetry("{ \
\"temp\": " + str(randint(20, 45)) + ", \
\"accelerometerX\": " + str(randint(2, 15)) + ", \
\"accelerometerY\": " + str(randint(3, 9)) + ", \
\"accelerometerZ\": " + str(randint(1, 4)) + "}")
    gCounter += 1
| [
"iotc.Device",
"iotc.connect",
"iotc.isConnected",
"base64.b64decode",
"iotc.doNext",
"iotc.on",
"iotc.setLogLevel",
"sys.exit",
"random.randint"
] | [((944, 1023), 'iotc.Device', 'iotc.Device', (['scopeId', 'deviceKey', 'deviceId', 'IOTConnectType.IOTC_CONNECT_SYMM_KEY'], {}), '(scopeId, deviceKey, deviceId, IOTConnectType.IOTC_CONNECT_SYMM_KEY)\n', (955, 1023), False, 'import iotc\n'), ((1024, 1075), 'iotc.setLogLevel', 'iotc.setLogLevel', (['IOTLogLevel.IOTC_LOGGING_API_ONLY'], {}), '(IOTLogLevel.IOTC_LOGGING_API_ONLY)\n', (1040, 1075), False, 'import iotc\n'), ((1594, 1632), 'iotc.on', 'iotc.on', (['"""ConnectionStatus"""', 'onconnect'], {}), "('ConnectionStatus', onconnect)\n", (1601, 1632), False, 'import iotc\n'), ((1633, 1670), 'iotc.on', 'iotc.on', (['"""MessageSent"""', 'onmessagesent'], {}), "('MessageSent', onmessagesent)\n", (1640, 1670), False, 'import iotc\n'), ((1671, 1700), 'iotc.on', 'iotc.on', (['"""Command"""', 'oncommand'], {}), "('Command', oncommand)\n", (1678, 1700), False, 'import iotc\n'), ((1701, 1746), 'iotc.on', 'iotc.on', (['"""SettingsUpdated"""', 'onsettingsupdated'], {}), "('SettingsUpdated', onsettingsupdated)\n", (1708, 1746), False, 'import iotc\n'), ((1748, 1762), 'iotc.connect', 'iotc.connect', ([], {}), '()\n', (1760, 1762), False, 'import iotc\n'), ((1770, 1788), 'iotc.isConnected', 'iotc.isConnected', ([], {}), '()\n', (1786, 1788), False, 'import iotc\n'), ((1792, 1805), 'iotc.doNext', 'iotc.doNext', ([], {}), '()\n', (1803, 1805), False, 'import iotc\n'), ((431, 455), 'base64.b64decode', 'base64.b64decode', (['secret'], {}), '(secret)\n', (447, 455), False, 'import base64\n'), ((1251, 1269), 'iotc.isConnected', 'iotc.isConnected', ([], {}), '()\n', (1267, 1269), False, 'import iotc\n'), ((531, 541), 'sys.exit', 'sys.exit', ([], {}), '()\n', (539, 541), False, 'import sys\n'), ((2158, 2171), 'random.randint', 'randint', (['(1)', '(4)'], {}), '(1, 4)\n', (2165, 2171), False, 'from random import randint\n'), ((2108, 2121), 'random.randint', 'randint', (['(3)', '(9)'], {}), '(3, 9)\n', (2115, 2121), False, 'from random import randint\n'), ((2057, 2071), 
'random.randint', 'randint', (['(2)', '(15)'], {}), '(2, 15)\n', (2064, 2071), False, 'from random import randint\n'), ((2005, 2020), 'random.randint', 'randint', (['(20)', '(45)'], {}), '(20, 45)\n', (2012, 2020), False, 'from random import randint\n')] |
import random
import copy
import time
from numba import jit
from array_creator import create_test_set
from measure import get_elapsed_data
@get_elapsed_data
def scratch_lin_search(test_set):
    """Hand-rolled linear scan: look for each key in its array.

    Timing statistics come from the get_elapsed_data decorator.
    """
    # Removed the unused `hit = False` flag from the original.
    for key, arr in test_set:
        for item in arr:
            if item == key:
                break
@get_elapsed_data
def std_search(test_set):
    """Look up each key with the built-in `in` operator (timed by decorator)."""
    for needle, haystack in test_set:
        if needle in haystack:
            pass
def main():
    """Compare the hand-rolled linear scan against the built-in `in` operator."""
    size = 100000
    trial = 1
    test_set = create_test_set(size, trial)
    # Removed the unused `original = list(range(size))` from the original.
    print(scratch_lin_search(test_set))
    print(std_search(test_set))


if __name__ == '__main__':
    main()
| [
"array_creator.create_test_set"
] | [((489, 517), 'array_creator.create_test_set', 'create_test_set', (['size', 'trial'], {}), '(size, trial)\n', (504, 517), False, 'from array_creator import create_test_set\n')] |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Use "distribute" - the setuptools fork that supports python 3.
from distribute_setup import use_setuptools
# Bootstrap "distribute" (the setuptools fork with Python 3 support).
use_setuptools()
from distutils.command import sdist
import glob
import os
import sys
from setuptools import setup, find_packages
#A dirty hack to get around some early import/configurations ambiguities
#This is the same as setup_helpers.set_build_mode(), but does not require
#importing setup_helpers
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
# presumably read by astropy at import time to signal setup mode -- confirm
builtins._PACKAGE_SETUP_ = True
import astropy
from astropy import setup_helpers
from astropy.version_helper import get_git_devstr, generate_version_py
# Set affiliated package-specific settings
PACKAGENAME = 'astroquery'
DESCRIPTION = 'Functions and classes to access online data resources'
LICENSE = 'BSD'
#version should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
version = '0.0.dev'
# Indicates if this version is a release version
release = 'dev' not in version
# Adjust the compiler in case the default on this platform is to use a
# broken one.
setup_helpers.adjust_compiler()
if not release:
    # Append the git revision string for development builds.
    version += get_git_devstr(False)
generate_version_py(PACKAGENAME, version, release,
                    setup_helpers.get_debug_option())
# Use the find_packages tool to locate all packages and modules
packagenames = find_packages()
# Treat everything in scripts except README.rst as a script to be installed
scripts = glob.glob(os.path.join('scripts', '*'))
scripts.remove(os.path.join('scripts', 'README.rst'))
# This dictionary stores the command classes used in setup below
cmdclassd = {'test': setup_helpers.setup_test_command(PACKAGENAME),
             # Use distutils' sdist because it respects package_data.
             # setuptools/distributes sdist requires duplication of
             # information in MANIFEST.in
             'sdist': sdist.sdist,
             # Use a custom build command which understands additional
             # commandline arguments
             'build': setup_helpers.AstropyBuild,
             # Use a custom install command which understands additional
             # commandline arguments
             'install': setup_helpers.AstropyInstall
             }
if setup_helpers.HAVE_CYTHON and not release:
    from Cython.Distutils import build_ext
    # Builds Cython->C if in dev mode and Cython is present
    cmdclassd['build_ext'] = setup_helpers.wrap_build_ext(build_ext)
else:
    cmdclassd['build_ext'] = setup_helpers.wrap_build_ext()
# Sphinx docs command is only registered when this astropy version provides it.
if hasattr(setup_helpers,'AstropyBuildSphinx') and setup_helpers.AstropyBuildSphinx is not None:
    cmdclassd['build_sphinx'] = setup_helpers.AstropyBuildSphinx
# Set our custom command class mapping in setup_helpers, so that
# setup_helpers.get_distutils_option will use the custom classes.
setup_helpers.cmdclassd = cmdclassd
# Additional C extensions that are not Cython-based should be added here.
extensions = []
# A dictionary to keep track of all package data to install
package_data = {PACKAGENAME: ['data/*']}
# A dictionary to keep track of extra packagedir mappings
package_dirs = {}
# Update extensions, package_data, packagenames and package_dirs from
# any sub-packages that define their own extension modules and package
# data. See the docstring for setup_helpers.update_package_files for
# more details.
setup_helpers.update_package_files(PACKAGENAME, extensions, package_data,
                                   packagenames, package_dirs)
setup(name=PACKAGENAME,
      version=version,
      description=DESCRIPTION,
      packages=packagenames,
      package_data=package_data,
      package_dir=package_dirs,
      ext_modules=extensions,
      scripts=scripts,
      requires=['astropy'],
      install_requires=['astropy'],
      provides=[PACKAGENAME],
      license=LICENSE,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=True
      )
| [
"distribute_setup.use_setuptools",
"astropy.setup_helpers.wrap_build_ext",
"setuptools.find_packages",
"setuptools.setup",
"os.path.join",
"astropy.setup_helpers.update_package_files",
"astropy.setup_helpers.get_debug_option",
"astropy.setup_helpers.setup_test_command",
"astropy.setup_helpers.adjust... | [((196, 212), 'distribute_setup.use_setuptools', 'use_setuptools', ([], {}), '()\n', (210, 212), False, 'from distribute_setup import use_setuptools\n'), ((1168, 1199), 'astropy.setup_helpers.adjust_compiler', 'setup_helpers.adjust_compiler', ([], {}), '()\n', (1197, 1199), False, 'from astropy import setup_helpers\n'), ((1439, 1454), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1452, 1454), False, 'from setuptools import setup, find_packages\n'), ((3439, 3544), 'astropy.setup_helpers.update_package_files', 'setup_helpers.update_package_files', (['PACKAGENAME', 'extensions', 'package_data', 'packagenames', 'package_dirs'], {}), '(PACKAGENAME, extensions, package_data,\n packagenames, package_dirs)\n', (3473, 3544), False, 'from astropy import setup_helpers\n'), ((3579, 3922), 'setuptools.setup', 'setup', ([], {'name': 'PACKAGENAME', 'version': 'version', 'description': 'DESCRIPTION', 'packages': 'packagenames', 'package_data': 'package_data', 'package_dir': 'package_dirs', 'ext_modules': 'extensions', 'scripts': 'scripts', 'requires': "['astropy']", 'install_requires': "['astropy']", 'provides': '[PACKAGENAME]', 'license': 'LICENSE', 'cmdclass': 'cmdclassd', 'zip_safe': '(False)', 'use_2to3': '(True)'}), "(name=PACKAGENAME, version=version, description=DESCRIPTION, packages=\n packagenames, package_data=package_data, package_dir=package_dirs,\n ext_modules=extensions, scripts=scripts, requires=['astropy'],\n install_requires=['astropy'], provides=[PACKAGENAME], license=LICENSE,\n cmdclass=cmdclassd, zip_safe=False, use_2to3=True)\n", (3584, 3922), False, 'from setuptools import setup, find_packages\n'), ((1232, 1253), 'astropy.version_helper.get_git_devstr', 'get_git_devstr', (['(False)'], {}), '(False)\n', (1246, 1253), False, 'from astropy.version_helper import get_git_devstr, generate_version_py\n'), ((1325, 1357), 'astropy.setup_helpers.get_debug_option', 'setup_helpers.get_debug_option', ([], {}), '()\n', 
(1355, 1357), False, 'from astropy import setup_helpers\n'), ((1552, 1580), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""*"""'], {}), "('scripts', '*')\n", (1564, 1580), False, 'import os\n'), ((1597, 1634), 'os.path.join', 'os.path.join', (['"""scripts"""', '"""README.rst"""'], {}), "('scripts', 'README.rst')\n", (1609, 1634), False, 'import os\n'), ((1723, 1768), 'astropy.setup_helpers.setup_test_command', 'setup_helpers.setup_test_command', (['PACKAGENAME'], {}), '(PACKAGENAME)\n', (1755, 1768), False, 'from astropy import setup_helpers\n'), ((2504, 2543), 'astropy.setup_helpers.wrap_build_ext', 'setup_helpers.wrap_build_ext', (['build_ext'], {}), '(build_ext)\n', (2532, 2543), False, 'from astropy import setup_helpers\n'), ((2579, 2609), 'astropy.setup_helpers.wrap_build_ext', 'setup_helpers.wrap_build_ext', ([], {}), '()\n', (2607, 2609), False, 'from astropy import setup_helpers\n')] |
import _config
import _utils
CONFIG_PATH = "config.toml"


def main():
    """List the JPG files in every input directory named by the config."""
    cfg = _config.read(CONFIG_PATH)
    input_dirs = [entry.in_ for entry in cfg.directorios]
    for directory in input_dirs:
        _utils.list_jpg_files_in_dir(directory)


if __name__ == "__main__":
    main()
| [
"_config.read",
"_utils.list_jpg_files_in_dir"
] | [((86, 111), '_config.read', '_config.read', (['CONFIG_PATH'], {}), '(CONFIG_PATH)\n', (98, 111), False, 'import _config\n'), ((178, 217), '_utils.list_jpg_files_in_dir', '_utils.list_jpg_files_in_dir', (['path_name'], {}), '(path_name)\n', (206, 217), False, 'import _utils\n')] |
import time
class SocketWrap(object):
    """Thin wrapper around a socket that adds bookkeeping (peer info, byte
    counters, a FIFO chunk buffer) and optional rewriting of HTTP request
    URLs.  Unknown attributes are delegated to the wrapped socket.

    NOTE(review): the string matching in recv() assumes text-mode data
    (Python 2 str); a bytes socket would need b"..." patterns -- verify.
    """
    def __init__(self, sock, ip=None, port=None, host="", target=""):
        self._sock = sock
        self.ip = ip
        self.port = port
        self.host = host
        self.target = target
        # running totals of received payload bytes / recv calls
        self.recved_data = 0
        self.recved_times = 0
        self.create_time = time.time()
        self.closed = False
        # optional (old, new) pair substituted into HTTP request URLs in recv()
        self.replace_pattern = None
        # FIFO of buffered chunks plus cached total size and chunk count
        self.buf = []
        self.buf_size = 0
        self.buf_num = 0
    def __getattr__(self, attr):
        # Delegate anything not defined here to the wrapped socket.
        return getattr(self._sock, attr)
    def close(self):
        """Close the wrapped socket and remember that it is closed."""
        self._sock.close()
        self.closed = True
    def is_closed(self):
        return self.closed
    def __str__(self):
        return "%s[%s]:%d" % (self.host, self.ip, self.port)
    def recv(self, bufsiz, flags=0):
        """Receive from the socket; if replace_pattern is set and the chunk
        contains an HTTP/1.1 request line, rewrite the URL in that line.
        """
        d = self._sock.recv(bufsiz, flags)
        if self.replace_pattern and " HTTP/1.1\r\n" in d:
            line_end = d.find("\r\n")
            req_line = d[:line_end]
            words = req_line.split()
            if len(words) == 3:  # only rewrite a well-formed "METHOD URL VERSION"
                method, url, http_version = words
                url = url.replace(self.replace_pattern[0], self.replace_pattern[1])
                d = "%s %s %s" % (method, url, http_version) + d[line_end:]
        return d
    def add_dat(self, data):
        """Append a chunk to the FIFO buffer."""
        self.buf.append(data)
        self.buf_size += len(data)
        self.buf_num += 1
    def get_dat(self):
        """Pop and return the oldest buffered chunk ("" when empty)."""
        if not self.buf:
            return ""
        dat = self.buf.pop(0)
        self.buf_size -= len(dat)
        self.buf_num -= 1
        return dat
    def restore_dat(self, dat):
        """Push a chunk back to the front of the FIFO (undo a get_dat)."""
        self.buf.insert(0, dat)
        self.buf_size += len(dat)
        self.buf_num += 1 | [
"time.time"
] | [((323, 334), 'time.time', 'time.time', ([], {}), '()\n', (332, 334), False, 'import time\n')] |
import pytest
from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text
from bigxml.nodes import XMLText
def test_one_maker_element():
    """A single element marker records one path and keeps the function callable."""
    @xml_handle_element("abc", "def")
    def handler(value):
        return value * 6

    assert getattr(handler, _ATTR_MARKER, None) == (("abc", "def"),)
    assert handler(7) == 42
def test_one_maker_element_on_method():
    """The marker also works on bound instance methods."""
    class Multiplier:
        def __init__(self, factor):
            self.factor = factor

        @xml_handle_element("abc", "def")
        def scale(self, value):
            return value * self.factor

    obj = Multiplier(6)
    assert getattr(obj.scale, _ATTR_MARKER, None) == (("abc", "def"),)
    assert obj.scale(7) == 42
def test_one_maker_element_on_static_method():
    """Marker applied above @staticmethod still records the path."""
    class Klass:
        @xml_handle_element("abc", "def")
        @staticmethod
        def method(arg):
            return arg * 6
    assert getattr(Klass.method, _ATTR_MARKER, None) == (("abc", "def"),)
    assert Klass.method(7) == 42
def test_one_maker_element_on_method_before_staticmethod():
    """Marker applied below @staticmethod works the same way."""
    class Klass:
        @staticmethod
        @xml_handle_element("abc", "def")
        def method(arg):
            return arg * 6
    assert getattr(Klass.method, _ATTR_MARKER, None) == (("abc", "def"),)
    assert Klass.method(7) == 42
def test_several_maker_element():
    """Stacked element markers accumulate innermost-first."""
    @xml_handle_element("abc", "def")
    @xml_handle_element("ghi")
    @xml_handle_element("klm", "opq", "rst")
    def handler(value):
        return value * 6

    expected = (("klm", "opq", "rst"), ("ghi",), ("abc", "def"))
    assert getattr(handler, _ATTR_MARKER, None) == expected
    assert handler(7) == 42
def test_one_maker_element_no_args():
with pytest.raises(TypeError):
@xml_handle_element()
def fct(arg): # pylint: disable=unused-variable
return arg * 6
def test_one_marker_text_no_call():
@xml_handle_text
def fct(arg):
return arg * 6
assert getattr(fct, _ATTR_MARKER, None) == ((XMLText.name,),)
assert fct(7) == 42
def test_one_marker_text_no_args():
@xml_handle_text()
def fct(arg):
return arg * 6
assert getattr(fct, _ATTR_MARKER, None) == ((XMLText.name,),)
assert fct(7) == 42
def test_one_marker_text_args():
@xml_handle_text("abc", "def")
def fct(arg):
return arg * 6
assert getattr(fct, _ATTR_MARKER, None) == (
(
"abc",
"def",
XMLText.name,
),
)
assert fct(7) == 42
def test_mixed_markers():
@xml_handle_element("abc", "def")
@xml_handle_text("ghi")
@xml_handle_element("klm", "opq", "rst")
def fct(arg):
return arg * 6
assert getattr(fct, _ATTR_MARKER, None) == (
("klm", "opq", "rst"),
("ghi", XMLText.name),
("abc", "def"),
)
assert fct(7) == 42
| [
"bigxml.handler_marker.xml_handle_text",
"bigxml.handler_marker.xml_handle_element",
"pytest.raises"
] | [((169, 201), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (187, 201), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((1360, 1392), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (1378, 1392), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((1398, 1423), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""ghi"""'], {}), "('ghi')\n", (1416, 1423), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((1429, 1468), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""klm"""', '"""opq"""', '"""rst"""'], {}), "('klm', 'opq', 'rst')\n", (1447, 1468), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((2087, 2104), 'bigxml.handler_marker.xml_handle_text', 'xml_handle_text', ([], {}), '()\n', (2102, 2104), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((2277, 2306), 'bigxml.handler_marker.xml_handle_text', 'xml_handle_text', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (2292, 2306), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((2546, 2578), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (2564, 2578), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((2584, 2606), 'bigxml.handler_marker.xml_handle_text', 'xml_handle_text', (['"""ghi"""'], {}), "('ghi')\n", (2599, 2606), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((2612, 2651), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""klm"""', 
'"""opq"""', '"""rst"""'], {}), "('klm', 'opq', 'rst')\n", (2630, 2651), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((483, 515), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (501, 515), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((801, 833), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (819, 833), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((1126, 1158), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', (['"""abc"""', '"""def"""'], {}), "('abc', 'def')\n", (1144, 1158), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n'), ((1712, 1736), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1725, 1736), False, 'import pytest\n'), ((1748, 1768), 'bigxml.handler_marker.xml_handle_element', 'xml_handle_element', ([], {}), '()\n', (1766, 1768), False, 'from bigxml.handler_marker import _ATTR_MARKER, xml_handle_element, xml_handle_text\n')] |
"""
This example demonstrates SQL Schema generation for each DB type supported.
"""
from tortoise import fields
from tortoise.fields import SET_NULL
from tortoise.models import Model
class Tournament(Model):
tid = fields.SmallIntField(pk=True)
name = fields.CharField(max_length=100, description="Tournament name", index=True)
created = fields.DatetimeField(auto_now_add=True, description="Created */'`/* datetime")
class Meta:
table_description = "What Tournaments */'`/* we have"
class Event(Model):
id = fields.BigIntField(pk=True, description="Event ID")
name = fields.TextField()
tournament: fields.ForeignKeyRelation[Tournament] = fields.ForeignKeyField(
"models.Tournament", related_name="events", description="FK to tournament"
)
participants: fields.ManyToManyRelation["Team"] = fields.ManyToManyField(
"models.Team",
related_name="events",
through="teamevents",
description="How participants relate",
on_delete=SET_NULL,
)
modified = fields.DatetimeField(auto_now=True)
prize = fields.DecimalField(max_digits=10, decimal_places=2, null=True)
token = fields.CharField(max_length=100, description="Unique token", unique=True)
key = fields.CharField(max_length=100)
class Meta:
table_description = "This table contains a list of all the events"
unique_together = [("name", "prize"), ["tournament", "key"]]
class TeamEvent(Model):
team: fields.ForeignKeyRelation["Team"] = fields.ForeignKeyField(
"models.Team", related_name="teams"
)
event: fields.ForeignKeyRelation[Event] = fields.ForeignKeyField(
"models.Event", related_name="events"
)
score = fields.IntField()
class Meta:
table = "teamevents"
table_description = "How participants relate"
unique_together = ("team", "event")
class Team(Model):
name = fields.CharField(max_length=50, pk=True, description="The TEAM name (and PK)")
key = fields.IntField()
manager: fields.ForeignKeyRelation["Team"] = fields.ForeignKeyField(
"models.Team", related_name="team_members", null=True
)
talks_to: fields.ManyToManyRelation["Team"] = fields.ManyToManyField(
"models.Team", related_name="gets_talked_to"
)
class Meta:
table_description = "The TEAMS!"
indexes = [("manager", "key"), ["manager_id", "name"]]
| [
"tortoise.fields.BigIntField",
"tortoise.fields.CharField",
"tortoise.fields.DecimalField",
"tortoise.fields.DatetimeField",
"tortoise.fields.ManyToManyField",
"tortoise.fields.IntField",
"tortoise.fields.ForeignKeyField",
"tortoise.fields.SmallIntField",
"tortoise.fields.TextField"
] | [((221, 250), 'tortoise.fields.SmallIntField', 'fields.SmallIntField', ([], {'pk': '(True)'}), '(pk=True)\n', (241, 250), False, 'from tortoise import fields\n'), ((262, 337), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)', 'description': '"""Tournament name"""', 'index': '(True)'}), "(max_length=100, description='Tournament name', index=True)\n", (278, 337), False, 'from tortoise import fields\n'), ((352, 430), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'auto_now_add': '(True)', 'description': '"""Created */\'`/* datetime"""'}), '(auto_now_add=True, description="Created */\'`/* datetime")\n', (372, 430), False, 'from tortoise import fields\n'), ((541, 592), 'tortoise.fields.BigIntField', 'fields.BigIntField', ([], {'pk': '(True)', 'description': '"""Event ID"""'}), "(pk=True, description='Event ID')\n", (559, 592), False, 'from tortoise import fields\n'), ((604, 622), 'tortoise.fields.TextField', 'fields.TextField', ([], {}), '()\n', (620, 622), False, 'from tortoise import fields\n'), ((679, 781), 'tortoise.fields.ForeignKeyField', 'fields.ForeignKeyField', (['"""models.Tournament"""'], {'related_name': '"""events"""', 'description': '"""FK to tournament"""'}), "('models.Tournament', related_name='events',\n description='FK to tournament')\n", (701, 781), False, 'from tortoise import fields\n'), ((846, 992), 'tortoise.fields.ManyToManyField', 'fields.ManyToManyField', (['"""models.Team"""'], {'related_name': '"""events"""', 'through': '"""teamevents"""', 'description': '"""How participants relate"""', 'on_delete': 'SET_NULL'}), "('models.Team', related_name='events', through=\n 'teamevents', description='How participants relate', on_delete=SET_NULL)\n", (868, 992), False, 'from tortoise import fields\n'), ((1050, 1085), 'tortoise.fields.DatetimeField', 'fields.DatetimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1070, 1085), False, 'from tortoise import fields\n'), ((1098, 1161), 
'tortoise.fields.DecimalField', 'fields.DecimalField', ([], {'max_digits': '(10)', 'decimal_places': '(2)', 'null': '(True)'}), '(max_digits=10, decimal_places=2, null=True)\n', (1117, 1161), False, 'from tortoise import fields\n'), ((1174, 1247), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)', 'description': '"""Unique token"""', 'unique': '(True)'}), "(max_length=100, description='Unique token', unique=True)\n", (1190, 1247), False, 'from tortoise import fields\n'), ((1258, 1290), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1274, 1290), False, 'from tortoise import fields\n'), ((1524, 1583), 'tortoise.fields.ForeignKeyField', 'fields.ForeignKeyField', (['"""models.Team"""'], {'related_name': '"""teams"""'}), "('models.Team', related_name='teams')\n", (1546, 1583), False, 'from tortoise import fields\n'), ((1644, 1705), 'tortoise.fields.ForeignKeyField', 'fields.ForeignKeyField', (['"""models.Event"""'], {'related_name': '"""events"""'}), "('models.Event', related_name='events')\n", (1666, 1705), False, 'from tortoise import fields\n'), ((1732, 1749), 'tortoise.fields.IntField', 'fields.IntField', ([], {}), '()\n', (1747, 1749), False, 'from tortoise import fields\n'), ((1926, 2004), 'tortoise.fields.CharField', 'fields.CharField', ([], {'max_length': '(50)', 'pk': '(True)', 'description': '"""The TEAM name (and PK)"""'}), "(max_length=50, pk=True, description='The TEAM name (and PK)')\n", (1942, 2004), False, 'from tortoise import fields\n'), ((2015, 2032), 'tortoise.fields.IntField', 'fields.IntField', ([], {}), '()\n', (2030, 2032), False, 'from tortoise import fields\n'), ((2082, 2159), 'tortoise.fields.ForeignKeyField', 'fields.ForeignKeyField', (['"""models.Team"""'], {'related_name': '"""team_members"""', 'null': '(True)'}), "('models.Team', related_name='team_members', null=True)\n", (2104, 2159), False, 'from tortoise import fields\n'), ((2224, 2292), 
'tortoise.fields.ManyToManyField', 'fields.ManyToManyField', (['"""models.Team"""'], {'related_name': '"""gets_talked_to"""'}), "('models.Team', related_name='gets_talked_to')\n", (2246, 2292), False, 'from tortoise import fields\n')] |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import ddt
from zaqar.tests.functional import base
from zaqar.tests.functional import helpers
@ddt.ddt
class TestClaims(base.V1_1FunctionalTestBase):
"""Tests for Claims."""
server_class = base.ZaqarServer
def setUp(self):
super(TestClaims, self).setUp()
self.headers = helpers.create_zaqar_headers(self.cfg)
self.client.headers = self.headers
self.queue = uuid.uuid1()
self.queue_url = ("{url}/{version}/queues/{queue}".format(
url=self.cfg.zaqar.url,
version="v1.1",
queue=self.queue))
self.client.put(self.queue_url)
self.claim_url = self.queue_url + '/claims'
self.client.set_base_url(self.claim_url)
# Post Messages
url = self.queue_url + '/messages'
doc = helpers.create_message_body_v1_1(
messagecount=self.limits.max_messages_per_page)
for i in range(10):
self.client.post(url, data=doc)
@ddt.data({}, {'limit': 2})
def test_claim_messages(self, params):
"""Claim messages."""
message_count = params.get('limit',
self.limits.max_messages_per_claim_or_pop)
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)
self.assertEqual(result.status_code, 201)
self.assertSchema(result.json(), 'claim_create')
actual_message_count = len(result.json()['messages'])
self.assertMessageCount(actual_message_count, message_count)
response_headers = set(result.headers.keys())
self.assertIsSubset(self.headers_response_with_body, response_headers)
test_claim_messages.tags = ['smoke', 'positive']
def test_query_claim(self):
"""Query Claim."""
params = {'limit': 1}
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)
location = result.headers['Location']
url = self.cfg.zaqar.url + location
result = self.client.get(url)
self.assertEqual(result.status_code, 200)
test_query_claim.tags = ['smoke', 'positive']
@ddt.data({}, {"grace": 100})
def test_claim_default_ttl(self, doc):
"""Create claim with default TTL and grace values."""
params = {'limit': 1}
result = self.client.post(params=params, data=doc)
self.assertEqual(result.status_code, 201)
location = result.headers['Location']
url = self.cfg.zaqar.url + location
result = self.client.get(url)
self.assertEqual(result.status_code, 200)
default_ttl = result.json()['ttl']
self.assertEqual(default_ttl, self.resource_defaults.claim_ttl)
test_claim_default_ttl.tags = ['smoke', 'positive']
def test_claim_more_than_allowed(self):
"""Claim more than max allowed per request.
Zaqar allows a maximum of 20 messages per claim by default.
"""
params = {"limit": self.limits.max_messages_per_claim_or_pop + 1}
doc = {"ttl": 300, "grace": 100}
result = self.client.post(params=params, data=doc)
self.assertEqual(result.status_code, 400)
test_claim_more_than_allowed.tags = ['negative']
def test_claim_patch(self):
"""Update Claim."""
# Test Setup - Post Claim
doc = {"ttl": 300, "grace": 400}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 201)
# Patch Claim
claim_location = result.headers['Location']
url = self.cfg.zaqar.url + claim_location
doc_updated = {"ttl": 300, 'grace': 60}
result = self.client.patch(url, data=doc_updated)
self.assertEqual(result.status_code, 204)
# verify that the claim TTL is updated
result = self.client.get(url)
new_ttl = result.json()['ttl']
self.assertEqual(doc_updated['ttl'], new_ttl)
test_claim_patch.tags = ['smoke', 'positive']
def test_delete_claimed_message(self):
"""Delete message belonging to a Claim."""
# Test Setup - Post claim
doc = {"ttl": 60, "grace": 60}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 201)
# Delete Claimed Messages
for rst in result.json()['messages']:
href = rst['href']
url = self.cfg.zaqar.url + href
result = self.client.delete(url)
self.assertEqual(result.status_code, 204)
test_delete_claimed_message.tags = ['smoke', 'positive']
def test_claim_release(self):
"""Release Claim."""
doc = {"ttl": 300, "grace": 100}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 201)
# Extract claim location and construct the claim URL.
location = result.headers['Location']
url = self.cfg.zaqar.url + location
# Release Claim.
result = self.client.delete(url)
self.assertEqual(result.status_code, 204)
test_claim_release.tags = ['smoke', 'positive']
@ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)
def test_claim_invalid_ttl(self, ttl):
"""Post Claim with invalid TTL.
The request JSON body will have a TTL value
outside the allowed range.Allowed ttl values is
60 <= ttl <= 43200.
"""
doc = {"ttl": ttl, "grace": 100}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 400)
test_claim_invalid_ttl.tags = ['negative']
@ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)
def test_claim_invalid_grace(self, grace):
"""Post Claim with invalid grace.
The request JSON body will have a grace value
outside the allowed range.Allowed grace values is
60 <= grace <= 43200.
"""
doc = {"ttl": 100, "grace": grace}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 400)
test_claim_invalid_grace.tags = ['negative']
@ddt.data(0, -100, 30, 10000000000000000000)
def test_claim_invalid_limit(self, grace):
"""Post Claim with invalid limit.
The request url will have a limit outside the allowed range.
Allowed limit values are 0 < limit <= 20(default max).
"""
doc = {"ttl": 100, "grace": grace}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 400)
test_claim_invalid_limit.tags = ['negative']
@ddt.data(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)
def test_patch_claim_invalid_ttl(self, ttl):
"""Patch Claim with invalid TTL.
The request JSON body will have a TTL value
outside the allowed range.Allowed ttl values is
60 <= ttl <= 43200.
"""
doc = {"ttl": 100, "grace": 100}
result = self.client.post(data=doc)
self.assertEqual(result.status_code, 201)
# Extract claim location and construct the claim URL.
location = result.headers['Location']
url = self.cfg.zaqar.url + location
# Patch Claim.
doc = {"ttl": ttl}
result = self.client.patch(url, data=doc)
self.assertEqual(result.status_code, 400)
test_patch_claim_invalid_ttl.tags = ['negative']
def test_query_non_existing_claim(self):
"""Query Non Existing Claim."""
path = '/non-existing-claim'
result = self.client.get(path)
self.assertEqual(result.status_code, 404)
test_query_non_existing_claim.tags = ['negative']
def test_patch_non_existing_claim(self):
"""Patch Non Existing Claim."""
path = '/non-existing-claim'
doc = {"ttl": 400}
result = self.client.patch(path, data=doc)
self.assertEqual(result.status_code, 404)
test_patch_non_existing_claim.tags = ['negative']
def test_delete_non_existing_claim(self):
"""Patch Non Existing Claim."""
path = '/non-existing-claim'
result = self.client.delete(path)
self.assertEqual(result.status_code, 204)
test_delete_non_existing_claim.tags = ['negative']
def tearDown(self):
"""Delete Queue after Claim Test."""
super(TestClaims, self).tearDown()
self.client.delete(self.queue_url)
| [
"uuid.uuid1",
"zaqar.tests.functional.helpers.create_message_body_v1_1",
"ddt.data",
"zaqar.tests.functional.helpers.create_zaqar_headers"
] | [((1621, 1647), 'ddt.data', 'ddt.data', (['{}', "{'limit': 2}"], {}), "({}, {'limit': 2})\n", (1629, 1647), False, 'import ddt\n'), ((2800, 2828), 'ddt.data', 'ddt.data', (['{}', "{'grace': 100}"], {}), "({}, {'grace': 100})\n", (2808, 2828), False, 'import ddt\n'), ((5737, 5810), 'ddt.data', 'ddt.data', (['(10000000000000000000)', '(-100)', '(1)', '(59)', '(43201)', '(-10000000000000000000)'], {}), '(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)\n', (5745, 5810), False, 'import ddt\n'), ((6233, 6306), 'ddt.data', 'ddt.data', (['(10000000000000000000)', '(-100)', '(1)', '(59)', '(43201)', '(-10000000000000000000)'], {}), '(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)\n', (6241, 6306), False, 'import ddt\n'), ((6745, 6788), 'ddt.data', 'ddt.data', (['(0)', '(-100)', '(30)', '(10000000000000000000)'], {}), '(0, -100, 30, 10000000000000000000)\n', (6753, 6788), False, 'import ddt\n'), ((7217, 7290), 'ddt.data', 'ddt.data', (['(10000000000000000000)', '(-100)', '(1)', '(59)', '(43201)', '(-10000000000000000000)'], {}), '(10000000000000000000, -100, 1, 59, 43201, -10000000000000000000)\n', (7225, 7290), False, 'import ddt\n'), ((902, 940), 'zaqar.tests.functional.helpers.create_zaqar_headers', 'helpers.create_zaqar_headers', (['self.cfg'], {}), '(self.cfg)\n', (930, 940), False, 'from zaqar.tests.functional import helpers\n'), ((1006, 1018), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1016, 1018), False, 'import uuid\n'), ((1448, 1533), 'zaqar.tests.functional.helpers.create_message_body_v1_1', 'helpers.create_message_body_v1_1', ([], {'messagecount': 'self.limits.max_messages_per_page'}), '(messagecount=self.limits.max_messages_per_page\n )\n', (1480, 1533), False, 'from zaqar.tests.functional import helpers\n')] |
from xmlrpc.client import Boolean
import boto3
import logging
from datetime import date, datetime
from botocore.exceptions import ClientError
import json
from json_datetime_serializer import json_datetime_serializer
from kms_client import kmsClient
from search_kms_using_account_id import search_kms_using_account_id
from get_arn import get_arn
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO,
format='%(asctime)s: %(levelname)s: %(message)s')
# mrk-8b618d3b5c1d432aad977f31b29df64b
# 862638055046
def update_kms_key_policy(target_arn, key_id):
client = kmsClient()
kms_policy_data = client.get_key_policy(KeyId=key_id,PolicyName='default')
kms_policy = json.loads(kms_policy_data["Policy"])
policy_update = Boolean(False)
for statement in kms_policy["Statement"]:
if "Allow use of the key" in statement["Sid"]:
if target_arn not in statement['Principal']['AWS']:
if type(statement['Principal']['AWS']) is list:
statement['Principal']['AWS'].append(target_arn)
statement['Principal']['AWS'].sort()
policy_update = True
else:
print('Target ARN already exists in the policy')
policy_update = False
if policy_update:
response = client.put_key_policy(
KeyId=key_id,
PolicyName='default',
Policy=json.dumps(kms_policy)
)
print(response)
return kms_policy
if __name__ == '__main__':
ARN = 'arn:aws:iam::862638055046:user/test-user'
print(f"Arn: {ARN}")
kms = update_kms_key_policy(ARN)
logger.info('Getting information about KMS key...')
logger.info(
f'Key Details: {json.dumps(kms, indent=4, default=json_datetime_serializer)}'
)
logger.info('Done!')
| [
"logging.getLogger",
"logging.basicConfig",
"json.loads",
"xmlrpc.client.Boolean",
"json.dumps",
"kms_client.kmsClient"
] | [((355, 374), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (372, 374), False, 'import logging\n'), ((375, 469), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s: %(levelname)s: %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s: %(levelname)s: %(message)s')\n", (394, 469), False, 'import logging\n'), ((602, 613), 'kms_client.kmsClient', 'kmsClient', ([], {}), '()\n', (611, 613), False, 'from kms_client import kmsClient\n'), ((710, 747), 'json.loads', 'json.loads', (["kms_policy_data['Policy']"], {}), "(kms_policy_data['Policy'])\n", (720, 747), False, 'import json\n'), ((768, 782), 'xmlrpc.client.Boolean', 'Boolean', (['(False)'], {}), '(False)\n', (775, 782), False, 'from xmlrpc.client import Boolean\n'), ((1443, 1465), 'json.dumps', 'json.dumps', (['kms_policy'], {}), '(kms_policy)\n', (1453, 1465), False, 'import json\n'), ((1768, 1827), 'json.dumps', 'json.dumps', (['kms'], {'indent': '(4)', 'default': 'json_datetime_serializer'}), '(kms, indent=4, default=json_datetime_serializer)\n', (1778, 1827), False, 'import json\n')] |
# Author: <NAME>
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tornado.testing import AsyncTestCase, gen_test
try:
from twisted.internet.defer import ( # type: ignore
inlineCallbacks,
returnValue,
)
have_twisted = True
except ImportError:
have_twisted = False
else:
# Not used directly but needed for `yield deferred` to work.
import tornado.platform.twisted # noqa: F401
skipIfNoTwisted = unittest.skipUnless(have_twisted, "twisted module not present")
@skipIfNoTwisted
class ConvertDeferredTest(AsyncTestCase):
@gen_test
def test_success(self):
@inlineCallbacks
def fn():
if False:
# inlineCallbacks doesn't work with regular functions;
# must have a yield even if it's unreachable.
yield
returnValue(42)
res = yield fn()
self.assertEqual(res, 42)
@gen_test
def test_failure(self):
@inlineCallbacks
def fn():
if False:
yield
1 / 0
with self.assertRaises(ZeroDivisionError):
yield fn()
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"twisted.internet.defer.returnValue",
"unittest.skipUnless"
] | [((976, 1039), 'unittest.skipUnless', 'unittest.skipUnless', (['have_twisted', '"""twisted module not present"""'], {}), "(have_twisted, 'twisted module not present')\n", (995, 1039), False, 'import unittest\n'), ((1707, 1722), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1720, 1722), False, 'import unittest\n'), ((1375, 1390), 'twisted.internet.defer.returnValue', 'returnValue', (['(42)'], {}), '(42)\n', (1386, 1390), False, 'from twisted.internet.defer import inlineCallbacks, returnValue\n')] |
# Generated by Django 3.0.6 on 2020-06-14 05:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='色')),
],
),
migrations.CreateModel(
name='Potato',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='potato_bud_colors', to='related_name.Color')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Fruit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fruit_bud_colors', to='related_name.Color')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AppleWithRelatedName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_apple_color', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleWith3Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='default_colors', to='related_name.Color')),
('fruit_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fruit_colors', related_query_name='my_fruit_colors', to='related_name.Color')),
('leaf_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='leaf_colors', to='related_name.Color')),
],
options={
'default_related_name': 'default_colors',
},
),
migrations.CreateModel(
name='AppleWith2Color',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('bud_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='buds', to='related_name.Color')),
('fruit_color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fruits', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleNoReverseWithPlus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleNoReverseWithEndPlus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='end_plus+', to='related_name.Color')),
],
),
migrations.CreateModel(
name='AppleDefaultRelatedName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_name_appledefaultrelatedname_list', to='related_name.Color')),
],
options={
'default_related_name': '%(app_label)s_%(class)s_list',
},
),
migrations.CreateModel(
name='Apple',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True, verbose_name='品種名')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='related_name.Color')),
],
),
]
| [
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((334, 427), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (350, 427), False, 'from django.db import migrations, models\n'), ((451, 513), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""色"""'}), "(max_length=30, unique=True, verbose_name='色')\n", (467, 513), False, 'from django.db import migrations, models\n'), ((645, 738), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (661, 738), False, 'from django.db import migrations, models\n'), ((762, 826), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (778, 826), False, 'from django.db import migrations, models\n'), ((859, 985), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""potato_bud_colors"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='potato_bud_colors', to='related_name.Color')\n", (876, 985), False, 'from django.db import migrations, models\n'), ((1183, 1276), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1199, 1276), False, 'from django.db import migrations, models\n'), ((1300, 1364), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': 
'"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (1316, 1364), False, 'from django.db import migrations, models\n'), ((1397, 1522), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""fruit_bud_colors"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='fruit_bud_colors', to='related_name.Color')\n", (1414, 1522), False, 'from django.db import migrations, models\n'), ((1735, 1828), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1751, 1828), False, 'from django.db import migrations, models\n'), ((1852, 1916), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (1868, 1916), False, 'from django.db import migrations, models\n'), ((1945, 2068), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""my_apple_color"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='my_apple_color', to='related_name.Color')\n", (1962, 2068), False, 'from django.db import migrations, models\n'), ((2204, 2297), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2220, 2297), False, 'from django.db import migrations, models\n'), ((2321, 2385), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, 
verbose_name='品種名')\n", (2337, 2385), False, 'from django.db import migrations, models\n'), ((2418, 2541), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""default_colors"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='default_colors', to='related_name.Color')\n", (2435, 2541), False, 'from django.db import migrations, models\n'), ((2571, 2735), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""fruit_colors"""', 'related_query_name': '"""my_fruit_colors"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='fruit_colors', related_query_name='my_fruit_colors', to=\n 'related_name.Color')\n", (2588, 2735), False, 'from django.db import migrations, models\n'), ((2759, 2879), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""leaf_colors"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='leaf_colors', to='related_name.Color')\n", (2776, 2879), False, 'from django.db import migrations, models\n'), ((3110, 3203), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3126, 3203), False, 'from django.db import migrations, models\n'), ((3227, 3291), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (3243, 3291), False, 'from django.db import migrations, models\n'), ((3324, 3437), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 
'django.db.models.deletion.CASCADE', 'related_name': '"""buds"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='buds', to='related_name.Color')\n", (3341, 3437), False, 'from django.db import migrations, models\n'), ((3467, 3582), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""fruits"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='fruits', to='related_name.Color')\n", (3484, 3582), False, 'from django.db import migrations, models\n'), ((3725, 3818), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (3741, 3818), False, 'from django.db import migrations, models\n'), ((3842, 3906), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (3858, 3906), False, 'from django.db import migrations, models\n'), ((3935, 4045), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='+', to='related_name.Color')\n", (3952, 4045), False, 'from django.db import migrations, models\n'), ((4191, 4284), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4207, 4284), False, 'from django.db import migrations, models\n'), ((4308, 4372), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 
'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (4324, 4372), False, 'from django.db import migrations, models\n'), ((4401, 4519), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""end_plus+"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='end_plus+', to='related_name.Color')\n", (4418, 4519), False, 'from django.db import migrations, models\n'), ((4663, 4756), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (4679, 4756), False, 'from django.db import migrations, models\n'), ((4780, 4844), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (4796, 4844), False, 'from django.db import migrations, models\n'), ((4873, 5023), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""related_name_appledefaultrelatedname_list"""', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='related_name_appledefaultrelatedname_list', to='related_name.Color')\n", (4890, 5023), False, 'from django.db import migrations, models\n'), ((5258, 5351), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (5274, 5351), False, 'from django.db import migrations, models\n'), ((5375, 5439), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'unique': '(True)', 
'verbose_name': '"""品種名"""'}), "(max_length=30, unique=True, verbose_name='品種名')\n", (5391, 5439), False, 'from django.db import migrations, models\n'), ((5468, 5560), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""related_name.Color"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'related_name.Color')\n", (5485, 5560), False, 'from django.db import migrations, models\n')] |
# ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import logging.handlers as lh
import os
import sys
from controller.framework.ControllerModule import ControllerModule
class Logger(ControllerModule):
    """Controller module that routes log CBTs to the console or a rotating file.

    Destination and verbosity come from the ipop config: ``LogOption``
    ("Console" or "File"), ``LogLevel``, ``LogFilePath``, ``CtrlLogFileName``,
    ``LogFileSize`` and ``BackupLogFileCount``.
    """

    def __init__(self, CFxHandle, paramDict, ModuleName):
        super(Logger, self).__init__(CFxHandle, paramDict, ModuleName)

    def initialize(self):
        # Extract the controller log level from the ipop-config file;
        # if nothing is provided the default is INFO.
        if "LogLevel" in self.CMConfig:
            level = getattr(logging, self.CMConfig["LogLevel"])
        else:
            # BUG FIX: the original used getattr(logging, "info"), which is
            # the logging.info *function*, not the numeric INFO level, so
            # basicConfig()/setLevel() received an invalid value.
            level = logging.INFO
        # Check whether logging is set to console or file by the user
        if self.CMConfig["LogOption"] == "Console":
            # Console logging
            logging.basicConfig(format='[%(asctime)s.%(msecs)03d] %(levelname)s:\n%(message)s\n', datefmt='%H:%M:%S',
                                level=level)
            logging.info("Logger Module Loaded")
        else:
            # Extract the filepath, else keep logs in the current working directory
            filepath = self.CMConfig.get("LogFilePath", "./")
            fqname = filepath + \
                self.CMConfig.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            self.logger = logging.getLogger("IPOP Rotating Log")
            self.logger.setLevel(level)
            # Create a rotating file handler bounded by size/backup count
            handler = lh.RotatingFileHandler(filename=fqname, maxBytes=self.CMConfig["LogFileSize"],
                                            backupCount=self.CMConfig["BackupLogFileCount"])
            formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt='%Y%m%d %H:%M:%S')
            handler.setFormatter(formatter)
            # Add the file handler to the Python logger module
            self.logger.addHandler(handler)

        # PKTDUMP mode dumps packet information at a custom level below DEBUG
        logging.addLevelName(5, "PKTDUMP")
        logging.PKTDUMP = 5

    def processCBT(self, cbt):
        """Dispatch a log CBT; the CBT action tag names the severity."""
        if cbt.action in ('debug', 'info', 'warning', 'error'):
            # File mode logs through the rotating-file logger, console mode
            # through the root logger; the method name equals the action tag.
            sink = self.logger if self.CMConfig["LogOption"] == "File" else logging
            getattr(sink, cbt.action)(cbt.initiator + ": " + cbt.data)
        elif cbt.action == "pktdump":
            self.pktdump(message=cbt.data.get('message'),
                         dump=cbt.data.get('dump'))
        else:
            log = '{0}: unrecognized CBT {1} received from {2}'\
                .format(cbt.recipient, cbt.action, cbt.initiator)
            self.registerCBT('Logger', 'warning', log)

    def timer_method(self):
        pass

    def pktdump(self, message, dump=None, *args, **argv):
        """Log raw packet bytes at the PKTDUMP (5) level as hex pairs."""
        hext = ""
        if dump:
            for i in range(0, len(dump), 2):
                # NOTE(review): str.encode("hex") is Python 2 only; on
                # Python 3 this needs binascii.hexlify/bytes.hex instead.
                hext += dump[i:i+2].encode("hex")
                hext += " "
                if i % 16 == 14:
                    hext += "\n"
            logging.log(5, message + "\n" + hext)
        else:
            logging.log(5, message, *args, **argv)

    def terminate(self):
        pass
| [
"logging.basicConfig",
"logging.getLogger",
"logging.debug",
"logging.Formatter",
"logging.handlers.RotatingFileHandler",
"logging.warning",
"logging.log",
"os.path.isdir",
"os.mkdir",
"logging.addLevelName",
"logging.info",
"logging.error"
] | [((3104, 3138), 'logging.addLevelName', 'logging.addLevelName', (['(5)', '"""PKTDUMP"""'], {}), "(5, 'PKTDUMP')\n", (3124, 3138), False, 'import logging\n'), ((1880, 2010), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s.%(msecs)03d] %(levelname)s:\n%(message)s\n"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'level'}), '(format=\n """[%(asctime)s.%(msecs)03d] %(levelname)s:\n%(message)s\n""", datefmt=\n \'%H:%M:%S\', level=level)\n', (1899, 2010), False, 'import logging\n'), ((2043, 2079), 'logging.info', 'logging.info', (['"""Logger Module Loaded"""'], {}), "('Logger Module Loaded')\n", (2055, 2079), False, 'import logging\n'), ((2438, 2476), 'logging.getLogger', 'logging.getLogger', (['"""IPOP Rotating Log"""'], {}), "('IPOP Rotating Log')\n", (2455, 2476), False, 'import logging\n'), ((2582, 2714), 'logging.handlers.RotatingFileHandler', 'lh.RotatingFileHandler', ([], {'filename': 'fqname', 'maxBytes': "self.CMConfig['LogFileSize']", 'backupCount': "self.CMConfig['BackupLogFileCount']"}), "(filename=fqname, maxBytes=self.CMConfig[\n 'LogFileSize'], backupCount=self.CMConfig['BackupLogFileCount'])\n", (2604, 2714), True, 'import logging.handlers as lh\n'), ((2779, 2882), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s"""'], {'datefmt': '"""%Y%m%d %H:%M:%S"""'}), "('[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s',\n datefmt='%Y%m%d %H:%M:%S')\n", (2796, 2882), False, 'import logging\n'), ((4912, 4949), 'logging.log', 'logging.log', (['(5)', "(message + '\\n' + hext)"], {}), "(5, message + '\\n' + hext)\n", (4923, 4949), False, 'import logging\n'), ((4976, 5014), 'logging.log', 'logging.log', (['(5)', 'message', '*args'], {}), '(5, message, *args, **argv)\n', (4987, 5014), False, 'import logging\n'), ((2354, 2377), 'os.path.isdir', 'os.path.isdir', (['filepath'], {}), '(filepath)\n', (2367, 2377), False, 'import os\n'), ((2393, 2411), 'os.mkdir', 'os.mkdir', (['filepath'], {}), 
'(filepath)\n', (2401, 2411), False, 'import os\n'), ((3462, 3508), 'logging.debug', 'logging.debug', (["(cbt.initiator + ': ' + cbt.data)"], {}), "(cbt.initiator + ': ' + cbt.data)\n", (3475, 3508), False, 'import logging\n'), ((3697, 3742), 'logging.info', 'logging.info', (["(cbt.initiator + ': ' + cbt.data)"], {}), "(cbt.initiator + ': ' + cbt.data)\n", (3709, 3742), False, 'import logging\n'), ((3937, 3985), 'logging.warning', 'logging.warning', (["(cbt.initiator + ': ' + cbt.data)"], {}), "(cbt.initiator + ': ' + cbt.data)\n", (3952, 3985), False, 'import logging\n'), ((4176, 4222), 'logging.error', 'logging.error', (["(cbt.initiator + ': ' + cbt.data)"], {}), "(cbt.initiator + ': ' + cbt.data)\n", (4189, 4222), False, 'import logging\n')] |
# Generated by Django 3.1.3 on 2020-12-13 08:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a non-editable ``slug`` field to the ``watch`` model.

    ``default=1`` is only used to populate existing rows during this
    migration; ``preserve_default=False`` tells Django to drop the default
    from the field definition afterwards.
    """

    dependencies = [
        ('store', '0010_auto_20201212_2058'),
    ]

    operations = [
        migrations.AddField(
            model_name='watch',
            name='slug',
            field=models.SlugField(default=1, editable=False),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.SlugField"
] | [((329, 372), 'django.db.models.SlugField', 'models.SlugField', ([], {'default': '(1)', 'editable': '(False)'}), '(default=1, editable=False)\n', (345, 372), False, 'from django.db import migrations, models\n')] |
from django.contrib import admin
from .models import Writer, Record
# Expose both models in the Django admin with the default ModelAdmin
# options (equivalent to registering each one individually).
for registered_model in (Writer, Record):
    admin.site.register(registered_model)
| [
"django.contrib.admin.site.register"
] | [((69, 96), 'django.contrib.admin.site.register', 'admin.site.register', (['Writer'], {}), '(Writer)\n', (88, 96), False, 'from django.contrib import admin\n'), ((97, 124), 'django.contrib.admin.site.register', 'admin.site.register', (['Record'], {}), '(Record)\n', (116, 124), False, 'from django.contrib import admin\n')] |
from pykalman import KalmanFilter
import numpy as np

# Constant-velocity Kalman filter over a 6-dim state [x, y, z, vx, vy, vz]:
# each row of the transition matrix adds the velocity component to its
# position once per step (the time step is folded into the velocity units),
# and the observation matrix selects only the three position components.
kf = KalmanFilter(transition_matrices=np.array([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                                              [0.0, 1.0, 0.0, 0.0, 1.0, 0.0],
                                              [0.0, 0.0, 1.0, 0.0, 0.0, 1.0],
                                              [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                                              [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                                              [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]),
                  observation_matrices=np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                                               [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                                               [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]),
                  transition_covariance=0.003 * np.eye(6, dtype=float))  # TODO: change this constant

# Mutable filter state shared across kalman_filter() calls:
# step counter, current state mean, current state covariance.
t = 0
means = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
covariances = np.eye(6, dtype=float)
def kalman_filter(measurement):
    """Fold one position measurement into the shared filter state.

    Updates the module-level mean/covariance in place, bumps the step
    counter, and returns the filtered position estimate (the first three
    components of the state mean).
    """
    global t, means, covariances
    means, covariances = kf.filter_update(means, covariances, measurement)
    t += 1.0
    return means[:3]
| [
"numpy.array",
"numpy.eye"
] | [((899, 939), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (907, 939), True, 'import numpy as np\n'), ((954, 976), 'numpy.eye', 'np.eye', (['(6)'], {'dtype': 'float'}), '(6, dtype=float)\n', (960, 976), True, 'import numpy as np\n'), ((92, 304), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0,\n 1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 1.0, 0.0], [\n 0.0, 0.0, 1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n', (100, 304), True, 'import numpy as np\n'), ((575, 686), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])\n', (583, 686), True, 'import numpy as np\n'), ((829, 851), 'numpy.eye', 'np.eye', (['(6)'], {'dtype': 'float'}), '(6, dtype=float)\n', (835, 851), True, 'import numpy as np\n')] |
from api.models import Payroll
class Payrolls(object):
    """Data-access helper for the ``payrolls`` table."""

    def __init__(self, cursor):
        """
        :param cursor: MySQLdb.cursor.Cursor
        """
        self.cursor = cursor
        # Force the connection into UTF-8 so non-ASCII data round-trips.
        for statement in ('SET NAMES utf8;',
                          'SET CHARACTER SET utf8;',
                          'SET character_set_connection=utf8;'):
            cursor.execute(statement)

    @staticmethod
    def _map_payroll(payroll_db_data):
        """Build a Payroll model object from one raw result row."""
        return Payroll(*payroll_db_data)

    def readall(self):
        """Return every payroll row as a list of Payroll objects."""
        self.cursor.execute(
            """
            SELECT
                Id, EmployeeId, PaymentId, ProjectId, PayrollDate
            FROM
                payrolls
            """
        )
        return [self._map_payroll(row) for row in self.cursor.fetchall()]

    def add(self, payroll):
        """Insert a new payroll record."""
        self.cursor.execute(
            """
            INSERT
            INTO payrolls
                (EmployeeId, PaymentId, ProjectId, PayrollDate)
            VALUES
                (%s, %s, %s, %s)
            """,
            (payroll.employee_id,
             payroll.payment_id,
             payroll.project_id,
             payroll.payroll_date)
        )

    def remove(self, payroll_id):
        """Delete the payroll row with the given id."""
        self.cursor.execute(
            """
            DELETE
            FROM payrolls
            WHERE Id=%s
            """,
            [payroll_id]
        )

    def update(self, payroll, payroll_id):
        """Overwrite every column of the payroll row with the given id."""
        self.cursor.execute(
            """
            UPDATE
                payrolls
            SET
                EmployeeId=%s,
                PaymentId=%s,
                ProjectId=%s,
                PayrollDate=%s
            WHERE
                payrolls.Id=%s
            """,
            (payroll.employee_id,
             payroll.payment_id,
             payroll.project_id,
             payroll.payroll_date,
             payroll_id)
        )
| [
"api.models.Payroll"
] | [((413, 438), 'api.models.Payroll', 'Payroll', (['*payroll_db_data'], {}), '(*payroll_db_data)\n', (420, 438), False, 'from api.models import Payroll\n')] |
"""--------------------------------------------------------------------
COPYRIGHT 2014 Stanley Innovation Inc.
Software License Agreement:
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file segway_system_wd.py
\brief This is the segway system watchdod which monitors signals
from the embedded power system to safely shutdown the PC
upon embedded powerdown
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
import socket
import sys
import rospy
import os
from utils import m32
class SegwayWatchdog:
    """Watchdog that listens on UDP port 6234 for the embedded power
    system's shutdown magic word and powers the onboard PC down when it
    arrives."""

    def __init__(self):
        """
        Initialize the non-blocking UDP connection on port 6234.
        """
        self._continue = True
        self.conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.conn.setblocking(False)
        self.conn.bind(('', 6234))

    def Receive(self):
        """
        Poll the socket for a 4-byte shutdown command. No data, a short
        read, or a wrong magic word is silently ignored.
        """
        try:
            data = self.conn.recv(4)
        except socket.error:
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only socket errors (e.g.
            # EWOULDBLOCK from the non-blocking recv) should be ignored.
            data = []
        if len(data) == 4:
            # bytearray yields ints for both Python 2 str and Python 3
            # bytes payloads; the original ord() loop breaks on Python 3.
            rx_dat = [b for b in bytearray(data)]
            shutdwn_cmd = m32(rx_dat)
            if 0x8756BAEB == shutdwn_cmd:
                rospy.logerr("Platform signaled shutdown, need to shutdown the onboard PC")
                self.Close()
                os.system("sudo shutdown now -h")
                sys.exit(0)

    def Close(self):
        """Close the UDP socket."""
        self.conn.close()
| [
"rospy.logerr",
"socket.socket",
"utils.m32",
"sys.exit",
"os.system"
] | [((2103, 2151), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2116, 2151), False, 'import socket\n'), ((2568, 2579), 'utils.m32', 'm32', (['rx_dat'], {}), '(rx_dat)\n', (2571, 2579), False, 'from utils import m32\n'), ((2640, 2715), 'rospy.logerr', 'rospy.logerr', (['"""Platform signaled shutdown, need to shutdown the onboard PC"""'], {}), "('Platform signaled shutdown, need to shutdown the onboard PC')\n", (2652, 2715), False, 'import rospy\n'), ((2761, 2794), 'os.system', 'os.system', (['"""sudo shutdown now -h"""'], {}), "('sudo shutdown now -h')\n", (2770, 2794), False, 'import os\n'), ((2811, 2822), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2819, 2822), False, 'import sys\n')] |
#!/usr/bin/env python
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from getopt import getopt, GetoptError
from random import randint
import os
SUITE=\
"""*** Settings ***
Resource resource.txt
*** Test Cases ***
%TESTCASES%
*** Keywords ***
Test Keyword
Log jee
"""
RESOURCE=\
"""*** Variables ***
@{Resource Var} MOI
*** Keywords ***
%KEYWORDS%
"""
KEYWORD_TEMPLATE=\
"""My Keyword %KW_ID%
No Operation"""
TEST_CASE_TEMPLATE=\
"""My Test %TEST_ID%
My Keyword %KW_ID%
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
My Keyword %KW_ID%
Test Keyword
Log moi
Test Keyword
Log moi
Test Keyword
Log moi"""
def generate_tests(number_of_tests, number_of_keywords):
    """Render one test case per id, each calling a randomly chosen keyword."""
    rendered = []
    for test_id in range(number_of_tests):
        kw_id = randint(0, number_of_keywords - 1)
        rendered.append(TEST_CASE_TEMPLATE
                        .replace('%TEST_ID%', str(test_id))
                        .replace('%KW_ID%', str(kw_id)))
    return '\n'.join(rendered)
def generate_keywords(number_of_keywords):
    """Render keyword definitions 'My Keyword 0' .. 'My Keyword n-1'."""
    render = lambda kw_id: KEYWORD_TEMPLATE.replace('%KW_ID%', str(kw_id))
    return '\n'.join(map(render, range(number_of_keywords)))
def generate_suite(number_of_tests, number_of_keywords):
    """Render a complete suite file with tests and keyword definitions."""
    tests = generate_tests(number_of_tests, number_of_keywords)
    keywords = generate_keywords(number_of_keywords)
    return SUITE.replace('%TESTCASES%', tests).replace('%KEYWORDS%', keywords)
def generate_resource(number_of_keywords):
    """Render the shared resource file with the generated keywords."""
    keywords = generate_keywords(number_of_keywords)
    return RESOURCE.replace('%KEYWORDS%', keywords)
def generate(directory, suites, tests, keywords):
    """Create `directory` and write `suites` suite files plus resource.txt.

    Raises OSError if `directory` already exists (same as the original).
    """
    os.mkdir(directory)
    for suite_index in range(suites):
        # `with` guarantees the handle is closed even if rendering raises
        # (the original leaked the open file on error).
        with open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w') as f:
            f.write(generate_suite(tests, keywords))
    with open(os.path.join('.', directory, 'resource.txt'), 'w') as r:
        r.write(generate_resource(keywords))
def usage():
    """Print the command-line synopsis for this generator script."""
    synopsis = ('datagenerator.py -d [directory] -s [NUMBER OF SUITES]'
                ' -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]')
    print(synopsis)
def main(args):
    """Parse CLI options and generate the requested test data.

    Exits with status 2 on unknown options or when any of -d/-s/-t/-k is
    missing or duplicated (the original crashed with NameError in those
    cases instead of printing usage).
    """
    # Local import: the module-level `import sys` only executes under
    # `if __name__ == '__main__'`, so main() imported from elsewhere
    # would otherwise hit a NameError on sys.exit.
    import sys
    try:
        opts, args = getopt(args, 'd:s:t:k:', [])
    except GetoptError as e:
        print(e)
        usage()
        sys.exit(2)
    options = dict(opts)
    if sorted(options) != ['-d', '-k', '-s', '-t']:
        if opts:
            print(opts)
        usage()
        sys.exit(2)
    generate(options['-d'], int(options['-s']),
             int(options['-t']), int(options['-k']))
if __name__ == '__main__':
    # sys is only needed (and imported) when run as a script.
    import sys
    main(sys.argv[1:])
| [
"getopt.getopt",
"os.path.join",
"os.mkdir",
"sys.exit",
"random.randint"
] | [((2243, 2262), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (2251, 2262), False, 'import os\n'), ((2486, 2530), 'os.path.join', 'os.path.join', (['"""."""', 'directory', '"""resource.txt"""'], {}), "('.', directory, 'resource.txt')\n", (2498, 2530), False, 'import os\n'), ((2776, 2804), 'getopt.getopt', 'getopt', (['args', '"""d:s:t:k:"""', '[]'], {}), "(args, 'd:s:t:k:', [])\n", (2782, 2804), False, 'from getopt import getopt, GetoptError\n'), ((2975, 2986), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2983, 2986), False, 'import sys\n'), ((2342, 2399), 'os.path.join', 'os.path.join', (['"""."""', 'directory', "('suite%s.txt' % suite_index)"], {}), "('.', directory, 'suite%s.txt' % suite_index)\n", (2354, 2399), False, 'import os\n'), ((2875, 2886), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2883, 2886), False, 'import sys\n'), ((1575, 1609), 'random.randint', 'randint', (['(0)', '(number_of_keywords - 1)'], {}), '(0, number_of_keywords - 1)\n', (1582, 1609), False, 'from random import randint\n')] |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import gzip
import numpy as np
import matplotlib.pyplot as plt
from Bio import SeqIO, SeqUtils
# -
# !rm -f atroparvus.fa.gz gambiae.fa.gz 2>/dev/null
# !wget https://vectorbase.org/common/downloads/Current_Release/AgambiaePEST/fasta/data/VectorBase-55_AgambiaePEST_Genome.fasta -O gambiae.fa
# !gzip -9 gambiae.fa
# !wget https://vectorbase.org/common/downloads/Current_Release/AatroparvusEBRO/fasta/data/VectorBase-55_AatroparvusEBRO_Genome.fasta -O atroparvus.fa
# !gzip -9 atroparvus.fa
# Local file names for the two gzipped genome FASTAs downloaded above.
gambiae_name = 'gambiae.fa.gz'
atroparvus_name = 'atroparvus.fa.gz'

# Print the description of every record in the A. gambiae assembly.
for rec in SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta'):
    print(rec.description)
#Do not do this with atroparvus
# Per-chromosome runs of ambiguous bases (N/n) in the A. gambiae assembly.
recs = SeqIO.parse(gzip.open(gambiae_name, 'rt', encoding='utf-8'), 'fasta')
chrom_Ns = {}      # chromosome -> list of N-run lengths
chrom_sizes = {}   # chromosome -> total sequence length
for rec in recs:
    # Only named chromosome records are analyzed; supercontigs are skipped.
    if rec.description.find('supercontig') > -1:
        continue
    print(rec.description, rec.id, rec)
    # IDs look like '<prefix>_<chrom>' -- presumably the VectorBase ID
    # format; TODO confirm for other releases.
    chrom = rec.id.split('_')[1]
    if chrom in ['UNKN']:#, 'Y_unplaced']:
        continue
    chrom_Ns[chrom] = []
    # Run-length scan: curr_size is the length of the current N run,
    # on_N flags whether the scan is currently inside one.
    on_N = False
    curr_size = 0
    for pos, nuc in enumerate(rec.seq):
        if nuc in ['N', 'n']:
            curr_size += 1
            on_N = True
        else:
            if on_N:
                # A run just ended -- record its length and reset.
                chrom_Ns[chrom].append(curr_size)
                curr_size = 0
                on_N = False
    if on_N:
        # Sequence ended inside an N run; record the trailing run too.
        chrom_Ns[chrom].append(curr_size)
    chrom_sizes[chrom] = len(rec.seq)
for chrom, Ns in chrom_Ns.items():
    # Summary per chromosome: %N of total length, run count, longest run.
    size = chrom_sizes[chrom]
    if len(Ns) > 0:
        max_Ns = max(Ns)
    else:
        max_Ns = 'NA'
    print(f'{chrom} ({size}): %Ns ({round(100 * sum(Ns) / size, 1)}), num Ns: {len(Ns)}, max N: {max_Ns}')
# ## Atroparvus super-contigs
# Contig size distribution and per-contig fraction of ambiguous bases
# for the A. atroparvus assembly.
sizes = []
size_N = []
for rec in SeqIO.parse(gzip.open(atroparvus_name, 'rt', encoding='utf-8'), 'fasta'):
    contig_len = len(rec.seq)
    ambiguous = sum(1 for nuc in rec.seq if nuc in ('n', 'N'))
    sizes.append(contig_len)
    size_N.append((contig_len, ambiguous / contig_len))
print(len(sizes), np.median(sizes), np.mean(sizes), max(sizes), min(sizes),
      np.percentile(sizes, 10), np.percentile(sizes, 90))
# Scatter %N against contig size in three panels: small (<= 4800 bp),
# mid-sized, and large (> 540000 bp) contigs, sharing the y axis.
small_split = 4800
large_split = 540000

def _scatter_fraction(ax, pairs):
    # pairs: (contig size, fraction of Ns); plotted as percentage points.
    xs, ys = zip(*[(size, 100 * frac) for size, frac in pairs])
    ax.plot(xs, ys, '.')

fig, axs = plt.subplots(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)
_scatter_fraction(axs[0, 0], [p for p in size_N if p[0] <= small_split])
_scatter_fraction(axs[0, 1], [p for p in size_N if small_split < p[0] <= large_split])
axs[0, 1].set_xlim(small_split, large_split)
_scatter_fraction(axs[0, 2], [p for p in size_N if p[0] > large_split])
axs[0, 0].set_ylabel('Fraction of Ns', fontsize=12)
axs[0, 1].set_xlabel('Contig size', fontsize=12)
fig.suptitle('Fraction of Ns per contig size', fontsize=26)
fig.savefig('frac.png')
| [
"numpy.mean",
"numpy.median",
"gzip.open",
"numpy.percentile",
"matplotlib.pyplot.subplots"
] | [((2511, 2583), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(16, 9)', 'dpi': '(300)', 'squeeze': '(False)', 'sharey': '(True)'}), '(1, 3, figsize=(16, 9), dpi=300, squeeze=False, sharey=True)\n', (2523, 2583), True, 'import matplotlib.pyplot as plt\n'), ((865, 912), 'gzip.open', 'gzip.open', (['gambiae_name', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(gambiae_name, 'rt', encoding='utf-8')\n", (874, 912), False, 'import gzip\n'), ((1019, 1066), 'gzip.open', 'gzip.open', (['gambiae_name', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(gambiae_name, 'rt', encoding='utf-8')\n", (1028, 1066), False, 'import gzip\n'), ((2039, 2089), 'gzip.open', 'gzip.open', (['atroparvus_name', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(atroparvus_name, 'rt', encoding='utf-8')\n", (2048, 2089), False, 'import gzip\n'), ((2343, 2359), 'numpy.median', 'np.median', (['sizes'], {}), '(sizes)\n', (2352, 2359), True, 'import numpy as np\n'), ((2361, 2375), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (2368, 2375), True, 'import numpy as np\n'), ((2407, 2431), 'numpy.percentile', 'np.percentile', (['sizes', '(10)'], {}), '(sizes, 10)\n', (2420, 2431), True, 'import numpy as np\n'), ((2433, 2457), 'numpy.percentile', 'np.percentile', (['sizes', '(90)'], {}), '(sizes, 90)\n', (2446, 2457), True, 'import numpy as np\n')] |
import json
import pandas as pd
import random

# Raw strings fix the original "\s"/"\m" sequences, which were invalid escape
# sequences (DeprecationWarning today, SyntaxError in future Python). The
# resulting path strings are byte-identical to the originals.
df = pd.read_csv(r"experiment\stimuli_creation\maze_lexemes.csv")

states = ["California", "Alabama", "Alaska", "Arizona", "Arkansas",
          "Connecticut", "Colorado", "Delaware", "Florida", "Georgia",
          "Hawaii", "Idaho", "Illinois", "Indiana", "Iowa", "Kansas",
          "Kentucky", "Louisiana", "Maine", "Maryland", "Massachusetts",
          "Michigan", "Minnesota", "Mississippi", "Missouri", "Montana",
          "Nebraska", "Nevada", "New Hampshire", "New Jersey", "New Mexico",
          "New York", "North Carolina", "North Dakota", "Ohio", "Oklahoma",
          "Oregon", "Pennsylvania", "Rhode Island", "South Carolina",
          "South Dakota", "Tennessee", "Texas", "Utah", "Vermont", "Virginia",
          "Washington", "West Virginia", "Wisconsin", "Wyoming"]
random.shuffle(states)
activities = ["swimming", "writing", "singing", "dancing", "hiking", "running",
              "reading", "drawing", "painting", "cooking", "cycling",
              "walking", "studying", "surfing", "camping"]
random.shuffle(activities)

stim_list = []
coin = [0, 1]
entry = []
status = 1


def _write_stimulus(out, entries, row, status, name, pronoun, gender, target,
                    orthog, condition, state, antistate, activity, activity_2,
                    activity_chance, chance):
    """Write one stimulus CSV row for a single (gender x congruency) condition.

    Column order matches the header written below: name,be,det,target,prep,
    state,pro,like,activity,question1,answer1,question2,answer2,gender,
    lexeme,orthog,condition,id.  Also appends the plain sentence to
    `entries` for the to-be-matched list.
    """
    out.write("NAME,is,")
    out.write(row["det"])
    out.write("," + target)
    out.write(",from,")
    out.write(state + ".")
    out.write("," + pronoun + ",")
    out.write("likes,")
    out.write(activity + ".")
    entries.append(gender + ";" + str(status) + ";" + name + " is "
                   + row["det"] + " " + target + " from " + state + ". "
                   + pronoun + " likes " + activity + ".")
    # Activity comprehension question: on a coin flip the probe activity is a
    # fresh random draw, so the correct answer may be "No".
    if activity_chance == 0:
        out.write(",Does NAME like " + activity_2 + "?")
        if activity == activity_2:
            out.write(",Yes")
        else:
            out.write(",No")
    else:
        out.write(",Does NAME like " + activity + "?")
        out.write(",Yes")
    # State comprehension question, again with a 50% chance of a foil state.
    if chance == 0:
        out.write(",Is NAME from " + antistate + "?")
        out.write(",No,")
    else:
        out.write(",Is NAME from " + state + "?")
        out.write(",Yes,")
    out.write(gender + "," + row["lexeme"] + "," + orthog + ",")
    out.write(condition + ",")
    out.write(row["lexeme"] + "_" + condition)
    out.write("\n")


with open(r"experiment\stimuli_creation\maze_stims.csv", 'w') as stim_input:
    stim_input.write("name,be,det,target,prep,state,pro,like,activity,question1,answer1,question2,answer2,gender,lexeme,orthog,condition,id")
    stim_input.write("\n")
    for index, row in df.iterrows():
        status += 1  # NOTE(review): pre-increment means ids start at 2 — confirm intent
        state = states.pop()
        activity = random.choice(activities)
        antistate = random.choice(states)  # `state` was popped, so the foil never equals it
        activity_2 = random.choice(activities)
        # The same two coin flips are shared by all four conditions of this
        # item, so every condition gets identical comprehension questions.
        # Draw order matches the original script, keeping seeded runs stable.
        activity_chance = random.choice(coin)
        chance = random.choice(coin)
        _write_stimulus(stim_input, entry, row, status, "Jane", "She", "female",
                        row["neutral"], row["female"], "neutral_female",
                        state, antistate, activity, activity_2,
                        activity_chance, chance)
        _write_stimulus(stim_input, entry, row, status, "Jane", "She", "female",
                        row["female"], row["female"], "congruent_female",
                        state, antistate, activity, activity_2,
                        activity_chance, chance)
        _write_stimulus(stim_input, entry, row, status, "John", "He", "male",
                        row["neutral"], row["male"], "neutral_male",
                        state, antistate, activity, activity_2,
                        activity_chance, chance)
        _write_stimulus(stim_input, entry, row, status, "John", "He", "male",
                        row["male"], row["male"], "congruent_male",
                        state, antistate, activity, activity_2,
                        activity_chance, chance)
        stim_list.append(row['lexeme'])
        stim_list.append(row['neutral'])
        stim_list.append(row['male'])
        stim_list.append(row['female'])

# Sanity-check dump of every word used, plus the plain sentences for matching.
with open('list_file.txt', 'w') as stim_checker:
    stim_checker.write(str(stim_list))
with open('to-be-matched.txt', 'w') as match_list:
    for sentence in entry:
        match_list.write(sentence + "\n")
| [
"random.choice",
"random.shuffle",
"pandas.read_csv"
] | [((52, 113), 'pandas.read_csv', 'pd.read_csv', (['"""experiment\\\\stimuli_creation\\\\maze_lexemes.csv"""'], {}), "('experiment\\\\stimuli_creation\\\\maze_lexemes.csv')\n", (63, 113), True, 'import pandas as pd\n'), ((697, 719), 'random.shuffle', 'random.shuffle', (['states'], {}), '(states)\n', (711, 719), False, 'import random\n'), ((889, 915), 'random.shuffle', 'random.shuffle', (['activities'], {}), '(activities)\n', (903, 915), False, 'import random\n'), ((1318, 1343), 'random.choice', 'random.choice', (['activities'], {}), '(activities)\n', (1331, 1343), False, 'import random\n'), ((1364, 1385), 'random.choice', 'random.choice', (['states'], {}), '(states)\n', (1377, 1385), False, 'import random\n'), ((1407, 1432), 'random.choice', 'random.choice', (['activities'], {}), '(activities)\n', (1420, 1432), False, 'import random\n'), ((1895, 1914), 'random.choice', 'random.choice', (['coin'], {}), '(coin)\n', (1908, 1914), False, 'import random\n'), ((2280, 2299), 'random.choice', 'random.choice', (['coin'], {}), '(coin)\n', (2293, 2299), False, 'import random\n')] |
from pyfasta import Fasta
def writebed(probelist, outbedfile):
    '''Write probe records to a BED file, one record per line.

    probe list format:
    chr\tstart\tend
    '''
    # `with` guarantees the handle is closed even if a write raises
    # (the original left the file open on error).
    with open(outbedfile, 'w') as outio:
        for pbnow in probelist:
            print(pbnow, file=outio)
def writefa(genomefile, bedfile, outfile):
    """Extract the sequence of each BED interval from a FASTA genome.

    Each output record is a header line ">chr:start-end" followed by the
    corresponding slice of that chromosome's sequence (start/end are used
    as Python slice bounds, so presumably 0-based, end-exclusive — matches
    BED convention; TODO confirm pyfasta's indexing).
    """
    fastafile = Fasta(genomefile)
    # Context managers close both files even if a malformed line raises.
    with open(bedfile, 'r') as bedio, open(outfile, 'w') as outio:
        for lin in bedio:
            chrnow, start, end = lin.rstrip().split('\t')
            seqid = '>' + chrnow + ':' + start + '-' + end
            nowseq = fastafile[chrnow][int(start):int(end)]
            print(seqid, file=outio)
            print(nowseq, file=outio)
| [
"pyfasta.Fasta"
] | [((304, 321), 'pyfasta.Fasta', 'Fasta', (['genomefile'], {}), '(genomefile)\n', (309, 321), False, 'from pyfasta import Fasta\n')] |
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from action_tutorials_interfaces.action import Fibonacci
class FibonacciActionClient(Node):
    """ROS 2 node that submits Fibonacci goals to the 'fibonacci' action server."""

    def __init__(self):
        super().__init__('fibonacci_action_client')
        self._action_client = ActionClient(self, Fibonacci, 'fibonacci')

    def send_goal(self, order):
        """Wait for the action server, then send a goal of the given order.

        Returns the future produced by ``send_goal_async``.
        """
        self._action_client.wait_for_server()
        goal = Fibonacci.Goal()
        goal.order = order
        return self._action_client.send_goal_async(goal)
def main(args=None):
    """Initialise rclpy, send one Fibonacci goal (order 10) and wait for it."""
    rclpy.init(args=args)
    client_node = FibonacciActionClient()
    goal_future = client_node.send_goal(10)
    rclpy.spin_until_future_complete(client_node, goal_future)


if __name__ == '__main__':
    main()
| [
"rclpy.spin_until_future_complete",
"rclpy.init",
"action_tutorials_interfaces.action.Fibonacci.Goal",
"rclpy.action.ActionClient"
] | [((560, 581), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (570, 581), False, 'import rclpy\n'), ((674, 729), 'rclpy.spin_until_future_complete', 'rclpy.spin_until_future_complete', (['action_client', 'future'], {}), '(action_client, future)\n', (706, 729), False, 'import rclpy\n'), ((281, 323), 'rclpy.action.ActionClient', 'ActionClient', (['self', 'Fibonacci', '"""fibonacci"""'], {}), "(self, Fibonacci, 'fibonacci')\n", (293, 323), False, 'from rclpy.action import ActionClient\n'), ((376, 392), 'action_tutorials_interfaces.action.Fibonacci.Goal', 'Fibonacci.Goal', ([], {}), '()\n', (390, 392), False, 'from action_tutorials_interfaces.action import Fibonacci\n')] |
from web import app
from glob import glob

# Watching the template files makes the dev server reload when they change.
template_files = glob('./web/templates/**.html')

app.run(
    debug=True,
    host='0.0.0.0',
    port=5000,
    extra_files=template_files,
)
| [
"glob.glob"
] | [((119, 150), 'glob.glob', 'glob', (['"""./web/templates/**.html"""'], {}), "('./web/templates/**.html')\n", (123, 150), False, 'from glob import glob\n')] |
"""
Django settings for addr project.
Generated by 'django-admin startproject' using Django 1.11.29.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from datetime import timedelta
from dotenv import load_dotenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '<KEY>'
load_dotenv('/addr/.env')
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
PROJ_ENV = os.getenv("PROJ_ENV")
ALLOWED_HOSTS = ['*']
PROJECT_ROOT_PATH = '/addr/src/'
ADDR_TEMPLATE_FOLDER = PROJECT_ROOT_PATH + '/static/html'
ADDR_STATIC_ROOT = PROJECT_ROOT_PATH + '/static/'
ADDR_STATIC_FOLDER = PROJECT_ROOT_PATH + '/static/html'
DJANGO_LOGFILE = '/addr/ops/logs/uwsgi/django.log'
# Application definition
INSTALLED_APPS = []
if PROJ_ENV == 'dev':
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adr',
'rest_framework',
'rest_framework.authtoken',
'corsheaders'
]
else:
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'adr',
'rest_framework',
'rest_framework.authtoken',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework_simplejwt.authentication.JWTAuthentication',
],
# 'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
# ),
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=240),
'REFRESH_TOKEN_LIFETIME': timedelta(days=6),
'SIGNING_KEY': 's1d'
}
MIDDLEWARE = []
if PROJ_ENV == 'dev':
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
else:
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = True
ROOT_URLCONF = 'addr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [ADDR_TEMPLATE_FOLDER],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'addr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2'),
'NAME': os.environ.get('POSTGRES_DB', 'addrmain'),
'USER': os.environ.get('POSTGRES_USER', 'addruser'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', ''),
'HOST': os.environ.get('POSTGRES_SVCNM', os.environ.get('POSTGRES_HOST', 'addr_database')),
'PORT': os.environ.get('POSTGRES_PORT', '5432'),
},
# 'logs': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'logs',
# 'USER': os.environ.get('PGUSER', 'dartuser'),
# 'PASSWORD': os.environ.get('PGPASSWORD', ''),
# 'HOST': os.environ.get('PGHOST', 'localhost'),
# 'PORT': os.environ.get('PGPORT', '5432'),
# },
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Australia/Melbourne'
USE_I18N = True
USE_L10N = True
# USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
ADDR_STATIC_ROOT,
]
DATA_UPLOAD_MAX_MEMORY_SIZE = 5242880
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 5242880
| [
"os.getenv",
"os.path.join",
"os.environ.get",
"dotenv.load_dotenv",
"os.path.abspath",
"datetime.timedelta"
] | [((754, 779), 'dotenv.load_dotenv', 'load_dotenv', (['"""/addr/.env"""'], {}), "('/addr/.env')\n", (765, 779), False, 'from dotenv import load_dotenv\n'), ((793, 823), 'os.getenv', 'os.getenv', (['"""DJANGO_SECRET_KEY"""'], {}), "('DJANGO_SECRET_KEY')\n", (802, 823), False, 'import os\n'), ((916, 937), 'os.getenv', 'os.getenv', (['"""PROJ_ENV"""'], {}), "('PROJ_ENV')\n", (925, 937), False, 'import os\n'), ((2300, 2322), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(240)'}), '(minutes=240)\n', (2309, 2322), False, 'from datetime import timedelta\n'), ((2354, 2371), 'datetime.timedelta', 'timedelta', ([], {'days': '(6)'}), '(days=6)\n', (2363, 2371), False, 'from datetime import timedelta\n'), ((5780, 5812), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (5792, 5812), False, 'import os\n'), ((497, 522), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (512, 522), False, 'import os\n'), ((4128, 4203), 'os.environ.get', 'os.environ.get', (['"""DATABASE_ENGINE"""', '"""django.db.backends.postgresql_psycopg2"""'], {}), "('DATABASE_ENGINE', 'django.db.backends.postgresql_psycopg2')\n", (4142, 4203), False, 'import os\n'), ((4221, 4262), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_DB"""', '"""addrmain"""'], {}), "('POSTGRES_DB', 'addrmain')\n", (4235, 4262), False, 'import os\n'), ((4280, 4323), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_USER"""', '"""addruser"""'], {}), "('POSTGRES_USER', 'addruser')\n", (4294, 4323), False, 'import os\n'), ((4345, 4384), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_PASSWORD"""', '""""""'], {}), "('POSTGRES_PASSWORD', '')\n", (4359, 4384), False, 'import os\n'), ((4502, 4541), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_PORT"""', '"""5432"""'], {}), "('POSTGRES_PORT', '5432')\n", (4516, 4541), False, 'import os\n'), ((4435, 4483), 'os.environ.get', 'os.environ.get', (['"""POSTGRES_HOST"""', '"""addr_database"""'], {}), 
"('POSTGRES_HOST', 'addr_database')\n", (4449, 4483), False, 'import os\n')] |
from bs4 import BeautifulSoup
import requests
import pymongo
from splinter import Browser
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import pandas as pd
def init_browser():
    """Return a visible (non-headless) Chrome browser driven by splinter."""
    return Browser("chrome", executable_path="chromedriver", headless=False)
def scrape():
    """Scrape Mars news, featured image, facts table and hemisphere images.

    Returns a dict with keys: latest_news_title, latest_news_paragraph,
    featured_image_url, html_table, hemisphere_image_urls.

    Cleanup: the unused ``mars_dict`` local and three BeautifulSoup parses
    whose results were never read have been removed; the browsing sequence
    is otherwise unchanged.
    """
    browser = init_browser()

    # --- Latest news headline and teaser ---------------------------------
    browser.visit("https://redplanetscience.com/")
    soup = BeautifulSoup(browser.html, "html.parser")
    latest_news_title = soup.find("div", class_="content_title").text
    latest_news_paragraph = soup.find("div", class_="article_teaser_body").text

    # --- Featured image (URL is fixed; visit kept from the original flow) -
    browser.visit("https://spaceimages-mars.com/")
    featured_image_url = "https://spaceimages-mars.com/image/featured/mars1.jpg"
    browser.visit(featured_image_url)

    # --- Mars facts: second HTML table on the page, re-rendered to HTML ---
    url = "https://galaxyfacts-mars.com/"
    browser.visit(url)
    html_table = pd.read_html(url)[1].to_html()

    # --- Hemisphere images: click each thumbnail, grab the sample link ----
    browser.visit("https://marshemispheres.com/")
    hemisphere_image_urls = []
    links = browser.find_by_css("a.product-item img")
    for i in range(len(links)):
        hemisphere = {}
        # Elements must be re-queried after each back(), hence the repeat.
        browser.find_by_css("a.product-item img")[i].click()
        sample = browser.links.find_by_text("Sample").first
        hemisphere["img_url"] = sample["href"]
        hemisphere["title"] = browser.find_by_css("h2.title").text
        hemisphere_image_urls.append(hemisphere)
        browser.back()

    mars_data = {
        "latest_news_title": latest_news_title,
        "latest_news_paragraph": latest_news_paragraph,
        "featured_image_url": featured_image_url,
        "html_table": html_table,
        "hemisphere_image_urls": hemisphere_image_urls,
    }
    browser.quit()
    return mars_data
"bs4.BeautifulSoup",
"splinter.Browser",
"pandas.read_html"
] | [((295, 347), 'splinter.Browser', 'Browser', (['"""chrome"""'], {'headless': '(False)'}), "('chrome', **executable_path, headless=False)\n", (302, 347), False, 'from splinter import Browser\n'), ((523, 557), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (536, 557), False, 'from bs4 import BeautifulSoup\n'), ((830, 864), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (843, 864), False, 'from bs4 import BeautifulSoup\n'), ((1096, 1130), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1109, 1130), False, 'from bs4 import BeautifulSoup\n'), ((1347, 1381), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1360, 1381), False, 'from bs4 import BeautifulSoup\n'), ((1161, 1178), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (1173, 1178), True, 'import pandas as pd\n')] |
"""
Tests the DQM Server class
"""
import json
import os
import threading
import pytest
import requests
import qcfractal.interface as ptl
from qcfractal import FractalServer, FractalSnowflake, FractalSnowflakeHandler
from qcfractal.testing import (
await_true,
find_open_port,
pristine_loop,
test_server,
using_geometric,
using_rdkit,
using_torsiondrive,
)
# Exact set of keys expected in the `meta` object of REST responses
# (compared with == in test_storage_socket below).
meta_set = {"errors", "n_inserted", "success", "duplicates", "error_description", "validation_errors"}
def test_server_information(test_server):
    """The server reports its identity and per-table object counts."""
    info = ptl.FractalClient(test_server).server_information()
    assert {"name", "heartbeat_frequency", "counts"} <= info.keys()
    assert info["counts"].keys() >= {"molecule", "kvstore", "result", "collection"}
def test_storage_socket(test_server):
    """POST a new collection, query it back, and exercise the id sub-resource."""
    storage_api_addr = test_server.get_address() + "collection"  # Targets an endpoint in the FractalServer
    storage = {
        "collection": "TorsionDriveRecord",
        "name": "Torsion123",
        "something": "else",
        "array": ["54321"],
        "visibility": True,
        "view_available": False,
        "group": "default",
    }
    # Cast collection type to lower since the server-side does it anyways
    storage["collection"] = storage["collection"].lower()
    # Insert: exactly one document, and meta carries the standard key set.
    r = requests.post(storage_api_addr, json={"meta": {}, "data": storage})
    assert r.status_code == 200, r.reason
    pdata = r.json()
    assert pdata["meta"].keys() == meta_set
    assert pdata["meta"]["n_inserted"] == 1
    # Query by (collection, name) and compare against what was posted.
    r = requests.get(
        storage_api_addr, json={"meta": {}, "data": {"collection": storage["collection"], "name": storage["name"]}}
    )
    print(r.content)
    assert r.status_code == 200, r.reason
    pdata = r.json()
    col_id = pdata["data"][0].pop("id")
    # got default values when created; strip them before the exact comparison
    pdata["data"][0].pop("tags", None)
    pdata["data"][0].pop("tagline", None)
    pdata["data"][0].pop("provenance", None)
    pdata["data"][0].pop("view_url_hdf5", None)
    pdata["data"][0].pop("view_url_plaintext", None)
    pdata["data"][0].pop("view_metadata", None)
    pdata["data"][0].pop("description", None)
    assert pdata["data"][0] == storage
    # Test collection id sub-resource
    r = requests.get(f"{storage_api_addr}/{col_id}", json={"meta": {}, "data": {}}).json()
    assert r["meta"]["success"] is True
    assert len(r["data"]) == 1
    assert r["data"][0]["id"] == col_id
    # A non-matching filter on the id sub-resource returns an empty data list.
    r = requests.get(f"{storage_api_addr}/{col_id}", json={"meta": {}, "data": {"name": "wrong name"}}).json()
    assert r["meta"]["success"] is True
    assert len(r["data"]) == 0
def test_bad_collection_get(test_server):
    """GETs on sub-resources of a nonexistent collection id report success=False."""
    base = test_server.get_address() + "collection/1234/"
    for subresource in ("entry", "value", "list", "molecule"):
        url = base + subresource
        response = requests.get(url, json={"meta": {}, "data": {}})
        # The endpoint still answers 200; the failure lives in the meta block.
        assert response.status_code == 200, f"{response.reason} {url}"
        assert response.json()["meta"]["success"] is False, url
def test_bad_collection_post(test_server):
    """POSTs to a nonexistent collection id (and its sub-resources) must fail."""
    payload = {
        "collection": "TorsionDriveRecord",
        "name": "Torsion123",
        "something": "else",
        "array": ["54321"],
        "visibility": True,
        "view_available": False,
    }
    # Cast collection type to lower since the server-side does it anyways
    payload["collection"] = payload["collection"].lower()

    base = test_server.get_address() + "collection/1234"
    for suffix in ("", "/value", "/entry", "/list", "/molecule"):
        response = requests.post(base + suffix, json={"meta": {}, "data": payload})
        assert response.status_code == 200, response.reason
        assert response.json()["meta"]["success"] is False
def test_bad_view_endpoints(test_server):
    """Tests that certain misspellings of the view endpoints result in 404s"""
    addr = test_server.get_address()
    bad_paths = (
        "collection//value",
        "collection/234/values",
        "collections/234/value",
        "collection/234/view/value",
        "collection/value",
        "collection/S22",
    )
    for path in bad_paths:
        assert requests.get(addr + path).status_code == 404
@pytest.mark.slow
def test_snowflakehandler_restart():
    """restart() spawns a fresh server process; exiting the context kills both."""
    with FractalSnowflakeHandler() as server:
        server.client()
        before_restart = server._qcfractal_proc
        server.restart()
        server.client()
        after_restart = server._qcfractal_proc

    assert before_restart != after_restart
    assert before_restart.poll() is not None
    assert after_restart.poll() is not None
def test_snowflakehandler_log():
    """The snowflake server log is readable and the process stops on exit."""
    with FractalSnowflakeHandler() as server:
        proc = server._qcfractal_proc
        # Startup banner should be in the log; presumably no task-count lines
        # appear before any work is submitted — TODO confirm intent.
        assert "No SSL files passed in" in server.show_log(show=False, nlines=100)
        assert "0 task" not in server.show_log(show=False, nlines=100)
    # Leaving the context must terminate the server process.
    assert proc.poll() is not None
@pytest.mark.slow
@using_geometric
@using_torsiondrive
@using_rdkit
def test_snowflake_service():
    """Run a full torsiondrive service end-to-end on a throwaway server."""
    with FractalSnowflakeHandler() as server:
        client = server.client()
        hooh = ptl.data.get_molecule("hooh.json")
        # Geometric options
        tdinput = {
            "initial_molecule": [hooh],
            "keywords": {"dihedrals": [[0, 1, 2, 3]], "grid_spacing": [90]},
            "optimization_spec": {"program": "geometric", "keywords": {"coordsys": "tric"}},
            "qc_spec": {"driver": "gradient", "method": "UFF", "basis": None, "keywords": None, "program": "rdkit"},
        }
        ret = client.add_service([tdinput])
        def geometric_await():
            # True once the submitted torsiondrive procedure has completed.
            td = client.query_procedures(id=ret.ids)[0]
            return td.status == "COMPLETE"
        # Poll every 2 s for up to 60 s; on failure the assert message shows
        # the procedure record for debugging.
        assert await_true(60, geometric_await, period=2), client.query_procedures(id=ret.ids)[0]
| [
"qcfractal.testing.test_server.get_address",
"requests.post",
"qcfractal.interface.FractalClient",
"qcfractal.interface.data.get_molecule",
"qcfractal.FractalSnowflakeHandler",
"requests.get",
"qcfractal.testing.await_true"
] | [((550, 580), 'qcfractal.interface.FractalClient', 'ptl.FractalClient', (['test_server'], {}), '(test_server)\n', (567, 580), True, 'import qcfractal.interface as ptl\n'), ((1327, 1394), 'requests.post', 'requests.post', (['storage_api_addr'], {'json': "{'meta': {}, 'data': storage}"}), "(storage_api_addr, json={'meta': {}, 'data': storage})\n", (1340, 1394), False, 'import requests\n'), ((1556, 1681), 'requests.get', 'requests.get', (['storage_api_addr'], {'json': "{'meta': {}, 'data': {'collection': storage['collection'], 'name': storage[\n 'name']}}"}), "(storage_api_addr, json={'meta': {}, 'data': {'collection':\n storage['collection'], 'name': storage['name']}})\n", (1568, 1681), False, 'import requests\n'), ((4225, 4250), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (4248, 4250), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((858, 883), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (881, 883), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((2981, 3042), 'requests.get', 'requests.get', (['storage_api_addr'], {'json': "{'meta': {}, 'data': {}}"}), "(storage_api_addr, json={'meta': {}, 'data': {}})\n", (2993, 3042), False, 'import requests\n'), ((3925, 3992), 'requests.post', 'requests.post', (['storage_api_addr'], {'json': "{'meta': {}, 'data': storage}"}), "(storage_api_addr, json={'meta': {}, 'data': storage})\n", (3938, 3992), False, 'import requests\n'), ((4757, 4782), 'qcfractal.FractalSnowflakeHandler', 'FractalSnowflakeHandler', ([], {}), '()\n', (4780, 4782), False, 'from qcfractal import FractalServer, FractalSnowflake, FractalSnowflakeHandler\n'), ((5091, 5116), 'qcfractal.FractalSnowflakeHandler', 'FractalSnowflakeHandler', ([], {}), '()\n', (5114, 5116), 
False, 'from qcfractal import FractalServer, FractalSnowflake, FractalSnowflakeHandler\n'), ((5466, 5491), 'qcfractal.FractalSnowflakeHandler', 'FractalSnowflakeHandler', ([], {}), '()\n', (5489, 5491), False, 'from qcfractal import FractalServer, FractalSnowflake, FractalSnowflakeHandler\n'), ((5553, 5587), 'qcfractal.interface.data.get_molecule', 'ptl.data.get_molecule', (['"""hooh.json"""'], {}), "('hooh.json')\n", (5574, 5587), True, 'import qcfractal.interface as ptl\n'), ((6166, 6207), 'qcfractal.testing.await_true', 'await_true', (['(60)', 'geometric_await'], {'period': '(2)'}), '(60, geometric_await, period=2)\n', (6176, 6207), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((2265, 2340), 'requests.get', 'requests.get', (['f"""{storage_api_addr}/{col_id}"""'], {'json': "{'meta': {}, 'data': {}}"}), "(f'{storage_api_addr}/{col_id}', json={'meta': {}, 'data': {}})\n", (2277, 2340), False, 'import requests\n'), ((2468, 2568), 'requests.get', 'requests.get', (['f"""{storage_api_addr}/{col_id}"""'], {'json': "{'meta': {}, 'data': {'name': 'wrong name'}}"}), "(f'{storage_api_addr}/{col_id}', json={'meta': {}, 'data': {\n 'name': 'wrong name'}})\n", (2480, 2568), False, 'import requests\n'), ((2724, 2749), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (2747, 2749), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((2785, 2810), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (2808, 2810), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((2846, 2871), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (2869, 2871), False, 'from qcfractal.testing 
import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((2906, 2931), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (2929, 2931), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((3613, 3638), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (3636, 3638), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((3668, 3693), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (3691, 3693), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((3729, 3754), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (3752, 3754), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((3790, 3815), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (3813, 3815), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((3850, 3875), 'qcfractal.testing.test_server.get_address', 'test_server.get_address', ([], {}), '()\n', (3873, 3875), False, 'from qcfractal.testing import await_true, find_open_port, pristine_loop, test_server, using_geometric, using_rdkit, using_torsiondrive\n'), ((4263, 4303), 'requests.get', 'requests.get', (["(addr + 'collection//value')"], {}), "(addr + 'collection//value')\n", (4275, 4303), False, 'import requests\n'), ((4334, 4378), 'requests.get', 'requests.get', (["(addr + 'collection/234/values')"], {}), "(addr + 
'collection/234/values')\n", (4346, 4378), False, 'import requests\n'), ((4409, 4453), 'requests.get', 'requests.get', (["(addr + 'collections/234/value')"], {}), "(addr + 'collections/234/value')\n", (4421, 4453), False, 'import requests\n'), ((4484, 4532), 'requests.get', 'requests.get', (["(addr + 'collection/234/view/value')"], {}), "(addr + 'collection/234/view/value')\n", (4496, 4532), False, 'import requests\n'), ((4563, 4602), 'requests.get', 'requests.get', (["(addr + 'collection/value')"], {}), "(addr + 'collection/value')\n", (4575, 4602), False, 'import requests\n'), ((4633, 4670), 'requests.get', 'requests.get', (["(addr + 'collection/S22')"], {}), "(addr + 'collection/S22')\n", (4645, 4670), False, 'import requests\n')] |
import argparse
import os
import os.path as osp
import cv2
import numpy as np
from scipy.stats import multivariate_normal
from scipy.stats import norm
import matplotlib
# matplotlib.use('agg')
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import subprocess
import shutil
import chainer
from chainer import training
from chainer.training import extensions
from chainer.dataset import concat_examples
from chainer.backends.cuda import to_cpu
import chainer.functions as F
from chainer import serializers
import net_200x200 as net
import data_generator
from config_parser import ConfigParser
from utils import *
def save_reconstruction_arrays(data, model, folder_name="."):
    """Save ground-truth and reconstructed point-cloud arrays as one .npz file.

    Parameters
    ----------
    data : tuple
        ``(train_b0, train_b1)`` input arrays for the two siamese branches,
        shaped ``(N, C, H, W)``.
    model : net.Conv_Siam_VAE
        Trained model; called as ``model(b0, b1)`` -- the first two entries of
        the result are the reconstructions of the two branches.
    folder_name : str
        Directory whose old output files (anything containing '.') are
        deleted before saving.
    """
    print("Clear Images from Last Reconstructions\n")
    # BUG FIX: was `os.remove(folder_name + x)`, which produces a wrong path
    # whenever folder_name lacks a trailing separator; join properly instead.
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(os.path.join(folder_name, filename))
    print("Saving Array RECONSTRUCTIONS\n")
    (train_b0, train_b1) = data
    no_images = 10
    # Pick `no_images` sample indices spread evenly over the training set.
    train_ind = np.linspace(0, len(train_b0) - 1, no_images, dtype=int)
    result = model(train_b0[train_ind], train_b1[train_ind])
    # Move the channel axis last, (N, C, H, W) -> (N, W, H, C), for saving.
    gt_b0 = np.swapaxes(train_b0[train_ind], 1, 3)
    gt_b1 = np.swapaxes(train_b1[train_ind], 1, 3)
    rec_b0 = np.swapaxes(result[0].data, 1, 3)
    rec_b1 = np.swapaxes(result[1].data, 1, 3)
    output = {"gt_b0": gt_b0, "gt_b1": gt_b1, 'rec_b0': rec_b0, 'rec_b1': rec_b1}
    np.savez(os.path.join("result", "reconstruction_arrays/train" + ".npz"), **output)
def eval_seen_data(data, model, groups, folder_name=".", pairs=None):
    """Save 2D latent-space scatter plots of training-time (seen) data, per label.

    For every label in every group, the latent means of the matching samples
    are projected onto each requested pair of latent dimensions and saved as
    an image in ``folder_name``.

    Parameters
    ----------
    data : tuple
        ``(data_b0, data_b1)`` input arrays for the two branches.
    model : net.Conv_Siam_VAE
        Trained model providing ``get_latent``.
    groups : dict
        Mapping of group key -> iterable of labels.
    folder_name : str
        Output directory; old files with an extension are deleted first.
    pairs : list of tuple, optional
        Latent-dimension index pairs; defaults to ``[(0, 1), (0, 2), (1, 2)]``.
        (BUG FIX: the argument used to be silently overwritten by this
        default; it is now respected when given.)
    """
    print("Clear Images from Last Seen Scatter\n")
    # BUG FIX: join path components instead of the naive `folder_name + x`
    # concatenation, which breaks without a trailing separator.
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(os.path.join(folder_name, filename))
    print("Evaluating on SEEN data\n")
    (data_b0, data_b1) = data
    n = 100
    # BUG FIX: `/` yields a float in Python 3 and a float is not a valid
    # slice step; use integer division, keeping at least 1.
    every_nth = max(1, len(data_b0) // n)
    axis_ranges = [-5, 5]
    if pairs is None:
        pairs = [(0, 1), (0, 2), (1, 2)]
    for group_key in groups:
        for label in groups[group_key]:
            print(("Visualising label:\t{0}, Group:\t{1}".format(label, group_key)))
            # NOTE(review): `train_labels` is a module-level global assigned in
            # the __main__ block -- this function only works when the file is
            # run as a script; confirm before reusing it as a library function.
            indecies = [i for i, x in enumerate(train_labels) if x == label]
            filtered_data_b0 = data_b0.take(indecies, axis=0)[::every_nth]
            filtered_data_b1 = data_b1.take(indecies, axis=0)[::every_nth]
            latent_mu = model.get_latent(filtered_data_b0, filtered_data_b1).data
            for pair in pairs:
                plt.scatter(latent_mu[:, pair[0]], latent_mu[:, pair[1]], c='red', label=label, alpha=0.75)
                plt.grid()
                # major axes
                plt.plot([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')
                plt.plot([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')
                plt.xlim(axis_ranges[0], axis_ranges[1])
                plt.ylim(axis_ranges[0], axis_ranges[1])
                plt.xlabel("Z_" + str(pair[0]))
                plt.ylabel("Z_" + str(pair[1]))
                plt.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
                plt.savefig(osp.join(folder_name, "group_" + str(group_key) + "_" + label + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
                plt.close()
def eval_seen_data_single(data, model, labels=None, folder_name=".", pairs=None):
    """Show one figure per sampled seen item: both input point clouds in 3D
    plus the item's latent coordinates for each latent-dimension pair.

    Parameters
    ----------
    data : tuple
        ``(data_b0, data_b1)`` arrays of shape ``(N, C, 200, 200)``.
    model : net.Conv_Siam_VAE
        Trained model providing ``get_latent``.
    labels : sequence, optional
        Per-sample labels used as figure window titles.  (BUG FIX: the old
        mutable default ``[]`` is replaced by ``None`` -- behaviour for
        callers is unchanged.)
    folder_name : str
        Directory cleared of old files with an extension.
    pairs : list of tuple
        Latent-dimension index pairs shown next to the 3D plots.
    """
    if labels is None:
        labels = []
    print("Clear Images from Last Seen Scatter Single\n")
    # BUG FIX: join path components instead of naive concatenation.
    for filename in os.listdir(folder_name):
        if '.' in filename:
            os.remove(os.path.join(folder_name, filename))
    print("Evaluating on SEEN SINGLE data\n")
    (data_b0, data_b1) = data
    axis_ranges = [-15, 15]
    # pairs = [(0,1)]
    n = 100
    # BUG FIX: `/` yields a float in Python 3, which is not a valid slice
    # step; use integer division, keeping at least 1.
    every_nth = max(1, len(data_b0) // n)
    filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[::every_nth]
    filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[::every_nth]
    labels = labels[::every_nth]
    latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
    # Channel axis last so each sample reshapes to (200*200, 3) xyz points.
    filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
    filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
    for i in range(0, len(latent[0]), 33):
        fig = plt.figure()
        fig.canvas.set_window_title(labels[i])
        ax = fig.add_subplot(1, len(pairs) + 1, 1, projection='3d')
        points = filtered_data_b0[i].reshape(200 * 200, 3)
        # Drop all-zero ("empty") points, then subsample every 3rd for speed.
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0, 0, 0]).all()]]))
        xs_0 = filtered_points[..., 0][::3]
        ys_0 = filtered_points[..., 1][::3]
        zs_0 = filtered_points[..., 2][::3]
        ax.scatter(xs_0, ys_0, zs_0, c='r', alpha=0.5)
        points = filtered_data_b1[i].reshape(200 * 200, 3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0, 0, 0]).all()]]))
        xs_1 = filtered_points[..., 0][::3]
        ys_1 = filtered_points[..., 1][::3]
        zs_1 = filtered_points[..., 2][::3]
        ax.scatter(xs_1, ys_1, zs_1, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(1, len(pairs) + 1, j + 2)
            ax.scatter(latent[pair[0], i], latent[pair[1], i], c='red', label="unseen", alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')
            ax.plot([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlim(axis_ranges[0], axis_ranges[1])
            ax.set_ylim(axis_ranges[0], axis_ranges[1])
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            # ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
        # plt.savefig(osp.join(folder_name, str(i) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
        # plt.close()
        plt.show()
def eval_unseen_data(data, model, folder_name=".", pairs=None):
    """Visualise unseen samples: 3D point clouds of both branches plus latent
    scatter plots, for both branch orders ``(b0, b1)`` and the flipped
    ``(b1, b0)``.  One figure per (subsampled) sample is shown interactively.

    :param data: tuple ``(data_b0, data_b1)`` of arrays shaped (N, C, 200, 200)
    :param model: trained siamese VAE exposing ``get_latent``
    :param folder_name: directory cleared of old files (anything containing '.')
    :param pairs: latent-dimension index pairs to scatter-plot
    """
    print("Clear Images from Last Unseen Scatter\n")
    all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
    # NOTE(review): `folder_name + x` only builds a correct path when
    # folder_name ends with a separator; os.path.join would be safer.
    list(map(lambda x : os.remove(folder_name + x), all_files))
    print("Evaluating on UNSEEN data\n")
    (data_b0, data_b1) = data
    axis_ranges = [-5, 5]
    # pairs = [(0,1), (0,2), (1,2)]
    # pairs = [(0,1)]
    # n = 100
    # every_nth = len(data_b0) / n
    # if every_nth == 0:
    # 	every_nth = 1
    every_nth = 2  # keep every 2nd sample
    filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[::every_nth]
    filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[::every_nth]
    # Latent codes for both input orders, to inspect the model's (a)symmetry.
    latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
    latent_flipped = np.array(model.get_latent(filtered_data_b1, filtered_data_b0))
    # Channel axis last so each sample reshapes to (200*200, 3) xyz points.
    filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
    filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
    for i in range(len(filtered_data_b0)):
        print(("{0}/{1}".format(i, len(latent[0]))))
        fig = plt.figure()
        # Top-left panel: 3D clouds of both branches (branch 0 red, branch 1 cyan).
        ax = fig.add_subplot(2, 4, 1, projection='3d')
        points = filtered_data_b0[i].reshape(200*200,3)
        # Keep only rows that are not the all-zero "empty" point, subsample /3.
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_0 = filtered_points[...,0][::3]
        ys_0 = filtered_points[...,1][::3]
        zs_0 = filtered_points[...,2][::3]
        ax.scatter(xs_0, ys_0, zs_0, c='r', alpha=0.5)
        points = filtered_data_b1[i].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_1 = filtered_points[...,0][::3]
        ys_1 = filtered_points[...,1][::3]
        zs_1 = filtered_points[...,2][::3]
        ax.scatter(xs_1, ys_1, zs_1, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        # Top row, remaining panels: latent scatter for each requested pair.
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, 4, j + 2)
            ax.scatter(latent[pair[0], i], latent[pair[1], i], c='red', label="unseen", alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
            ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
            # ax.set_xlim(axis_ranges[0], axis_ranges[1])
            # ax.set_ylim(axis_ranges[0], axis_ranges[1])
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            # ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
        # Bottom row: same clouds with swapped colours, plus latent codes from
        # the flipped branch order.
        ax = fig.add_subplot(2, 4, 5, projection='3d')
        ax.scatter(xs_1, ys_1, zs_1, c='r', alpha=0.5)
        ax.scatter(xs_0, ys_0, zs_0, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, 4, j + 6)
            ax.scatter(latent_flipped[pair[0], i], latent_flipped[pair[1], i], c='red', label="unseen", alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
            ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
            # ax.set_xlim(axis_ranges[0], axis_ranges[1])
            # ax.set_ylim(axis_ranges[0], axis_ranges[1])
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            # ax.legend(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)
        # plt.savefig(osp.join(folder_name, str(i) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
        # plt.close()
        plt.show()
def eval_unseen_time(data, model, folder_name=".", pairs=None):
    """Visualise how unseen samples move through latent space over time.

    The data is treated as ``npz_files`` consecutive chunks of ``npz_size``
    samples each; every chunk gets one figure with early/late point clouds
    plus time-coloured latent trajectories (colormap 'cool').

    NOTE(review): the ``pairs`` argument is overwritten below with the
    hard-coded ``[(0,1), (2,3)]`` -- callers' pairs are ignored; confirm
    whether that is intended.
    """
    print("Clear Images from Last Unseen Scatter\n")
    all_files = list([filename for filename in os.listdir(folder_name) if '.' in filename])
    # NOTE(review): `folder_name + x` only builds a correct path when
    # folder_name ends with a separator; os.path.join would be safer.
    list(map(lambda x : os.remove(folder_name + x), all_files))
    print("Evaluating on UNSEEN data through time\n")
    cmap = plt.cm.get_cmap('cool')
    (data_b0, data_b1) = data
    axis_ranges = [-20, 20]
    # pairs = [(0,1), (0,2), (1,2)]
    pairs = [(0,1), (2,3)]
    npz_size = 50
    npz_files = 4
    for k in range(npz_files):
        # Slice out the k-th chunk (note: the chunk's last sample is dropped
        # by the `- 1` in the upper bound).
        filtered_data_b0 = data_b0.take(list(range(len(data_b0))), axis=0)[k * npz_size : (k+1) * npz_size - 1]
        filtered_data_b1 = data_b1.take(list(range(len(data_b1))), axis=0)[k * npz_size : (k+1) * npz_size - 1]
        # Latent codes for both branch orders.
        latent = np.array(model.get_latent(filtered_data_b0, filtered_data_b1))
        latent_flipped = np.array(model.get_latent(filtered_data_b1, filtered_data_b0))
        # Channel axis last so each sample reshapes to (200*200, 3) xyz points.
        filtered_data_b0 = np.swapaxes(filtered_data_b0, 1, 3)
        filtered_data_b1 = np.swapaxes(filtered_data_b1, 1, 3)
        print(("{0}/{1}".format(k, npz_files)))
        fig = plt.figure()
        ###################
        #### FIRST ROW ####
        ###################
        # Early point cloud of the chunk (index 1; branch 0 red, branch 1 cyan).
        ax = fig.add_subplot(2, len(pairs) + 2, 1, projection='3d')
        points = filtered_data_b0[1].reshape(200*200,3)
        # Keep only rows that are not the all-zero "empty" point, subsample /3.
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_0_first = filtered_points[...,0][::3]
        ys_0_first = filtered_points[...,1][::3]
        zs_0_first = filtered_points[...,2][::3]
        ax.scatter(xs_0_first, ys_0_first, zs_0_first, c='r', alpha=0.5)
        points = filtered_data_b1[1].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_1_first = filtered_points[...,0][::3]
        ys_1_first = filtered_points[...,1][::3]
        zs_1_first = filtered_points[...,2][::3]
        ax.scatter(xs_1_first, ys_1_first, zs_1_first, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        # Last point cloud of the chunk.
        ax = fig.add_subplot(2, len(pairs) + 2, 2, projection='3d')
        points = filtered_data_b0[-1].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_0_last = filtered_points[...,0][::3]
        ys_0_last = filtered_points[...,1][::3]
        zs_0_last = filtered_points[...,2][::3]
        ax.scatter(xs_0_last, ys_0_last, zs_0_last, c='r', alpha=0.5)
        points = filtered_data_b1[-1].reshape(200*200,3)
        filtered_points = np.array(list([row for row in points if [point for point in row if (point != [0,0,0]).all()]]))
        xs_1_last = filtered_points[...,0][::3]
        ys_1_last = filtered_points[...,1][::3]
        zs_1_last = filtered_points[...,2][::3]
        ax.scatter(xs_1_last, ys_1_last, zs_1_last, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        # Latent trajectory panels: one point per time step, coloured by time.
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, len(pairs) + 2, j + 3)
            for i in range(len(latent[0])):
                x = (latent[pair[0], i], latent[pair[1], i])
                rgba = cmap(i/float(npz_size))
                ax.scatter(x[0], x[1], c=[rgba[:3]], label="unseen", s=30, alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
            ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            ax.set_xlim(axis_ranges[0], axis_ranges[1])
            ax.set_ylim(axis_ranges[0], axis_ranges[1])
        ##################
        ### SECOND ROW ###
        ##################
        # Same plots for the flipped branch order (colours swapped).
        ax = fig.add_subplot(2, len(pairs) + 2, len(pairs) + 3, projection='3d')
        ax.scatter(xs_1_first, ys_1_first, zs_1_first, c='r', alpha=0.5)
        ax.scatter(xs_0_first, ys_0_first, zs_0_first, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        ax = fig.add_subplot(2, len(pairs) + 2, len(pairs) + 4, projection='3d')
        ax.scatter(xs_1_last, ys_1_last, zs_1_last, c='r', alpha=0.5)
        ax.scatter(xs_0_last, ys_0_last, zs_0_last, c='c', alpha=0.5)
        ax.set_xlabel('X', fontweight="bold")
        ax.set_ylabel('Y', fontweight="bold")
        ax.set_zlabel('Z', fontweight="bold")
        for j, pair in enumerate(pairs):
            ax = fig.add_subplot(2, len(pairs) + 2, j + len(pairs) + 5)
            for i in range(len(latent_flipped[0])):
                x = (latent_flipped[pair[0], i], latent_flipped[pair[1], i])
                rgba = cmap(i/float(npz_size))
                ax.scatter(x[0], x[1], c=[rgba[:3]], label="unseen", s=30, alpha=0.75)
            ax.grid()
            # major axes
            ax.plot([axis_ranges[0], axis_ranges[1]], [0,0], 'k')
            ax.plot([0,0], [axis_ranges[0], axis_ranges[1]], 'k')
            ax.set_xlabel("Z_" + str(pair[0]))
            ax.set_ylabel("Z_" + str(pair[1]))
            ax.set_xlim(axis_ranges[0], axis_ranges[1])
            ax.set_ylim(axis_ranges[0], axis_ranges[1])
        # plt.savefig(osp.join(folder_name, "npz_" + str(k) + "_Z_" + str(pair[0]) + "_Z_" + str(pair[1])), bbox_inches="tight")
        # plt.close()
        plt.show()
if __name__ == "__main__":
    # BUG FIX: `itertools` is used below (to build latent-dimension pairs) but
    # was never imported at the top of the file, causing a NameError.
    import itertools

    ignore = ["unlabelled", "train"]
    generator = data_generator.DataGenerator()
    train_b0, train_b1, train_labels, train_concat, train_vectors, test_b0, test_b1, test_labels, test_concat, test_vectors, unseen_b0, unseen_b1,\
    unseen_labels, groups = generator.generate_dataset(ignore=ignore, args=None)
    # Summarise the loaded dataset splits.
    print('\n###############################################')
    print("DATA_LOADED")
    print(("# Training Branch 0: \t\t{0}".format(train_b0.shape)))
    print(("# Training Branch 1: \t\t{0}".format(train_b1.shape)))
    print(("# Training labels: \t{0}".format(set(train_labels))))
    print(("# Training labels: \t{0}".format(train_labels.shape)))
    print(("# Training concat: \t{0}".format(len(train_concat))))
    print(("# Training vectors: \t{0}".format(train_vectors.shape)))
    print(("# Testing Branch 0: \t\t{0}".format(test_b0.shape)))
    print(("# Testing Branch 1: \t\t{0}".format(test_b1.shape)))
    print(("# Testing labels: \t{0}".format(set(test_labels))))
    print(("# Testing concat: \t{0}".format(len(test_concat))))
    print(("# Testing labels: \t{0}".format(test_labels.shape)))
    print(("# Testing vectors: \t{0}".format(test_vectors.shape)))
    print(("# Unseen Branch 0: \t\t{0}".format(unseen_b0.shape)))
    print(("# Unseen Branch 1: \t\t{0}".format(unseen_b1.shape)))
    print(("# Unseen labels: \t{0}".format(set(unseen_labels))))
    print(("\n# Groups: \t{0}".format(groups)))
    print('###############################################\n')
    # Rebuild the model architecture and load the trained weights from disk.
    model = net.Conv_Siam_VAE(train_b0.shape[1], train_b1.shape[1], n_latent=8, groups=groups, alpha=1, beta=1, gamma=1)
    serializers.load_npz("result/models/final.model", model)
    model.to_cpu()
    # All 2-element combinations of latent group indices, used as plot pairs.
    pairs = list(itertools.combinations(list(range(len(groups))), 2))
    # save the pointcloud reconstructions
    # save_reconstruction_arrays((train_b0, train_b0), model, folder_name="result/reconstruction_arrays/")
    # evaluate on the data that was seen during trainig
    # eval_seen_data((train_b0, train_b1), model, groups, folder_name="eval/scatter/seen/", pairs=pairs)
    # evaluate on the data that was seen during trainig one by one + 3D
    # eval_seen_data_single((test_b0, test_b1), model, labels=test_labels, folder_name="eval/scatter/seen_single/", pairs=pairs)
    # evaluate on the data that was NOT seen during trainig
    # eval_unseen_data((unseen_b0, unseen_b1), model, folder_name="eval/scatter/unseen/", pairs=pairs)
    # evaluate the unseen data through time
    eval_unseen_time((unseen_b0, unseen_b1), model, folder_name="eval/scatter/unseen_time/", pairs=pairs)
"net_200x200.Conv_Siam_VAE",
"os.listdir",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.swapaxes",
"os.remove",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"data_generator.DataGenerator",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.cm.get_cmap",
"... | [((1128, 1166), 'numpy.swapaxes', 'np.swapaxes', (['train_b0[train_ind]', '(1)', '(3)'], {}), '(train_b0[train_ind], 1, 3)\n', (1139, 1166), True, 'import numpy as np\n'), ((1176, 1214), 'numpy.swapaxes', 'np.swapaxes', (['train_b1[train_ind]', '(1)', '(3)'], {}), '(train_b1[train_ind], 1, 3)\n', (1187, 1214), True, 'import numpy as np\n'), ((1226, 1259), 'numpy.swapaxes', 'np.swapaxes', (['result[0].data', '(1)', '(3)'], {}), '(result[0].data, 1, 3)\n', (1237, 1259), True, 'import numpy as np\n'), ((1270, 1303), 'numpy.swapaxes', 'np.swapaxes', (['result[1].data', '(1)', '(3)'], {}), '(result[1].data, 1, 3)\n', (1281, 1303), True, 'import numpy as np\n'), ((3781, 3816), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b0', '(1)', '(3)'], {}), '(filtered_data_b0, 1, 3)\n', (3792, 3816), True, 'import numpy as np\n'), ((3837, 3872), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b1', '(1)', '(3)'], {}), '(filtered_data_b1, 1, 3)\n', (3848, 3872), True, 'import numpy as np\n'), ((6370, 6405), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b0', '(1)', '(3)'], {}), '(filtered_data_b0, 1, 3)\n', (6381, 6405), True, 'import numpy as np\n'), ((6426, 6461), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b1', '(1)', '(3)'], {}), '(filtered_data_b1, 1, 3)\n', (6437, 6461), True, 'import numpy as np\n'), ((9265, 9288), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""cool"""'], {}), "('cool')\n", (9280, 9288), True, 'import matplotlib.pyplot as plt\n'), ((14066, 14096), 'data_generator.DataGenerator', 'data_generator.DataGenerator', ([], {}), '()\n', (14094, 14096), False, 'import data_generator\n'), ((15463, 15576), 'net_200x200.Conv_Siam_VAE', 'net.Conv_Siam_VAE', (['train_b0.shape[1]', 'train_b1.shape[1]'], {'n_latent': '(8)', 'groups': 'groups', 'alpha': '(1)', 'beta': '(1)', 'gamma': '(1)'}), '(train_b0.shape[1], train_b1.shape[1], n_latent=8, groups=\n groups, alpha=1, beta=1, gamma=1)\n', (15480, 15576), True, 'import net_200x200 as net\n'), 
((15573, 15629), 'chainer.serializers.load_npz', 'serializers.load_npz', (['"""result/models/final.model"""', 'model'], {}), "('result/models/final.model', model)\n", (15593, 15629), False, 'from chainer import serializers\n'), ((1394, 1456), 'os.path.join', 'os.path.join', (['"""result"""', "('reconstruction_arrays/train' + '.npz')"], {}), "('result', 'reconstruction_arrays/train' + '.npz')\n", (1406, 1456), False, 'import os\n'), ((3923, 3935), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3933, 3935), True, 'import matplotlib.pyplot as plt\n'), ((5511, 5521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5519, 5521), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6571), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6569, 6571), True, 'import matplotlib.pyplot as plt\n'), ((8927, 8937), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8935, 8937), True, 'import matplotlib.pyplot as plt\n'), ((9851, 9886), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b0', '(1)', '(3)'], {}), '(filtered_data_b0, 1, 3)\n', (9862, 9886), True, 'import numpy as np\n'), ((9908, 9943), 'numpy.swapaxes', 'np.swapaxes', (['filtered_data_b1', '(1)', '(3)'], {}), '(filtered_data_b1, 1, 3)\n', (9919, 9943), True, 'import numpy as np\n'), ((9995, 10007), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10005, 10007), True, 'import matplotlib.pyplot as plt\n'), ((13979, 13989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13987, 13989), True, 'import matplotlib.pyplot as plt\n'), ((797, 820), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (807, 820), False, 'import os\n'), ((863, 889), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (872, 889), False, 'import os\n'), ((1633, 1656), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (1643, 1656), False, 'import os\n'), ((1699, 1725), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), 
'(folder_name + x)\n', (1708, 1725), False, 'import os\n'), ((2373, 2469), 'matplotlib.pyplot.scatter', 'plt.scatter', (['latent_mu[:, pair[0]]', 'latent_mu[:, pair[1]]'], {'c': '"""red"""', 'label': 'label', 'alpha': '(0.75)'}), "(latent_mu[:, pair[0]], latent_mu[:, pair[1]], c='red', label=\n label, alpha=0.75)\n", (2384, 2469), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2479), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2477, 2479), True, 'import matplotlib.pyplot as plt\n'), ((2502, 2557), 'matplotlib.pyplot.plot', 'plt.plot', (['[axis_ranges[0], axis_ranges[1]]', '[0, 0]', '"""k"""'], {}), "([axis_ranges[0], axis_ranges[1]], [0, 0], 'k')\n", (2510, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2616), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 0]', '[axis_ranges[0], axis_ranges[1]]', '"""k"""'], {}), "([0, 0], [axis_ranges[0], axis_ranges[1]], 'k')\n", (2569, 2616), True, 'import matplotlib.pyplot as plt\n'), ((2621, 2661), 'matplotlib.pyplot.xlim', 'plt.xlim', (['axis_ranges[0]', 'axis_ranges[1]'], {}), '(axis_ranges[0], axis_ranges[1])\n', (2629, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2706), 'matplotlib.pyplot.ylim', 'plt.ylim', (['axis_ranges[0]', 'axis_ranges[1]'], {}), '(axis_ranges[0], axis_ranges[1])\n', (2674, 2706), True, 'import matplotlib.pyplot as plt\n'), ((2785, 2849), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'bbox_to_anchor': '(1, 1)', 'fontsize': '(14)'}), "(loc='upper left', bbox_to_anchor=(1, 1), fontsize=14)\n", (2795, 2849), True, 'import matplotlib.pyplot as plt\n'), ((3001, 3012), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3010, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3219), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (3206, 3219), False, 'import os\n'), ((3262, 3288), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (3271, 3288), False, 'import os\n'), ((5684, 
5707), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (5694, 5707), False, 'import os\n'), ((5750, 5776), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (5759, 5776), False, 'import os\n'), ((9100, 9123), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (9110, 9123), False, 'import os\n'), ((9166, 9192), 'os.remove', 'os.remove', (['(folder_name + x)'], {}), '(folder_name + x)\n', (9175, 9192), False, 'import os\n')] |
from gurobipy import *
from itertools import combinations
from time import localtime, strftime, time
import config
from fibonew2 import (
AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder,
ib, sb)
from timing import endlog, log
def CheckOneAK(mbases,gset,rnk):
    '''
    We check if the matroid is 1-AK (via a Gurobi feasibility model for each
    candidate triple of flats; an infeasible model witnesses a violation).
    We also implement some optimizations that help reduce the number of flats to check.

    mbases (dictionary) containing matroids to be checked and their bases
    gset (list) is the ground set of the matroids
    rnk (int) is the rank of the matroids
    Note: observe that the matroids in a particular run need to be of the same size and rank.

    Results are appended to 'runresultAK.txt'; nothing is returned.
    '''
    start = time()
    log("Start Program")
    checker = 1      # 1-based index of the matroid currently being processed
    nonAK = 0        # count of matroids found NOT to be AK
    oneAK = 0        # count of matroids confirmed AK
    noAKmats = list()
    sfiles = open('runresultAK.txt','a+')
    # Write a banner delimiting this run in the (appended-to) results file.
    nm = 'File listing checked files from polymatroid extension run using all sets (AK) with some optimizations.'
    nm1 = '%' * len(nm)
    sfiles.write('{}\n'.format(nm1))
    sfiles.write('{}\n'.format(nm))
    sfiles.write('{}\n'.format(nm1))
    for key in mbases:
        counter = 0  # set to 1 when a violating triple is found for this matroid
        begin = time()
        log('Start polymatroid extension check (AK) for {} using all sets with some optimizations.'.format(key))
        # `allranks` alternates set, rank, set, rank, ... (see rankfinder).
        rankd,allranks = rankfinder(mbases[key],gset)
        Ar = list()
        # Collect candidate sets of size >= 2 (stringified elements).
        for i in range(0,len(allranks)-1,2):
            if len(allranks[i]) < 2: continue
            #if Ar1[i+1] == rnk: continue # not sure why this is here
            Ar.append(set([str(it3) for it3 in allranks[i]]))
        combs3 = combinations( [i for i in range(len(Ar))], 3)
        comb_hlder = list()
        ################################################
        ## We remove tuples (U,V,Z) where:
        ## (i) UV has full rank
        ## (ii) U and V are subsets of Z
        ## (iii) UV is a subset of Z
        ## (iv) Z is the intersection of U and V
        ## (v) Z is a subset of UV
        ## (vi) UV and Z are a modular pair
        ## (vii) UV and Z have zero mutual information
        ################################################
        for combo in combs3:
            pre_comb_hlder = list()
            cmbs12 = Ar[combo[0]].union(Ar[combo[1]])
            # Look up the rank of the union U v V (allranks stores ints).
            excld = set([int(itm) for itm in cmbs12])
            ind = allranks.index(excld)
            rnk_excld = allranks[ind + 1]
            if rnk_excld == rnk: continue
            if (Ar[combo[0]].issubset(Ar[combo[2]]) and Ar[combo[1]].issubset(Ar[combo[2]])) or cmbs12.issubset(Ar[combo[2]]): continue
            if Ar[combo[2]]==Ar[combo[0]].intersection(Ar[combo[1]]) or cmbs12.issuperset(Ar[combo[2]]): continue
            #int_combo01 = [int(item) for item in cmbs12]
            set_combo01 = set( [int(item) for item in cmbs12] )
            index_combo01 = allranks.index(set_combo01)
            rnk_combo01 = allranks[index_combo01+1]
            #int_combo2 = [int(item) for item in Ar[combo[2]]]
            set_combo2 = set( [int(item) for item in Ar[combo[2]]] )
            index_combo2 = allranks.index(set_combo2)
            rnk_combo2 = allranks[index_combo2+1]
            combo_inters = cmbs12.intersection(Ar[combo[2]])
            #int_combointers = [int(item) for item in combo_inters]
            set_combointers = set( [int(item) for item in combo_inters] )
            index_combointers = allranks.index(set_combointers)
            rnk_combointers = allranks[index_combointers+1]
            combo_union = cmbs12.union(Ar[combo[2]])
            #int_combounion = [int(item) for item in combo_union]
            set_combounion = set( [int(item) for item in combo_union] )
            index_combounion = allranks.index(set_combounion)
            rnk_combounion = allranks[index_combounion+1]
            # Modularity defect and mutual information of (UV, Z); keep the
            # triple only when both are nonzero (cases (vi) and (vii) above).
            check_modularity = rnk_combo01 + rnk_combo2 - rnk_combounion - rnk_combointers
            mutual_info = rnk_combo01 + rnk_combo2 - rnk_combounion
            if check_modularity != 0 and mutual_info != 0:
                pre_comb_hlder.append(Ar[combo[0]])
                pre_comb_hlder.append(Ar[combo[1]])
                pre_comb_hlder.append(Ar[combo[2]])
                comb_hlder.append(pre_comb_hlder)
        print('{} has {} 3-member working combinations.'.format(key,len(comb_hlder)))
        # Solve a fresh Gurobi feasibility model for every surviving triple.
        for i in range(len(comb_hlder)):
            combo1 = comb_hlder[i]
            J = combo1[0]
            K = combo1[1]
            L = combo1[2]
            config.p = Model("gurotest")
            config.w = config.p.addVars(range(0,2**config.vrbls+1),name="w")
            InitMatNew()
            MatroidCompatible(mbases[key],gset)
            AK2exp(bi(sb(J)), bi(sb(K)), bi(sb(L)), 2**(config.Part))
            Resol2m()
            # Optimal (feasible) model means this triple does not violate AK.
            if config.p.status == GRB.Status.OPTIMAL: continue
            print('{} is a non-AK matroid with violating sets {}, {} and {}.'.format(key,J,K,L))
            sfiles.write('{} is a non-AK matroid with violating sets {}, {} and {}.\n'.format(key,J,K,L))
            noAKmats.append(key)
            counter = 1
            break ###### To find ALL combinations that break AK, suppress this line #####
        if counter == 0:
            oneAK += 1
            sfiles.write('{} is an AK matroid.\n'.format(key))
        else:
            nonAK += 1
        endlog(begin)
        # Progress report; the summary is written after the last matroid.
        if checker < len(mbases):
            difference = len(mbases)-checker
            if difference > 1:
                print('{0}done. {1} matroids remaining. Moving to the next one... \n'.format(key,difference))
            else:
                print('{}done. One matroid left.'.format(key))
        else:
            print('*********************************************************')
            print('Last run made. Program concluded.')
            print('*********************************************************')
            sfiles.write('\n All {} matroids checked.\n'.format(len(mbases)))
            if nonAK == 0:
                sfiles.write('All {} matroids are AK.\n'.format(oneAK))
            else:
                sfiles.write('non_AK_mats = {}\n'.format(noAKmats))
            if nonAK == 1 and nonAK != len(mbases):
                if oneAK == 1:
                    sfiles.write('There is one non-AK and {} AK matroid here.\n'.format(oneAK))
                else:
                    sfiles.write('There is one non-AK and {} AK matroids here.\n'.format(oneAK))
            elif nonAK > 1 and nonAK < len(mbases):
                if oneAK == 1:
                    sfiles.write('There are {} non-AK matroids, and {} AK matroid here.\n'.format(nonAK,oneAK))
                else:
                    sfiles.write('There are {} non-AK matroids, and {} AK matroids here.\n'.format(nonAK,oneAK))
            elif nonAK == len(mbases):
                sfiles.write('All {} matroids are non-AK.\n'.format(nonAK))
        checker += 1
    endlog(start)
| [
"fibonew2.MatroidCompatible",
"fibonew2.sb",
"fibonew2.InitMatNew",
"timing.endlog",
"timing.log",
"fibonew2.Resol2m",
"fibonew2.rankfinder",
"time.time"
] | [((718, 724), 'time.time', 'time', ([], {}), '()\n', (722, 724), False, 'from time import localtime, strftime, time\n'), ((729, 749), 'timing.log', 'log', (['"""Start Program"""'], {}), "('Start Program')\n", (732, 749), False, 'from timing import endlog, log\n'), ((7020, 7033), 'timing.endlog', 'endlog', (['start'], {}), '(start)\n', (7026, 7033), False, 'from timing import endlog, log\n'), ((1168, 1174), 'time.time', 'time', ([], {}), '()\n', (1172, 1174), False, 'from time import localtime, strftime, time\n'), ((1313, 1342), 'fibonew2.rankfinder', 'rankfinder', (['mbases[key]', 'gset'], {}), '(mbases[key], gset)\n', (1323, 1342), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n'), ((5399, 5412), 'timing.endlog', 'endlog', (['begin'], {}), '(begin)\n', (5405, 5412), False, 'from timing import endlog, log\n'), ((4656, 4668), 'fibonew2.InitMatNew', 'InitMatNew', ([], {}), '()\n', (4666, 4668), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n'), ((4681, 4717), 'fibonew2.MatroidCompatible', 'MatroidCompatible', (['mbases[key]', 'gset'], {}), '(mbases[key], gset)\n', (4698, 4717), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n'), ((4801, 4810), 'fibonew2.Resol2m', 'Resol2m', ([], {}), '()\n', (4808, 4810), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n'), ((4739, 4744), 'fibonew2.sb', 'sb', (['J'], {}), '(J)\n', (4741, 4744), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n'), ((4750, 4755), 'fibonew2.sb', 'sb', (['K'], {}), '(K)\n', (4752, 4755), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n'), ((4761, 4766), 'fibonew2.sb', 'sb', (['L'], {}), '(L)\n', (4763, 
4766), False, 'from fibonew2 import AK2exp, InitMatNew, MatroidCompatible, Resol2m, bi, bs, disjoint, rankfinder, ib, sb\n')] |
#!/usr/bin/env python
import pytest
import os
import sys
import logging
# Configure root logging once at import time; this module's logger emits
# DEBUG-level diagnostics (used by test_4 below).
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
from pathlib import Path
from dselib.thread import initTLS
# NOTE(review): initTLS() runs before the remaining dselib imports --
# presumably the thread-local state must exist first; confirm the ordering
# requirement before regrouping these imports.
initTLS()
from constants import CONST
from dselib.path import normalizePath
from dselib.dir import GetDSEExtDirectory
from dselib.module import LoadModule, LoadModulePath
from dselib.module import LoadModulePath2, LoadModuleFromPackage
def test_1():
    """LoadModule: a stdlib module, a bogus name, and a fresh dselib reload."""
    # A standard-library module loads and exposes its attributes.
    parser_mod = LoadModule('argparse')
    assert parser_mod is not None
    assert hasattr(parser_mod, 'ArgumentParser')

    # A nonexistent module yields None rather than raising.
    assert LoadModule('madeup_module_name') is None

    # A dselib module loads fresh and ends up registered in sys.modules.
    mod_name = 'dselib.env'
    sys.modules.pop(mod_name, None)
    assert mod_name not in sys.modules
    env_mod = LoadModule(mod_name)
    assert env_mod is not None
    assert mod_name in sys.modules
    assert hasattr(env_mod, 'GetDSEDebug')
def test_2():
    """LoadModulePath2 loads a module from a path, with and without removal."""
    helper_path = GetDSEExtDirectory() / 'dseutil/dsehelp'

    # Plain load.
    module = LoadModulePath2(helper_path)
    assert module is not None
    assert hasattr(module, 'GenerateDocs')

    # Load again, asking for the previously loaded module to be removed first.
    module = LoadModulePath2(helper_path, removeIfLoaded=True)
    assert module is not None
    assert hasattr(module, 'GenerateDocs')
def test_3():
    """LoadModuleFromPackage loads a submodule given its package and stem."""
    package = 'root.dselib'
    stem = 'env'
    full_name = f'{package}.{stem}'
    # Start from a clean slate so the load is observable in sys.modules.
    sys.modules.pop(full_name, None)
    assert full_name not in sys.modules
    module = LoadModuleFromPackage(package, stem)
    assert module is not None
    assert full_name in sys.modules
    assert hasattr(module, 'GetDSEDebug')
# def _create_python_module(path, retVal=True):
# logger.debug('writing python module to %s', path)
# with open(path, 'w', encoding='utf-8') as fp:
# fp.write(f'''def test{retVal}():
# \treturn {retVal}
# ''')
def test_4():
# add tests for LoadModulePath
tmpdir = normalizePath(Path('./tmod').resolve())
tempModule = tmpdir / 'p42.py'
logger.debug('tempModule=%s', tempModule)
assert tempModule.is_file()
mh = LoadModulePath(str(tempModule))
assert mh is not None
assert hasattr(mh, 'testTrue')
assert mh.testTrue() is True
assert str(tmpdir) in sys.path
sys.path.remove(str(tmpdir))
sys.modules.pop(str(tempModule.stem)) # not necessary, but so I'll remember this method
tempModule2 = Path(tmpdir) / 'p42false.py'
assert tempModule2.is_file()
mh = LoadModulePath(tempModule2, remove=True)
assert mh is not None
assert str(tmpdir) not in sys.path
assert hasattr(mh, 'testFalse')
assert mh.testFalse() is False
if __name__ == '__main__':
pytest.main(['-k', 'test_module.py'])
| [
"logging.basicConfig",
"logging.getLogger",
"dselib.module.LoadModuleFromPackage",
"pathlib.Path",
"dselib.thread.initTLS",
"dselib.module.LoadModulePath",
"dselib.dir.GetDSEExtDirectory",
"pytest.main",
"dselib.module.LoadModulePath2",
"dselib.module.LoadModule"
] | [((73, 94), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (92, 94), False, 'import logging\n'), ((104, 131), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (121, 131), False, 'import logging\n'), ((223, 232), 'dselib.thread.initTLS', 'initTLS', ([], {}), '()\n', (230, 232), False, 'from dselib.thread import initTLS\n'), ((516, 538), 'dselib.module.LoadModule', 'LoadModule', (['"""argparse"""'], {}), "('argparse')\n", (526, 538), False, 'from dselib.module import LoadModule, LoadModulePath\n'), ((616, 648), 'dselib.module.LoadModule', 'LoadModule', (['"""madeup_module_name"""'], {}), "('madeup_module_name')\n", (626, 648), False, 'from dselib.module import LoadModule, LoadModulePath\n'), ((822, 844), 'dselib.module.LoadModule', 'LoadModule', (['testmodule'], {}), '(testmodule)\n', (832, 844), False, 'from dselib.module import LoadModule, LoadModulePath\n'), ((1065, 1092), 'dselib.module.LoadModulePath2', 'LoadModulePath2', (['tempModule'], {}), '(tempModule)\n', (1080, 1092), False, 'from dselib.module import LoadModulePath2, LoadModuleFromPackage\n'), ((1168, 1216), 'dselib.module.LoadModulePath2', 'LoadModulePath2', (['tempModule'], {'removeIfLoaded': '(True)'}), '(tempModule, removeIfLoaded=True)\n', (1183, 1216), False, 'from dselib.module import LoadModulePath2, LoadModuleFromPackage\n'), ((1569, 1619), 'dselib.module.LoadModuleFromPackage', 'LoadModuleFromPackage', (['testpackage', 'testmodulestem'], {}), '(testpackage, testmodulestem)\n', (1590, 1619), False, 'from dselib.module import LoadModulePath2, LoadModuleFromPackage\n'), ((2567, 2607), 'dselib.module.LoadModulePath', 'LoadModulePath', (['tempModule2'], {'remove': '(True)'}), '(tempModule2, remove=True)\n', (2581, 2607), False, 'from dselib.module import LoadModule, LoadModulePath\n'), ((2776, 2813), 'pytest.main', 'pytest.main', (["['-k', 'test_module.py']"], {}), "(['-k', 'test_module.py'])\n", (2787, 2813), False, 'import pytest\n'), ((1015, 
1035), 'dselib.dir.GetDSEExtDirectory', 'GetDSEExtDirectory', ([], {}), '()\n', (1033, 1035), False, 'from dselib.dir import GetDSEExtDirectory\n'), ((2496, 2508), 'pathlib.Path', 'Path', (['tmpdir'], {}), '(tmpdir)\n', (2500, 2508), False, 'from pathlib import Path\n'), ((2041, 2055), 'pathlib.Path', 'Path', (['"""./tmod"""'], {}), "('./tmod')\n", (2045, 2055), False, 'from pathlib import Path\n')] |
from haversine import haversine
def loadOrder(fileName):
print("Loading order from " + fileName + "...")
order = []
# open the file
with open(fileName) as infile:
# read line by line
for line in infile:
# remove newline character from the end
line = line.rstrip()
order.append(int(line))
print("Done... #order: " + str(len(order)))
return order
def doSubmissionFromOrder(order, gifts):
sleighMaxWeight = 1000.0
currentWeight = 0.0
currentTrip = []
trips = []
for giftId in order:
gift = gifts[giftId]
if currentWeight + gift.weight > sleighMaxWeight:
trips.append(currentTrip)
currentWeight = gift.weight
currentTrip = [gift]
else:
currentTrip.append(gift)
currentWeight = currentWeight + gift.weight
trips.append(currentTrip)
# counter = 0
# for trip in trips:
# tripWeight = 0
# for gift in trip:
# counter = counter + 1
# tripWeight = tripWeight + gift.weight
# print(str(tripWeight))
# print("order: " + str(len(order)))
# print("counter: " + str(counter))
return trips
def calculateWRW(trips, gifts):
of = 0.0
for trip in trips:
tripWeight = 0.0
for gift in trip:
tripWeight = tripWeight + gift.weight
# add north pole to first gift weight
of = of + haversine((90.0, 0.0), (trip[0].latitude, trip[0].longitude)) * (tripWeight + 10.0)
# add last gift to north pole
of = of + haversine((trip[len(trip) - 1].latitude, trip[len(trip) - 1].longitude), (90.0, 0.0)) * (10.0)
previousGift = None
for gift in trip:
if previousGift == None:
previousGift = gift
tripWeight = tripWeight - previousGift.weight
else:
currentGift = gift
of = of + haversine((currentGift.latitude, currentGift.longitude), (previousGift.latitude, previousGift.longitude)) * (tripWeight + 10.0)
tripWeight = tripWeight - currentGift.weight
previousGift = currentGift
return of
def createSubmissionFromTrips(trips, fileName):
tripId = 1
output = open(fileName, 'w')
output.write("GiftId,TripId\n")
for trip in trips:
for gift in trip:
output.write(str(gift.ID) + "," + str(tripId) + "\n")
tripId = tripId + 1
output.close()
| [
"haversine.haversine"
] | [((1610, 1671), 'haversine.haversine', 'haversine', (['(90.0, 0.0)', '(trip[0].latitude, trip[0].longitude)'], {}), '((90.0, 0.0), (trip[0].latitude, trip[0].longitude))\n', (1619, 1671), False, 'from haversine import haversine\n'), ((2123, 2233), 'haversine.haversine', 'haversine', (['(currentGift.latitude, currentGift.longitude)', '(previousGift.latitude, previousGift.longitude)'], {}), '((currentGift.latitude, currentGift.longitude), (previousGift.\n latitude, previousGift.longitude))\n', (2132, 2233), False, 'from haversine import haversine\n')] |
from xml.etree import ElementTree as ET
import io
import os.path
import sys
import gpxpy
import builtins
def find_all_tags(fp, tags, progress_callback=None):
parser = ET.XMLPullParser(("start", "end"))
root = None
while True:
chunk = fp.read(1024 * 1024)
if not chunk:
break
parser.feed(chunk)
for event, el in parser.read_events():
if event == "start" and root is None:
root = el
if event == "end" and el.tag in tags:
yield el.tag, el
root.clear()
if progress_callback is not None:
progress_callback(len(chunk))
def convert_xml_to_sqlite(fp, db, progress_callback=None, zipfile=None):
activity_summaries = []
records = []
workout_id = 1
for tag, el in find_all_tags(
fp, {"Record", "Workout", "ActivitySummary"}, progress_callback
):
if tag == "ActivitySummary":
activity_summaries.append(dict(el.attrib))
if len(activity_summaries) >= 100:
db["activity_summary"].insert_all(activity_summaries)
activity_summaries = []
elif tag == "Workout":
el.set("seq", workout_id)
workout_to_db(el, db, zipfile)
workout_id += 1
elif tag == "Record":
record = dict(el.attrib)
for child in el.findall("MetadataEntry"):
record["metadata_" + child.attrib["key"]] = child.attrib["value"]
records.append(record)
if len(records) >= 200:
write_records(records, db)
records = []
el.clear()
if records:
write_records(records, db)
if activity_summaries:
db["activity_summary"].insert_all(activity_summaries)
if progress_callback is not None:
progress_callback(sys.maxsize)
def workout_to_db(workout, db, zf):
record = dict(workout.attrib)
# add metadata entry items as extra keys
for el in workout.findall("MetadataEntry"):
record["metadata_" + el.attrib["key"]] = el.attrib["value"]
# Dump any WorkoutEvent in a nested list for the moment
record["workout_events"] = [el.attrib for el in workout.findall("WorkoutEvent")]
pk = db["workouts"].insert(record, alter=True, hash_id="id").last_pk
points = [
dict(el.attrib, workout_id=pk)
for el in workout.findall("WorkoutRoute/Location")
]
if len(points) == 0:
# Location not embedded, sidecar gpx files used instead
gpx_files = [os.path.join("apple_health_export", *(item.get("path").split("/")))
for item in workout.findall("WorkoutRoute/FileReference")]
# support zip or flat files
for path in gpx_files:
with open_file_or_zip(zf, path) as xml_file:
gpx = parse_gpx(xml_file)
for point in gpx.walk(only_points=False):
point[0].extensions = [etree_to_dict(e) for e in point[0].extensions]
points.append(dict({key: getattr(point[0], key) for key in point[0].__slots__}, workout_id=pk))
if len(points):
db["workout_points"].insert_all(
points, foreign_keys=[("workout_id", "workouts")], batch_size=50
)
def etree_to_dict(t):
d = {t.tag: list(map(etree_to_dict, list(t))) or t.text}
if t.attrib:
d.update({"@attr": t.attrib})
return d
def open_file_or_zip(zf, file):
if zf is not None:
return zf.open(file)
else:
return builtins.open(file, 'rb')
def parse_gpx(xml_file):
doc = io.TextIOWrapper(xml_file, encoding='UTF-8', newline=None)
doc.readline() # skip xml header
return gpxpy.parse("".join(doc.readlines()))
def write_records(records, db):
# We write records into tables based on their types
records_by_type = {}
for record in records:
table = "r{}".format(
record.pop("type")
.replace("HKQuantityTypeIdentifier", "")
.replace("HKCategoryTypeIdentifier", "")
)
records_by_type.setdefault(table, []).append(record)
# Bulk inserts for each one
for table, records_for_table in records_by_type.items():
db[table].insert_all(
records_for_table,
alter=True,
column_order=["startDate", "endDate", "value", "unit"],
batch_size=50,
)
| [
"xml.etree.ElementTree.XMLPullParser",
"builtins.open",
"io.TextIOWrapper"
] | [((174, 208), 'xml.etree.ElementTree.XMLPullParser', 'ET.XMLPullParser', (["('start', 'end')"], {}), "(('start', 'end'))\n", (190, 208), True, 'from xml.etree import ElementTree as ET\n'), ((3616, 3674), 'io.TextIOWrapper', 'io.TextIOWrapper', (['xml_file'], {'encoding': '"""UTF-8"""', 'newline': 'None'}), "(xml_file, encoding='UTF-8', newline=None)\n", (3632, 3674), False, 'import io\n'), ((3553, 3578), 'builtins.open', 'builtins.open', (['file', '"""rb"""'], {}), "(file, 'rb')\n", (3566, 3578), False, 'import builtins\n')] |
# utils/test_kronecker.py
"""Tests for rom_operator_inference.utils._kronecker."""
import pytest
import numpy as np
import rom_operator_inference as opinf
# Index generation for fast self-product kronecker evaluation =================
def test_kron2c_indices(n_tests=100):
"""Test utils._kronecker.kron2c_indices()."""
mask = opinf.utils.kron2c_indices(4)
assert np.all(mask == np.array([[0, 0],
[1, 0], [1, 1],
[2, 0], [2, 1], [2, 2],
[3, 0], [3, 1], [3, 2], [3, 3]],
dtype=int))
submask = opinf.utils.kron2c_indices(3)
assert np.allclose(submask, mask[:6])
r = 10
_r2 = r * (r + 1) // 2
mask = opinf.utils.kron2c_indices(r)
assert mask.shape == (_r2, 2)
assert np.all(mask[0] == 0)
assert np.all(mask[-1] == r - 1)
assert mask.sum(axis=0)[0] == sum(i*(i+1) for i in range(r))
# Ensure consistency with utils.kron2c().
for _ in range(n_tests):
x = np.random.random(r)
assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron2c(x))
def test_kron3c_indices(n_tests=100):
"""Test utils._kronecker.kron3c_indices()."""
mask = opinf.utils.kron3c_indices(2)
assert np.all(mask == np.array([[0, 0, 0],
[1, 0, 0], [1, 1, 0], [1, 1, 1]],
dtype=int))
r = 10
mask = opinf.utils.kron3c_indices(r)
_r3 = r * (r + 1) * (r + 2) // 6
mask = opinf.utils.kron3c_indices(r)
assert mask.shape == (_r3, 3)
assert np.all(mask[0] == 0)
assert np.all(mask[-1] == r - 1)
# Ensure consistency with utils.kron3c().
for _ in range(n_tests):
x = np.random.random(r)
assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron3c(x))
# Kronecker (Khatri-Rao) products =============================================
# utils.kron2c() --------------------------------------------------------------
def _test_kron2c_single_vector(n):
"""Do one vector test of utils._kronecker.kron2c()."""
x = np.random.random(n)
x2 = opinf.utils.kron2c(x)
assert x2.ndim == 1
assert x2.shape[0] == n*(n+1)//2
for i in range(n):
assert np.allclose(x2[i*(i+1)//2:(i+1)*(i+2)//2], x[i]*x[:i+1])
def _test_kron2c_single_matrix(n):
"""Do one matrix test of utils._kronecker.kron2c()."""
X = np.random.random((n,n))
X2 = opinf.utils.kron2c(X)
assert X2.ndim == 2
assert X2.shape[0] == n*(n+1)//2
assert X2.shape[1] == n
for i in range(n):
assert np.allclose(X2[i*(i+1)//2:(i+1)*(i+2)//2], X[i]*X[:i+1])
def test_kron2c(n_tests=100):
"""Test utils._kronecker.kron2c()."""
# Try with bad input.
with pytest.raises(ValueError) as exc:
opinf.utils.kron2c(np.random.random((3,3,3)), checkdim=True)
assert exc.value.args[0] == "x must be one- or two-dimensional"
# Correct inputs.
for n in np.random.randint(2, 100, n_tests):
_test_kron2c_single_vector(n)
_test_kron2c_single_matrix(n)
# utils.kron3c() --------------------------------------------------------------
def _test_kron3c_single_vector(n):
"""Do one vector test of utils._kronecker.kron3c()."""
x = np.random.random(n)
x3 = opinf.utils.kron3c(x)
assert x3.ndim == 1
assert x3.shape[0] == n*(n+1)*(n+2)//6
for i in range(n):
assert np.allclose(x3[i*(i+1)*(i+2)//6:(i+1)*(i+2)*(i+3)//6],
x[i]*opinf.utils.kron2c(x[:i+1]))
def _test_kron3c_single_matrix(n):
"""Do one matrix test of utils._kronecker.kron3c()."""
X = np.random.random((n,n))
X3 = opinf.utils.kron3c(X)
assert X3.ndim == 2
assert X3.shape[0] == n*(n+1)*(n+2)//6
assert X3.shape[1] == n
for i in range(n):
assert np.allclose(X3[i*(i+1)*(i+2)//6:(i+1)*(i+2)*(i+3)//6],
X[i]*opinf.utils.kron2c(X[:i+1]))
def test_kron3c(n_tests=50):
"""Test utils._kronecker.kron3c()."""
# Try with bad input.
with pytest.raises(ValueError) as exc:
opinf.utils.kron3c(np.random.random((2,4,3)), checkdim=True)
assert exc.value.args[0] == "x must be one- or two-dimensional"
# Correct inputs.
for n in np.random.randint(2, 30, n_tests):
_test_kron3c_single_vector(n)
_test_kron3c_single_matrix(n)
# Matricized tensor management ================================================
# utils.expand_quadratic() ----------------------------------------------------
def _test_expand_quadratic_single(r):
"""Do one test of utils._kronecker.expand_quadratic()."""
x = np.random.random(r)
# Do a valid expand_quadratic() calculation and check dimensions.
s = r*(r+1)//2
Hc = np.random.random((r,s))
H = opinf.utils.expand_quadratic(Hc)
assert H.shape == (r,r**2)
# Check that Hc(x^2) == H(x⊗x).
Hxx = H @ np.kron(x,x)
assert np.allclose(Hc @ opinf.utils.kron2c(x), Hxx)
# Check properties of the tensor for H.
Htensor = H.reshape((r,r,r))
assert np.allclose(Htensor @ x @ x, Hxx)
for subH in H:
assert np.allclose(subH, subH.T)
def test_expand_quadratic(n_tests=100):
"""Test utils._kronecker.expand_quadratic()."""
# Try to do expand_quadratic() with a bad second dimension.
r = 5
sbad = r*(r+3)//2
Hc = np.random.random((r, sbad))
with pytest.raises(ValueError) as exc:
opinf.utils.expand_quadratic(Hc)
assert exc.value.args[0] == \
f"invalid shape (r,s) = {(r,sbad)} with s != r(r+1)/2"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 100, n_tests):
_test_expand_quadratic_single(r)
# utils.compress_quadratic() --------------------------------------------------
def _test_compress_quadratic_single(r):
"""Do one test of utils._kronecker.compress_quadratic()."""
x = np.random.random(r)
# Do a valid compress_quadratic() calculation and check dimensions.
H = np.random.random((r,r**2))
s = r*(r+1)//2
Hc = opinf.utils.compress_quadratic(H)
assert Hc.shape == (r,s)
# Check that Hc(x^2) == H(x⊗x).
Hxx = H @ np.kron(x,x)
assert np.allclose(Hxx, Hc @ opinf.utils.kron2c(x))
# Check that expand_quadratic() and compress_quadratic()
# are inverses up to symmetry.
H2 = opinf.utils.expand_quadratic(Hc)
Ht = H.reshape((r,r,r))
Htnew = np.empty_like(Ht)
for i in range(r):
Htnew[i] = (Ht[i] + Ht[i].T) / 2
assert np.allclose(H2, Htnew.reshape(H.shape))
def test_compress_quadratic(n_tests=100):
"""Test utils._kronecker.compress_quadratic()."""
# Try to do compress_quadratic() with a bad second dimension.
r = 5
r2bad = r**2 + 1
H = np.random.random((r, r2bad))
with pytest.raises(ValueError) as exc:
opinf.utils.compress_quadratic(H)
assert exc.value.args[0] == \
f"invalid shape (r,a) = {(r,r2bad)} with a != r**2"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 100, n_tests):
_test_compress_quadratic_single(r)
# utils.expand_cubic() --------------------------------------------------------
def _test_expand_cubic_single(r):
"""Do one test of utils._kronecker.expand_cubic()."""
x = np.random.random(r)
# Do a valid expand_cubic() calculation and check dimensions.
s = r*(r+1)*(r+2)//6
Gc = np.random.random((r,s))
G = opinf.utils.expand_cubic(Gc)
assert G.shape == (r,r**3)
# Check that Gc(x^3) == G(x⊗x⊗x).
Gxxx = G @ np.kron(x,np.kron(x,x))
assert np.allclose(Gc @ opinf.utils.kron3c(x), Gxxx)
# Check properties of the tensor for G.
Gtensor = G.reshape((r,r,r,r))
assert np.allclose(Gtensor @ x @ x @ x, Gxxx)
for subG in G:
assert np.allclose(subG, subG.T)
def test_expand_cubic(n_tests=50):
"""Test utils._kronecker.expand_cubic()."""
# Try to do expand_cubic() with a bad second dimension.
r = 5
sbad = r*(r+1)*(r+3)//6
Gc = np.random.random((r, sbad))
with pytest.raises(ValueError) as exc:
opinf.utils.expand_cubic(Gc)
assert exc.value.args[0] == \
f"invalid shape (r,s) = {(r,sbad)} with s != r(r+1)(r+2)/6"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 30, n_tests):
_test_expand_cubic_single(r)
# utils.compress_cubic() ------------------------------------------------------
def _test_compress_cubic_single(r):
"""Do one test of utils._kronecker.compress_cubic()."""
x = np.random.random(r)
# Do a valid compress_cubic() calculation and check dimensions.
G = np.random.random((r,r**3))
s = r*(r+1)*(r+2)//6
Gc = opinf.utils.compress_cubic(G)
assert Gc.shape == (r,s)
# Check that Gc(x^3) == G(x⊗x⊗x).
Gxxx = G @ np.kron(x,np.kron(x,x))
assert np.allclose(Gxxx, Gc @ opinf.utils.kron3c(x))
# Check that expand_cubic() and compress_cubic() are "inverses."
G_new = opinf.utils.expand_cubic(Gc)
assert np.allclose(Gc, opinf.utils.compress_cubic(G_new))
def test_compress_cubic(n_tests=50):
"""Test utils._kronecker.compress_cubic()."""
# Try to do compress_cubic() with a bad second dimension.
r = 5
r3bad = r**3 + 1
G = np.random.random((r, r3bad))
with pytest.raises(ValueError) as exc:
opinf.utils.compress_cubic(G)
assert exc.value.args[0] == \
f"invalid shape (r,a) = {(r,r3bad)} with a != r**3"
# Do 100 test cases of varying dimensions.
for r in np.random.randint(2, 30, n_tests):
_test_compress_cubic_single(r)
| [
"rom_operator_inference.utils.kron3c",
"numpy.prod",
"numpy.allclose",
"rom_operator_inference.utils.kron2c",
"numpy.random.random",
"numpy.kron",
"numpy.array",
"numpy.random.randint",
"rom_operator_inference.utils.expand_cubic",
"rom_operator_inference.utils.kron3c_indices",
"numpy.empty_like"... | [((338, 367), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['(4)'], {}), '(4)\n', (364, 367), True, 'import rom_operator_inference as opinf\n'), ((654, 683), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['(3)'], {}), '(3)\n', (680, 683), True, 'import rom_operator_inference as opinf\n'), ((695, 725), 'numpy.allclose', 'np.allclose', (['submask', 'mask[:6]'], {}), '(submask, mask[:6])\n', (706, 725), True, 'import numpy as np\n'), ((776, 805), 'rom_operator_inference.utils.kron2c_indices', 'opinf.utils.kron2c_indices', (['r'], {}), '(r)\n', (802, 805), True, 'import rom_operator_inference as opinf\n'), ((851, 871), 'numpy.all', 'np.all', (['(mask[0] == 0)'], {}), '(mask[0] == 0)\n', (857, 871), True, 'import numpy as np\n'), ((883, 908), 'numpy.all', 'np.all', (['(mask[-1] == r - 1)'], {}), '(mask[-1] == r - 1)\n', (889, 908), True, 'import numpy as np\n'), ((1259, 1288), 'rom_operator_inference.utils.kron3c_indices', 'opinf.utils.kron3c_indices', (['(2)'], {}), '(2)\n', (1285, 1288), True, 'import rom_operator_inference as opinf\n'), ((1476, 1505), 'rom_operator_inference.utils.kron3c_indices', 'opinf.utils.kron3c_indices', (['r'], {}), '(r)\n', (1502, 1505), True, 'import rom_operator_inference as opinf\n'), ((1554, 1583), 'rom_operator_inference.utils.kron3c_indices', 'opinf.utils.kron3c_indices', (['r'], {}), '(r)\n', (1580, 1583), True, 'import rom_operator_inference as opinf\n'), ((1629, 1649), 'numpy.all', 'np.all', (['(mask[0] == 0)'], {}), '(mask[0] == 0)\n', (1635, 1649), True, 'import numpy as np\n'), ((1661, 1686), 'numpy.all', 'np.all', (['(mask[-1] == r - 1)'], {}), '(mask[-1] == r - 1)\n', (1667, 1686), True, 'import numpy as np\n'), ((2135, 2154), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (2151, 2154), True, 'import numpy as np\n'), ((2164, 2185), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (2182, 
2185), True, 'import rom_operator_inference as opinf\n'), ((2446, 2470), 'numpy.random.random', 'np.random.random', (['(n, n)'], {}), '((n, n))\n', (2462, 2470), True, 'import numpy as np\n'), ((2479, 2500), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['X'], {}), '(X)\n', (2497, 2500), True, 'import rom_operator_inference as opinf\n'), ((3001, 3035), 'numpy.random.randint', 'np.random.randint', (['(2)', '(100)', 'n_tests'], {}), '(2, 100, n_tests)\n', (3018, 3035), True, 'import numpy as np\n'), ((3297, 3316), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3313, 3316), True, 'import numpy as np\n'), ((3326, 3347), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (3344, 3347), True, 'import rom_operator_inference as opinf\n'), ((3673, 3697), 'numpy.random.random', 'np.random.random', (['(n, n)'], {}), '((n, n))\n', (3689, 3697), True, 'import numpy as np\n'), ((3706, 3727), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['X'], {}), '(X)\n', (3724, 3727), True, 'import rom_operator_inference as opinf\n'), ((4292, 4325), 'numpy.random.randint', 'np.random.randint', (['(2)', '(30)', 'n_tests'], {}), '(2, 30, n_tests)\n', (4309, 4325), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (4689, 4692), True, 'import numpy as np\n'), ((4792, 4816), 'numpy.random.random', 'np.random.random', (['(r, s)'], {}), '((r, s))\n', (4808, 4816), True, 'import numpy as np\n'), ((4824, 4856), 'rom_operator_inference.utils.expand_quadratic', 'opinf.utils.expand_quadratic', (['Hc'], {}), '(Hc)\n', (4852, 4856), True, 'import rom_operator_inference as opinf\n'), ((5097, 5130), 'numpy.allclose', 'np.allclose', (['(Htensor @ x @ x)', 'Hxx'], {}), '(Htensor @ x @ x, Hxx)\n', (5108, 5130), True, 'import numpy as np\n'), ((5390, 5417), 'numpy.random.random', 'np.random.random', (['(r, sbad)'], {}), '((r, sbad))\n', (5406, 5417), True, 'import numpy 
as np\n'), ((5660, 5694), 'numpy.random.randint', 'np.random.randint', (['(2)', '(100)', 'n_tests'], {}), '(2, 100, n_tests)\n', (5677, 5694), True, 'import numpy as np\n'), ((5931, 5950), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (5947, 5950), True, 'import numpy as np\n'), ((6032, 6061), 'numpy.random.random', 'np.random.random', (['(r, r ** 2)'], {}), '((r, r ** 2))\n', (6048, 6061), True, 'import numpy as np\n'), ((6087, 6120), 'rom_operator_inference.utils.compress_quadratic', 'opinf.utils.compress_quadratic', (['H'], {}), '(H)\n', (6117, 6120), True, 'import rom_operator_inference as opinf\n'), ((6376, 6408), 'rom_operator_inference.utils.expand_quadratic', 'opinf.utils.expand_quadratic', (['Hc'], {}), '(Hc)\n', (6404, 6408), True, 'import rom_operator_inference as opinf\n'), ((6449, 6466), 'numpy.empty_like', 'np.empty_like', (['Ht'], {}), '(Ht)\n', (6462, 6466), True, 'import numpy as np\n'), ((6785, 6813), 'numpy.random.random', 'np.random.random', (['(r, r2bad)'], {}), '((r, r2bad))\n', (6801, 6813), True, 'import numpy as np\n'), ((7054, 7088), 'numpy.random.randint', 'np.random.randint', (['(2)', '(100)', 'n_tests'], {}), '(2, 100, n_tests)\n', (7071, 7088), True, 'import numpy as np\n'), ((7315, 7334), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (7331, 7334), True, 'import numpy as np\n'), ((7436, 7460), 'numpy.random.random', 'np.random.random', (['(r, s)'], {}), '((r, s))\n', (7452, 7460), True, 'import numpy as np\n'), ((7468, 7496), 'rom_operator_inference.utils.expand_cubic', 'opinf.utils.expand_cubic', (['Gc'], {}), '(Gc)\n', (7492, 7496), True, 'import rom_operator_inference as opinf\n'), ((7754, 7792), 'numpy.allclose', 'np.allclose', (['(Gtensor @ x @ x @ x)', 'Gxxx'], {}), '(Gtensor @ x @ x @ x, Gxxx)\n', (7765, 7792), True, 'import numpy as np\n'), ((8045, 8072), 'numpy.random.random', 'np.random.random', (['(r, sbad)'], {}), '((r, sbad))\n', (8061, 8072), True, 'import numpy as np\n'), ((8316, 
8349), 'numpy.random.randint', 'np.random.randint', (['(2)', '(30)', 'n_tests'], {}), '(2, 30, n_tests)\n', (8333, 8349), True, 'import numpy as np\n'), ((8574, 8593), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (8590, 8593), True, 'import numpy as np\n'), ((8671, 8700), 'numpy.random.random', 'np.random.random', (['(r, r ** 3)'], {}), '((r, r ** 3))\n', (8687, 8700), True, 'import numpy as np\n'), ((8732, 8761), 'rom_operator_inference.utils.compress_cubic', 'opinf.utils.compress_cubic', (['G'], {}), '(G)\n', (8758, 8761), True, 'import rom_operator_inference as opinf\n'), ((9008, 9036), 'rom_operator_inference.utils.expand_cubic', 'opinf.utils.expand_cubic', (['Gc'], {}), '(Gc)\n', (9032, 9036), True, 'import rom_operator_inference as opinf\n'), ((9289, 9317), 'numpy.random.random', 'np.random.random', (['(r, r3bad)'], {}), '((r, r3bad))\n', (9305, 9317), True, 'import numpy as np\n'), ((9554, 9587), 'numpy.random.randint', 'np.random.randint', (['(2)', '(30)', 'n_tests'], {}), '(2, 30, n_tests)\n', (9571, 9587), True, 'import numpy as np\n'), ((1062, 1081), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (1078, 1081), True, 'import numpy as np\n'), ((1775, 1794), 'numpy.random.random', 'np.random.random', (['r'], {}), '(r)\n', (1791, 1794), True, 'import numpy as np\n'), ((2285, 2359), 'numpy.allclose', 'np.allclose', (['x2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]', '(x[i] * x[:i + 1])'], {}), '(x2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2], x[i] * x[:i + 1])\n', (2296, 2359), True, 'import numpy as np\n'), ((2628, 2702), 'numpy.allclose', 'np.allclose', (['X2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2]', '(X[i] * X[:i + 1])'], {}), '(X2[i * (i + 1) // 2:(i + 1) * (i + 2) // 2], X[i] * X[:i + 1])\n', (2639, 2702), True, 'import numpy as np\n'), ((2794, 2819), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2807, 2819), False, 'import pytest\n'), ((4085, 4110), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (4098, 4110), False, 'import pytest\n'), ((4939, 4952), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (4946, 4952), True, 'import numpy as np\n'), ((5165, 5190), 'numpy.allclose', 'np.allclose', (['subH', 'subH.T'], {}), '(subH, subH.T)\n', (5176, 5190), True, 'import numpy as np\n'), ((5427, 5452), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5440, 5452), False, 'import pytest\n'), ((5469, 5501), 'rom_operator_inference.utils.expand_quadratic', 'opinf.utils.expand_quadratic', (['Hc'], {}), '(Hc)\n', (5497, 5501), True, 'import rom_operator_inference as opinf\n'), ((6201, 6214), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (6208, 6214), True, 'import numpy as np\n'), ((6823, 6848), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6836, 6848), False, 'import pytest\n'), ((6865, 6898), 'rom_operator_inference.utils.compress_quadratic', 'opinf.utils.compress_quadratic', (['H'], {}), '(H)\n', (6895, 6898), True, 'import rom_operator_inference as opinf\n'), ((7827, 7852), 'numpy.allclose', 'np.allclose', (['subG', 'subG.T'], {}), '(subG, subG.T)\n', (7838, 7852), True, 'import numpy as np\n'), ((8082, 8107), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8095, 8107), False, 'import pytest\n'), ((8124, 8152), 'rom_operator_inference.utils.expand_cubic', 'opinf.utils.expand_cubic', (['Gc'], {}), '(Gc)\n', (8148, 8152), True, 'import rom_operator_inference as opinf\n'), ((9064, 9097), 'rom_operator_inference.utils.compress_cubic', 'opinf.utils.compress_cubic', (['G_new'], {}), '(G_new)\n', (9090, 9097), True, 'import rom_operator_inference as opinf\n'), ((9327, 9352), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9340, 9352), False, 'import pytest\n'), ((9369, 9398), 'rom_operator_inference.utils.compress_cubic', 'opinf.utils.compress_cubic', (['G'], {}), '(G)\n', (9395, 9398), True, 'import 
rom_operator_inference as opinf\n'), ((394, 500), 'numpy.array', 'np.array', (['[[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1], [3, 2], [3, 3]\n ]'], {'dtype': 'int'}), '([[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1], [\n 3, 2], [3, 3]], dtype=int)\n', (402, 500), True, 'import numpy as np\n'), ((1109, 1133), 'numpy.prod', 'np.prod', (['x[mask]'], {'axis': '(1)'}), '(x[mask], axis=1)\n', (1116, 1133), True, 'import numpy as np\n'), ((1135, 1156), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (1153, 1156), True, 'import rom_operator_inference as opinf\n'), ((1315, 1380), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]]'], {'dtype': 'int'}), '([[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]], dtype=int)\n', (1323, 1380), True, 'import numpy as np\n'), ((1822, 1846), 'numpy.prod', 'np.prod', (['x[mask]'], {'axis': '(1)'}), '(x[mask], axis=1)\n', (1829, 1846), True, 'import numpy as np\n'), ((1848, 1869), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (1866, 1869), True, 'import rom_operator_inference as opinf\n'), ((2855, 2882), 'numpy.random.random', 'np.random.random', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (2871, 2882), True, 'import numpy as np\n'), ((4146, 4173), 'numpy.random.random', 'np.random.random', (['(2, 4, 3)'], {}), '((2, 4, 3))\n', (4162, 4173), True, 'import numpy as np\n'), ((4980, 5001), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (4998, 5001), True, 'import rom_operator_inference as opinf\n'), ((6247, 6268), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x'], {}), '(x)\n', (6265, 6268), True, 'import rom_operator_inference as opinf\n'), ((7592, 7605), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (7599, 7605), True, 'import numpy as np\n'), ((7634, 7655), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (7652, 7655), True, 
'import rom_operator_inference as opinf\n'), ((8855, 8868), 'numpy.kron', 'np.kron', (['x', 'x'], {}), '(x, x)\n', (8862, 8868), True, 'import numpy as np\n'), ((8903, 8924), 'rom_operator_inference.utils.kron3c', 'opinf.utils.kron3c', (['x'], {}), '(x)\n', (8921, 8924), True, 'import rom_operator_inference as opinf\n'), ((3540, 3569), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['x[:i + 1]'], {}), '(x[:i + 1])\n', (3558, 3569), True, 'import rom_operator_inference as opinf\n'), ((3948, 3977), 'rom_operator_inference.utils.kron2c', 'opinf.utils.kron2c', (['X[:i + 1]'], {}), '(X[:i + 1])\n', (3966, 3977), True, 'import rom_operator_inference as opinf\n')] |
#
# GADANN - GPU Accelerated Deep Artificial Neural Network
#
# Copyright (C) 2014 <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import logging
from . import kernels
logger = logging.getLogger(__name__)
class Updater(object):
    """Abstract base class for parameter-update strategies.

    Subclasses implement ``update(params, grads)``, which applies the
    gradients in ``grads`` to the parameters in ``params`` in place.
    """
    def __init__(self):
        pass

    def update(self, params, grads):
        """Apply ``grads`` to ``params`` in place; must be overridden.

        The original body referenced undefined names (``params``,
        ``learning_rate``) and the Python 2-only ``dict.iterkeys`` API, so
        it could never execute; this class is really just an interface.
        """
        raise NotImplementedError('Subclasses must implement update()')
class SgdUpdater(Updater):
    """Plain stochastic gradient descent with L2 weight decay."""

    def __init__(self, learning_rate=0.1, weight_cost=0.01):
        self.learning_rate = learning_rate
        self.weight_cost = weight_cost

    def update(self, params, grads):
        """One SGD step: params[k] -= lr*grads[k] + decay*params[k], in place."""
        lr = self.learning_rate
        decay = self.weight_cost
        for key, grad in grads.items():
            params[key] = params[key] - lr * grad - params[key] * decay

    def status(self):
        """SGD keeps no extra state worth reporting."""
        return ''
class MomentumUpdater(Updater):
    """SGD with exponential momentum and L2 weight decay.

    ``inertia`` is annealed towards 1 by a small amount on every update, so
    later steps rely more on the accumulated velocity.
    """
    def __init__(self, learning_rate=0.1, inertia=0.9, weight_cost=0.00):
        self.inertia = inertia
        self.weight_cost = weight_cost
        self.learning_rate = learning_rate

    def update(self, params, grads):
        """Blend ``grads`` into the running velocities, then step ``params`` in place."""
        try:
            self.velocities = [self.inertia*v + (1-self.inertia)*g for (v,g) in zip(self.velocities, grads)]
        except AttributeError:
            # First call: no velocities yet, seed them from the gradients.
            # (This used to be a bare ``except:`` that also swallowed any
            # real error raised inside the comprehension.)
            self.velocities = [(1-self.inertia)*g for g in grads]
        for i in range(len(params)):
            params[i] = params[i] - self.learning_rate*self.velocities[i] - params[i]*self.weight_cost
        # Anneal inertia towards 1.
        self.inertia += .001*(1-self.inertia)

    def status(self):
        return 'inertia:' + str(self.inertia)
class RmspropUpdater(Updater):
    """RMSProp: scale each gradient by a running RMS of its own history."""

    def __init__(self, learning_rate=0.1, inertia=0.0, weight_cost=0.00):
        # epsilon keeps the denominator away from zero.
        self.epsilon = 0.000001
        self.inertia = inertia
        self.weight_cost = weight_cost
        self.learning_rate = learning_rate

    def update(self, params, grads):
        """Accumulate squared gradients and take one RMSProp step in place."""
        try:
            self.accum = [self.inertia*a + (1-self.inertia)*(g**2) for (a,g) in zip(self.accum, grads)]
        except AttributeError:
            # First call: the accumulators do not exist yet.
            # (Was a bare ``except:`` that hid real errors.)
            self.accum = [(1-self.inertia)*(g**2) for g in grads]
        for i in range(len(params)):
            params[i] = params[i] - self.learning_rate * grads[i] / (kernels.sqrt(self.accum[i]) + self.epsilon) - params[i]*self.weight_cost
        # Anneal inertia towards 1, matching MomentumUpdater.
        self.inertia += .001*(1-self.inertia)

    def status(self):
        return 'inertia:' + str(self.inertia)
| [
"logging.getLogger"
] | [((1244, 1271), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1261, 1271), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
import redis
import os
import telebot
import math
import random
import threading
from telebot import types
from emoji import emojize
from pymongo import MongoClient
# --- Module-level bot / database setup -----------------------------------
# Bot token and MongoDB connection string are read from the environment.
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
# Telegram user ids allowed to run the admin commands (/stavki, /begin, /roll).
admins=[441399484]
# In-memory lobbies, keyed by chat id; see createlobby() for the schema.
games={}
client1=os.environ['database']
client=MongoClient(client1)
db=client.chlenomer
# Collection of player documents holding 'id' and 'chlenocoins' balances.
users=db.ids_people
@bot.message_handler(commands=['begin'])
def begin(m):
    """Admin command: open the betting phase of an existing lobby."""
    if m.from_user.id not in admins:
        return
    if m.chat.id not in games:
        return
    bot.send_message(m.chat.id, 'Начинаем делать ставки (в лс бота)! Про коэффициенты выигрышей вы можете узнать с помощью команды /help')
    games[m.chat.id]['began']=1
    # Ping every joined player in private chat to collect their bet.
    for player_id in games[m.chat.id]['players']:
        bot.send_message(player_id, 'Напишите, сколько членокоинов вы хотите поставить.')
@bot.message_handler(commands=['help'])
def help(m):
    """Reply with the payout coefficients table."""
    payout_table = ('Коэффициенты ставок:\n'
                    '1-15, 16-30: *1.5*\n'
                    '1-5, 6-10, 11-15, 16-20, 21-25, 26-30: *5*\n'
                    '0: *25*')
    bot.send_message(m.chat.id, payout_table, parse_mode='markdown')
@bot.message_handler(commands=['join'])
def join(m):
    """Let a user join an open lobby (only before betting has started)."""
    if m.chat.id not in games:
        return
    if m.from_user.id in games[m.chat.id]['players']:
        return
    if games[m.chat.id]['began'] == 1:
        return
    try:
        # The private message fails if the user never wrote to the bot;
        # only register the player once the PM goes through.
        bot.send_message(m.from_user.id, 'Вы присоединились к казино!')
        bot.send_message(m.chat.id, m.from_user.first_name+' Вошел в казино!')
        games[m.chat.id]['players'].update(createuser(m.from_user.id, m.from_user.first_name))
    except Exception:
        # Was a bare ``except:`` that also swallowed SystemExit/KeyboardInterrupt.
        bot.send_message(m.chat.id, m.from_user.first_name+', сначала напишите боту в личку!')
@bot.message_handler(commands=['roll'])
def roll(m):
    """Admin command: start the wheel animation.

    The winning number is drawn in rollend(), so the unused
    ``random.randint(0, 30)`` call that used to live here (its result was
    discarded) has been removed.
    """
    if m.from_user.id not in admins:
        return
    if m.chat.id not in games or games[m.chat.id]['began'] != 1:
        return
    msg = bot.send_message(m.chat.id, 'Крутим барабан...\n'+'🕐')
    # Kick off the frame chain roll2 -> roll3 -> ... -> rollend.
    threading.Timer(0.1, roll2, args=[m.chat.id, msg.message_id]).start()
def _spin_step(face, next_step, id, id2):
    """Show one clock face of the spinning animation, then schedule next_step.

    Each frame edits the same message (id2) in chat ``id`` and arms a
    0.1 s timer for the next frame; the chain ends with rollend().
    """
    medit('Крутим барабан...\n' + face, id, id2)
    t = threading.Timer(0.1, next_step, args=[id, id2])
    t.start()

# The eleven near-identical rollN functions are collapsed into thin
# wrappers so the frame order is visible at a glance. The names are kept
# because roll() schedules roll2 and the frames chain to each other by name.
def roll2(id, id2):
    _spin_step('🕑', roll3, id, id2)
def roll3(id, id2):
    _spin_step('🕒', roll4, id, id2)
def roll4(id, id2):
    _spin_step('🕓', roll5, id, id2)
def roll5(id, id2):
    _spin_step('🕔', roll6, id, id2)
def roll6(id, id2):
    _spin_step('🕕', roll7, id, id2)  # halfway point
def roll7(id, id2):
    _spin_step('🕖', roll8, id, id2)
def roll8(id, id2):
    _spin_step('🕗', roll9, id, id2)
def roll9(id, id2):
    _spin_step('🕘', roll10, id, id2)
def roll10(id, id2):
    _spin_step('🕙', roll11, id, id2)
def roll11(id, id2):
    _spin_step('🕚', roll12, id, id2)
def roll12(id, id2):
    _spin_step('🕛', rollend, id, id2)
# Inclusive (low, high) bounds and payout coefficient for every bet option.
# This table replaces nine copy-pasted if/else branches that differed only
# in the bounds and the multiplier.
_BET_RANGES = {
    '1-15': (1, 15, 1.5),
    '16-30': (16, 30, 1.5),
    '1-5': (1, 5, 5),
    '6-10': (6, 10, 5),
    '11-15': (11, 15, 5),
    '16-20': (16, 20, 5),
    '21-25': (21, 25, 5),
    '26-30': (26, 30, 5),
    '0': (0, 0, 25),
}

def rollend(id, id2):
    """Draw the winning number, announce per-player results, settle balances.

    Called as the last frame of the roll animation. For each player who
    bet: a hit pays bet*coefficient (their stake is deducted first), a miss
    costs the stake. The stray debug ``print`` calls that lived in two of
    the old copy-pasted branches were removed.
    """
    x=random.randint(0,30)
    medit('Выпавшее число: *'+str(x)+'*.', id, id2)
    text='Результаты:\n\n'
    for ids in games[id]['players']:
        player = games[id]['players'][ids]
        if player['betto'] is not None:
            # betto is always one of the table keys (set by the inline
            # keyboard callback), so a KeyError here means a real bug.
            low, high, coeff = _BET_RANGES[player['betto']]
            if low <= x <= high:
                win = round(player['bet']*coeff, 0)
                text += '*'+player['name']+'*'+' Выиграл '+str(win)+' членокоин(ов)!\n'
                # Winners pay their stake up front; the payout is credited
                # by the shared update below.
                users.update_one({'id':ids}, {'$inc':{'chlenocoins':-player['bet']}})
            else:
                win = -player['bet']
                text += '*'+player['name']+'*'+' проиграл '+str(win+(-win*2))+' членокоин(ов)!\n'
        else:
            text += '*'+player['name']+'*'+' Не поставил ничего!\n'
            win = 0
        # Credit (or debit) the outcome of this player's bet.
        users.update_one({'id':ids}, {'$inc':{'chlenocoins':win}})
    bot.send_message(id, text, parse_mode='markdown')
    del games[id]
@bot.message_handler(commands=['stavki'])
def stavki(m):
    """Admin command: open a new casino lobby in this chat."""
    if m.from_user.id not in admins:
        bot.send_message(m.chat.id, 'Создать лобби может только администратор казино!')
        return
    if m.chat.id in games:
        # A lobby already exists here; silently ignore, as before.
        return
    games.update(createlobby(m.chat.id))
    bot.send_message(m.chat.id, 'Казино открыто! Жмите /join, чтобы испытать удачу и выиграть членокоины!')
@bot.message_handler(content_types=['text'])
def texttt(m):
    """Private-chat handler: parse a bet amount and offer the range keyboard.

    Only reacts to private messages (chat id == user id) from a player who
    has joined some lobby; the text is expected to be an integer number of
    chlenocoins not exceeding the player's balance.
    """
    if m.chat.id==m.from_user.id:
        i=0
        # Find the lobby this player belongs to. If the player somehow sits
        # in several lobbies, the LAST one iterated wins.
        for ids in games:
            if m.from_user.id in games[ids]['players']:
                i=1
                y=games[ids]
        if i==1:
            try:
                x=int(m.text)
                x=round(x, 0)
                player=users.find_one({'id':m.from_user.id})
                if player!=None:
                    if player['chlenocoins']>=x:
                        y['players'][m.from_user.id]['bet']=x
                        # Inline keyboard with the nine bettable ranges; the
                        # callback_data is consumed by inline() below.
                        kb=types.InlineKeyboardMarkup()
                        kb.add(types.InlineKeyboardButton(text='1-15', callback_data='1-15'),types.InlineKeyboardButton(text='16-30', callback_data='16-30'),types.InlineKeyboardButton(text='1-5', callback_data='1-5'))
                        kb.add(types.InlineKeyboardButton(text='6-10', callback_data='6-10'),types.InlineKeyboardButton(text='11-15', callback_data='11-15'),types.InlineKeyboardButton(text='16-20', callback_data='16-20'))
                        kb.add(types.InlineKeyboardButton(text='21-25', callback_data='21-25'),types.InlineKeyboardButton(text='26-30', callback_data='26-30'),types.InlineKeyboardButton(text='0', callback_data='0'))
                        bot.send_message(m.from_user.id, 'Вы поставили '+str(x)+' членокоинов! Теперь выберите, на что вы их ставите:', reply_markup=kb)
                    else:
                        bot.send_message(m.chat.id, 'Недостаточно членокоинов!')
            # Deliberate best-effort: non-numeric text (ValueError on int())
            # is ignored. NOTE(review): the bare except also hides Telegram
            # and Mongo errors.
            except:
                pass
@bot.callback_query_handler(func=lambda call:True)
def inline(call):
    """Record the range a player bet on (inline keyboard callback)."""
    lobby = None
    # Like the bet-amount handler, pick the last lobby this player is in.
    for chat_id in games:
        if call.from_user.id in games[chat_id]['players']:
            lobby = games[chat_id]
    if lobby is None:
        return
    player = lobby['players'][call.from_user.id]
    player['betto'] = call.data
    medit('Ставка принята. Вы поставили '+str(player['bet'])+' членокоинов на '+call.data+'! Ждите результатов в чате', call.from_user.id, call.message.message_id)
    bot.send_message(lobby['id'], player['name']+' поставил '+str(player['bet'])+' членокоинов на '+call.data+'!')
def createuser(id, name):
    """Build a fresh per-player record, keyed by the player's Telegram id."""
    record = {'id': id, 'bet': None, 'betto': None, 'name': name}
    return {id: record}
def createlobby(id):
    """Build a fresh lobby record for chat ``id`` with the fixed payout table."""
    payout = {
        '1-15': 1.5, '16-30': 1.5, '0': 25,
        '1-5': 5, '6-10': 5, '11-15': 5,
        '16-20': 5, '21-25': 5, '26-30': 5,
    }
    lobby = {'id': id, 'began': 0, 'players': {}, 'result': None, 'coef': payout}
    return {id: lobby}
def medit(message_text,chat_id, message_id,reply_markup=None,parse_mode='Markdown'):
    """Shortcut around bot.edit_message_text with the argument order used here."""
    return bot.edit_message_text(
        chat_id=chat_id,
        message_id=message_id,
        text=message_text,
        reply_markup=reply_markup,
        parse_mode=parse_mode,
    )
# Keep polling forever, restarting on transient network errors.
# The imports used to sit inside the loop body and were re-executed on
# every iteration; they are hoisted out here.
from requests.exceptions import ReadTimeout
from requests.exceptions import ConnectionError

while True:
    try:
        bot.polling()
    except (ReadTimeout, ConnectionError):
        # Transient network failure: just restart polling.
        pass
| [
"threading.Timer",
"telebot.TeleBot",
"telebot.types.InlineKeyboardButton",
"telebot.types.InlineKeyboardMarkup",
"pymongo.MongoClient",
"random.randint"
] | [((232, 254), 'telebot.TeleBot', 'telebot.TeleBot', (['token'], {}), '(token)\n', (247, 254), False, 'import telebot\n'), ((323, 343), 'pymongo.MongoClient', 'MongoClient', (['client1'], {}), '(client1)\n', (334, 343), False, 'from pymongo import MongoClient\n'), ((2202, 2245), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll3'], {'args': '[id, id2]'}), '(0.1, roll3, args=[id, id2])\n', (2217, 2245), False, 'import threading\n'), ((2337, 2380), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll4'], {'args': '[id, id2]'}), '(0.1, roll4, args=[id, id2])\n', (2352, 2380), False, 'import threading\n'), ((2477, 2520), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll5'], {'args': '[id, id2]'}), '(0.1, roll5, args=[id, id2])\n', (2492, 2520), False, 'import threading\n'), ((2617, 2660), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll6'], {'args': '[id, id2]'}), '(0.1, roll6, args=[id, id2])\n', (2632, 2660), False, 'import threading\n'), ((2761, 2804), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll7'], {'args': '[id, id2]'}), '(0.1, roll7, args=[id, id2])\n', (2776, 2804), False, 'import threading\n'), ((2899, 2942), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll8'], {'args': '[id, id2]'}), '(0.1, roll8, args=[id, id2])\n', (2914, 2942), False, 'import threading\n'), ((3038, 3081), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll9'], {'args': '[id, id2]'}), '(0.1, roll9, args=[id, id2])\n', (3053, 3081), False, 'import threading\n'), ((3181, 3225), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll10'], {'args': '[id, id2]'}), '(0.1, roll10, args=[id, id2])\n', (3196, 3225), False, 'import threading\n'), ((3326, 3370), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll11'], {'args': '[id, id2]'}), '(0.1, roll11, args=[id, id2])\n', (3341, 3370), False, 'import threading\n'), ((3471, 3515), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll12'], {'args': '[id, id2]'}), '(0.1, roll12, args=[id, id2])\n', (3486, 
3515), False, 'import threading\n'), ((3616, 3661), 'threading.Timer', 'threading.Timer', (['(0.1)', 'rollend'], {'args': '[id, id2]'}), '(0.1, rollend, args=[id, id2])\n', (3631, 3661), False, 'import threading\n'), ((3717, 3738), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (3731, 3738), False, 'import random\n'), ((1914, 1935), 'random.randint', 'random.randint', (['(0)', '(30)'], {}), '(0, 30)\n', (1928, 1935), False, 'import random\n'), ((2020, 2081), 'threading.Timer', 'threading.Timer', (['(0.1)', 'roll2'], {'args': '[m.chat.id, msg.message_id]'}), '(0.1, roll2, args=[m.chat.id, msg.message_id])\n', (2035, 2081), False, 'import threading\n'), ((10472, 10500), 'telebot.types.InlineKeyboardMarkup', 'types.InlineKeyboardMarkup', ([], {}), '()\n', (10498, 10500), False, 'from telebot import types\n'), ((10532, 10593), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""1-15"""', 'callback_data': '"""1-15"""'}), "(text='1-15', callback_data='1-15')\n", (10558, 10593), False, 'from telebot import types\n'), ((10594, 10657), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""16-30"""', 'callback_data': '"""16-30"""'}), "(text='16-30', callback_data='16-30')\n", (10620, 10657), False, 'from telebot import types\n'), ((10658, 10717), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""1-5"""', 'callback_data': '"""1-5"""'}), "(text='1-5', callback_data='1-5')\n", (10684, 10717), False, 'from telebot import types\n'), ((10750, 10811), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""6-10"""', 'callback_data': '"""6-10"""'}), "(text='6-10', callback_data='6-10')\n", (10776, 10811), False, 'from telebot import types\n'), ((10812, 10875), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""11-15"""', 'callback_data': '"""11-15"""'}), "(text='11-15', 
callback_data='11-15')\n", (10838, 10875), False, 'from telebot import types\n'), ((10876, 10939), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""16-20"""', 'callback_data': '"""16-20"""'}), "(text='16-20', callback_data='16-20')\n", (10902, 10939), False, 'from telebot import types\n'), ((10972, 11035), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""21-25"""', 'callback_data': '"""21-25"""'}), "(text='21-25', callback_data='21-25')\n", (10998, 11035), False, 'from telebot import types\n'), ((11036, 11099), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""26-30"""', 'callback_data': '"""26-30"""'}), "(text='26-30', callback_data='26-30')\n", (11062, 11099), False, 'from telebot import types\n'), ((11100, 11155), 'telebot.types.InlineKeyboardButton', 'types.InlineKeyboardButton', ([], {'text': '"""0"""', 'callback_data': '"""0"""'}), "(text='0', callback_data='0')\n", (11126, 11155), False, 'from telebot import types\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time: 2021-10-13 8:30 下午
Author: huayang
Subject:
"""
import os
import sys
import json
import doctest
from typing import *
from collections import defaultdict
from torch.nn import functional as F # noqa
from huaytools.pytorch.modules.loss.mean_squared_error import mean_squared_error_loss
def cosine_similarity_loss(x1, x2, labels):
    """Cosine-similarity regression loss.

    Computes the cosine similarity between corresponding rows of ``x1``
    and ``x2`` along the last dimension, then returns the mean-squared
    error of those scores against ``labels``.

    The previous docstring contained a stale, commented-out doctest copied
    from ``negative_log_likelihood_loss``; it has been removed.

    Args:
        x1: [B, N]
        x2: same shape as x1
        labels: [B] or scalar target similarities

    Returns:
        Whatever ``mean_squared_error_loss`` returns for a [B] input —
        per the original docstring, a [B] vector or a scalar.
    """
    cosine_scores = F.cosine_similarity(x1, x2, dim=-1)  # [B]
    return mean_squared_error_loss(cosine_scores, labels)  # [B]
def _test():
""""""
doctest.testmod()
# Script entry point: run the doctests when executed directly.
if __name__ == '__main__':
    """"""
    _test()
| [
"torch.nn.functional.cosine_similarity",
"huaytools.pytorch.modules.loss.mean_squared_error.mean_squared_error_loss",
"doctest.testmod"
] | [((1024, 1059), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['x1', 'x2'], {'dim': '(-1)'}), '(x1, x2, dim=-1)\n', (1043, 1059), True, 'from torch.nn import functional as F\n'), ((1078, 1124), 'huaytools.pytorch.modules.loss.mean_squared_error.mean_squared_error_loss', 'mean_squared_error_loss', (['cosine_scores', 'labels'], {}), '(cosine_scores, labels)\n', (1101, 1124), False, 'from huaytools.pytorch.modules.loss.mean_squared_error import mean_squared_error_loss\n'), ((1162, 1179), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1177, 1179), False, 'import doctest\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 12:48:08 2020
@author: smith
"""
import spacy
from gensim.test.utils import common_texts, get_tmpfile
from gensim.models import Word2Vec
from gensim.models.phrases import Phrases, Phraser
import os
import multiprocessing
import csv
import re
import pandas as pd
from time import time
from datetime import datetime
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
# Global plotting / logging configuration for this analysis script.
sns.set_style("darkgrid")
import logging
import gensim
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
# --- Load the trained word2vec model and the term/enrichment tables ------
# NOTE(review): all paths are absolute and machine-specific.
w2v_dir = '/home/smith/Smith_Scripts/NLP_GeneExpression/w2v_model/model071520/'
w2v_model = Word2Vec.load(os.path.join(w2v_dir, 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model'))
# Tag appended to every output filename so results can be traced to this model.
modelName = '_w2v071520_'
resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/'
clusters = ['Cluster' + str(x) for x in range(20)]
category = 'CellTypes'
comparison = 'MarkerGenes'
# Term frequency table, sorted so the most frequent terms come first.
termIndex = pd.read_excel(os.path.join(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison + '_Frequency.xlsx'), index_col=0)
termIndex = termIndex.sort_values(by='Combined Occurances', ascending=False)
enrichIndex = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx', index_col=0)
# Keep every 4th column — presumably the per-cluster term columns; verify
# against the spreadsheet layout.
enrIndex = enrichIndex.iloc[:,::4]
def calcTopSimilarities(cluster, category, min_freq=5, topn=2000, save=False):
    """For each marker gene of ``cluster``, collect w2v-similar enriched terms.

    Queries the word2vec model for the ``topn`` nearest neighbours of each
    (lower-cased) marker gene and keeps only neighbours that appear in the
    enriched-term list for this cluster (or, for category 'CellTypes', the
    150 most frequent terms overall).

    Returns:
        (cat, catX): long-format results stacked row-wise, and the same
        results laid out column-wise per gene.

    NOTE(review): ``min_freq`` is currently unused in this function body.
    """
    resultDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
    clusterDirectory = os.path.join(resultDirectory, cluster + '_MarkerGenes_Results/')
    clusterNum=cluster.replace('Cluster', '')
    genesDf = pd.read_excel('/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx')
    genesList = genesDf[str(clusterNum) + '_n'].tolist()
    # NOTE(review): this assignment is dead — it is overwritten immediately
    # below by the lower-casing loop.
    genes = genesList
    genes = []
    for gene in genesList:
        genes.append(gene.lower())
    # (Earlier variants read the term list from per-cluster frequency
    # spreadsheets; the enrichment table loaded at module level is used now.)
    words = enrIndex
    wordsRedacted = words[cluster + ' term'].tolist()[:-1]
    if category == 'CellTypes':
        wordsRedacted = termIndex['word'].tolist()[:150]
    newWords = []
    # Normalize multi-word terms to the model's token form; non-strings
    # (e.g. NaN) are skipped via the AttributeError handler.
    for item in wordsRedacted:
        try:
            item = item.replace(' ', '_')
            newWords.append(item)
        except AttributeError:
            pass
    cat = pd.DataFrame()
    catX = pd.DataFrame()
    for gene in genes:
        gene = gene.lower()
        try:
            df = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity', 'similarity'])
            df['gene'] = gene
            df2 = df.loc[df['entity'].isin(newWords)]
            df2 = df2.reset_index(drop=True)
            dfX = pd.DataFrame(w2v_model.wv.most_similar(positive=[str(gene)], topn=topn), columns=['entity ' + gene, 'similarity ' + gene])
            dfX2 = dfX.loc[dfX['entity ' + gene].isin(newWords)]
            dfX2 = dfX2.reset_index(drop=True)
            cat = pd.concat([cat, df2], axis=0)
            cat = cat.reset_index(drop=True)
            catX = pd.concat([catX, dfX2], axis=1)
            catX = catX.reset_index(drop=True)
        # Genes missing from the w2v vocabulary raise KeyError and are skipped.
        except KeyError:
            pass
    if save:
        # (Older variants wrote into clusterDirectory instead.)
        cat.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
        catX.to_excel(os.path.join(resultDirectory, cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx'))
    return(cat, catX)
def averageSimilarities(cluster, category):
    """Aggregate per-gene similarities into one weighted average per entity.

    Reads the spreadsheet written by calcTopSimilarities(), then for every
    unique entity computes the mean/stdev similarity across genes, a
    count-weighted average, and the list of contributing genes. Writes the
    sorted table back out and returns it.

    Fix: the FileNotFoundError message used to report a differently-ordered
    filename (``cluster + modelName + '_Similarities_Enriched_' + category``)
    than the path actually checked; it now reports the real path.
    """
    clusterDirectory = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
    if not os.path.exists(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')):
        raise FileNotFoundError("Similarities file doesn't exist at " + os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
    else:
        df = pd.read_excel(os.path.join(clusterDirectory, cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx'))
    itemList = []
    aveList = []
    stdList = []
    weightList = []
    countList = []
    geneList = []
    for item in df['entity'].unique().tolist():
        ave = np.mean(df.loc[df['entity']==item]['similarity'])
        std = np.std(df.loc[df['entity']==item]['similarity'])
        gene = df.loc[df['entity']==item]['gene'].tolist()
        count = len(gene)
        # Weight the mean by how many genes contributed to it.
        weightedAve = df.loc[df['entity']==item].shape[0]*ave
        itemList.append(item)
        aveList.append(ave)
        stdList.append(std)
        weightList.append(weightedAve)
        countList.append(count)
        geneList.append(gene)
    df = pd.DataFrame(data=[itemList, aveList, stdList, weightList, countList, geneList]).T
    df.columns=['entity', 'ave_similarity', 'stdev', 'weighted_ave', 'count', 'similar_genes']
    df = df.sort_values(by='weighted_ave', ascending=False)
    df = df.drop_duplicates(subset='entity', keep='first')
    df.to_excel(os.path.join(clusterDirectory, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx'))
    return(df)
def combineAverageSims(clusters, category, save=True):
    """Concatenate each cluster's average-similarity sheet side by side.

    Reads ``<cluster>_averageSimilarities_Enriched<category><modelName>.xlsx``
    for every cluster, prefixes every column with the cluster name, and
    optionally writes the combined frame back to the results directory.
    """
    results_dir = '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/AllComparisons_Results/'
    bigDf = pd.DataFrame()
    for cluster in clusters:
        sheet = os.path.join(results_dir, cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx')
        frame = pd.read_excel(sheet, index_col=0)
        frame.columns = [cluster + suffix for suffix in
                         ('_entity', '_average_sim', '_stdev', '_weightedAve', '_count', '_similarGenes')]
        bigDf = pd.concat([bigDf, frame], axis=1)
    if save:
        bigDf.to_excel(os.path.join(results_dir, 'Combined_AverageSimilarities' + modelName + category + '.xlsx'))
    return bigDf
# --- Interactive analysis session (runs on import!) ----------------------
cat, catX = calcTopSimilarities('Cluster0', 'Functions', save=True)
df = averageSimilarities('Cluster0', 'Functions')
# Recompute similarities and averages for every cluster.
for cluster in clusters:
    calcTopSimilarities(cluster, 'CellTypes', min_freq=5, topn=10000, save=True)
for cluster in clusters:
    averageSimilarities(cluster, 'CellTypes')
df = combineAverageSims(clusters, 'CellTypes', save=True)
df = averageSimilarities('Cluster5', 'Functions')
###FREQUENCY DISTRIBUTION:
cat = pd.read_excel('/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx')
def tsnescatterplot(model, setName, word, list_names,):
    """ Plot in seaborn the results from the t-SNE dimensionality reduction algorithm of the vectors of a query word,
    its list of most similar words, and a list of words.
    """
    # Assumes 300-dimensional word vectors — TODO confirm against the model.
    arrays = np.empty((0, 300), dtype='f')
    word_labels = [word]
    color_list = ['red']
    # adds the vector of the query word
    arrays = np.append(arrays, model.wv.__getitem__([word]), axis=0)
    # gets list of most similar words
    close_words = model.wv.most_similar([word])
    # adds the vector for each of the closest words to the array
    # NOTE(review): the try spans BOTH loops, so one out-of-vocabulary word
    # silently drops all remaining words from the plot.
    try:
        for wrd_score in close_words:
            wrd_vector = model.wv.__getitem__([wrd_score[0]])
            word_labels.append(wrd_score[0])
            color_list.append('blue')
            arrays = np.append(arrays, wrd_vector, axis=0)
    # adds the vector for each of the words from list_names to the array
        for wrd in list_names:
            wrd_vector = model.wv.__getitem__([wrd])
            word_labels.append(wrd)
            color_list.append('green')
            arrays = np.append(arrays, wrd_vector, axis=0)
    except KeyError:
        pass
    # Reduces the dimensionality from 300 to 50 dimensions with PCA
    reduc = PCA(n_components=42).fit_transform(arrays) ###### CHANGED FROM 50 DURING TUTORIAL
    # Finds t-SNE coordinates for 2 dimensions
    np.set_printoptions(suppress=True)
    Y = TSNE(n_components=2, random_state=0, perplexity=10).fit_transform(reduc)
    # Sets everything up to plot
    df = pd.DataFrame({'x': [x for x in Y[:, 0]],
                       'y': [y for y in Y[:, 1]],
                       'words': word_labels,
                       'color': color_list})
    fig, _ = plt.subplots()
    fig.set_size_inches(9, 9)
    # Basic plot
    p1 = sns.regplot(data=df,
                     x="x",
                     y="y",
                     fit_reg=False,
                     marker="o",
                     scatter_kws={'s': 40,
                                  'facecolors': df['color']
                                 }
                    )
    # Adds annotations one by one with a loop
    for line in range(0, df.shape[0]):
         p1.text(df["x"][line],
                 df['y'][line],
                 '  ' + df["words"][line].title(),
                 horizontalalignment='left',
                 verticalalignment='bottom', size='medium',
                 color=df['color'][line],
                 weight='normal'
                ).set_size(15)
    plt.xlim(Y[:, 0].min()-50, Y[:, 0].max()+50)
    plt.ylim(Y[:, 1].min()-50, Y[:, 1].max()+50)
    plt.title('t-SNE visualization for {}'.format(word.title()))
    # Figure is saved to disk, not shown.
    plt.savefig(os.path.join(resultDirectory, setName + modelName + word + '_tSNE_42PCs.png'))
# NOTE(review): setName, word and newWords are not defined at module level —
# this call raises NameError when run as a script; presumably leftovers from
# an interactive session.
tsnescatterplot(w2v_model, setName, word, newWords)
# Ad-hoc vocabulary exploration; return values are discarded.
w2v_model.wv.most_similar(positive=["drug_addiction"], topn=20)
w2v_model.wv.most_similar(positive=["nucleus_accumbens"], topn=20)
w2v_model.wv.most_similar(positive=["vta"], topn=20)
w2v_model.wv.most_similar(positive=["dbi"], topn=20)
w2v_model.wv.most_similar(positive=["enkephalin", "cacng4"], negative=["opioid"], topn=20)
w2v_model.wv.most_similar(positive=["slc17a7", "cacng4"], negative=["glutamatergic_neuron"], topn=20)
###RUN PCA:
# fit a 2d PCA model to the vectors
# NOTE(review): indexing the model object directly is the deprecated gensim
# API; only the first two components are plotted despite n_components=50.
X = w2v_model[w2v_model.wv.vocab]
pca = PCA(n_components=50)
result = pca.fit_transform(X)
#Plot the result
fig, ax = plt.subplots()
ax.plot(result[:, 0], result[:, 1], 'o')
ax.set_title('Entities')
plt.show()
words = list(w2v_model.wv.vocab.keys())
| [
"logging.basicConfig",
"numpy.mean",
"seaborn.regplot",
"sklearn.decomposition.PCA",
"numpy.set_printoptions",
"numpy.std",
"os.path.join",
"sklearn.manifold.TSNE",
"seaborn.set_style",
"numpy.append",
"numpy.empty",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.concat",
"matplotlib.p... | [((569, 594), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (582, 594), True, 'import seaborn as sns\n'), ((624, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s - %(asctime)s: %(message)s"""', 'datefmt': '"""%H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(levelname)s - %(asctime)s: %(message)s',\n datefmt='%H:%M:%S', level=logging.INFO)\n", (643, 738), False, 'import logging\n'), ((1409, 1573), 'pandas.read_excel', 'pd.read_excel', (['"""/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx"""'], {'index_col': '(0)'}), "(\n '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Combined_Clusters_Enriched_CellTypes_MarkerGenes.xlsx'\n , index_col=0)\n", (1422, 1573), True, 'import pandas as pd\n'), ((7399, 7553), 'pandas.read_excel', 'pd.read_excel', (['"""/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx"""'], {}), "(\n '/home/smith/Smith_Scripts/NLP_GeneExpression/spaCy_model062920/Results/Cluster3_Results/Functions_cat_model062920_Cluster3.xlsx'\n )\n", (7412, 7553), True, 'import pandas as pd\n'), ((10983, 11003), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(50)'}), '(n_components=50)\n', (10986, 11003), False, 'from sklearn.decomposition import PCA\n'), ((11061, 11075), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11073, 11075), True, 'import matplotlib.pyplot as plt\n'), ((11142, 11152), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11150, 11152), True, 'import matplotlib.pyplot as plt\n'), ((843, 935), 'os.path.join', 'os.path.join', (['w2v_dir', '"""w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model"""'], {}), "(w2v_dir,\n 'w2v_model071520_MarkerGenes_UpOC_DownOC_CombinedSentences.model')\n", (855, 935), False, 'import os\n'), ((1181, 1306), 
'os.path.join', 'os.path.join', (['resultDirectory', "('MarkerGenes_Results/Combined_Clusters_' + category + '_' + comparison +\n '_Frequency.xlsx')"], {}), "(resultDirectory, 'MarkerGenes_Results/Combined_Clusters_' +\n category + '_' + comparison + '_Frequency.xlsx')\n", (1193, 1306), False, 'import os\n'), ((1821, 1885), 'os.path.join', 'os.path.join', (['resultDirectory', "(cluster + '_MarkerGenes_Results/')"], {}), "(resultDirectory, cluster + '_MarkerGenes_Results/')\n", (1833, 1885), False, 'import os\n'), ((1946, 2125), 'pandas.read_excel', 'pd.read_excel', (['"""/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx"""'], {}), "(\n '/d1/studies/cellranger/ACWS_DP/scanpy_DiffExp_V2/results_maxGenes3000_maxMito.05_MinDisp0.2/DP_OC_Saline_Merged_t-test_pval_table_500genes_clusters.xlsx'\n )\n", (1959, 2125), True, 'import pandas as pd\n'), ((3037, 3051), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3049, 3051), True, 'import pandas as pd\n'), ((3063, 3077), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3075, 3077), True, 'import pandas as pd\n'), ((6377, 6391), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6389, 6391), True, 'import pandas as pd\n'), ((7802, 7831), 'numpy.empty', 'np.empty', (['(0, 300)'], {'dtype': '"""f"""'}), "((0, 300), dtype='f')\n", (7810, 7831), True, 'import numpy as np\n'), ((8962, 8996), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (8981, 8996), True, 'import numpy as np\n'), ((9130, 9245), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [x for x in Y[:, 0]], 'y': [y for y in Y[:, 1]], 'words': word_labels,\n 'color': color_list}"], {}), "({'x': [x for x in Y[:, 0]], 'y': [y for y in Y[:, 1]], 'words':\n word_labels, 'color': color_list})\n", (9142, 9245), True, 'import pandas as pd\n'), ((9329, 9343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), 
'()\n', (9341, 9343), True, 'import matplotlib.pyplot as plt\n'), ((9405, 9521), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'df', 'x': '"""x"""', 'y': '"""y"""', 'fit_reg': '(False)', 'marker': '"""o"""', 'scatter_kws': "{'s': 40, 'facecolors': df['color']}"}), "(data=df, x='x', y='y', fit_reg=False, marker='o', scatter_kws={\n 's': 40, 'facecolors': df['color']})\n", (9416, 9521), True, 'import seaborn as sns\n'), ((5289, 5340), 'numpy.mean', 'np.mean', (["df.loc[df['entity'] == item]['similarity']"], {}), "(df.loc[df['entity'] == item]['similarity'])\n", (5296, 5340), True, 'import numpy as np\n'), ((5353, 5403), 'numpy.std', 'np.std', (["df.loc[df['entity'] == item]['similarity']"], {}), "(df.loc[df['entity'] == item]['similarity'])\n", (5359, 5403), True, 'import numpy as np\n'), ((5745, 5830), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[itemList, aveList, stdList, weightList, countList, geneList]'}), '(data=[itemList, aveList, stdList, weightList, countList, geneList]\n )\n', (5757, 5830), True, 'import pandas as pd\n'), ((6058, 6168), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_averageSimilarities_Enriched' +\n category + modelName + '.xlsx')\n", (6070, 6168), False, 'import os\n'), ((6761, 6791), 'pandas.concat', 'pd.concat', (['[bigDf, df]'], {'axis': '(1)'}), '([bigDf, df], axis=1)\n', (6770, 6791), True, 'import pandas as pd\n'), ((10325, 10402), 'os.path.join', 'os.path.join', (['resultDirectory', "(setName + modelName + word + '_tSNE_42PCs.png')"], {}), "(resultDirectory, setName + modelName + word + '_tSNE_42PCs.png')\n", (10337, 10402), False, 'import os\n'), ((3666, 3695), 'pandas.concat', 'pd.concat', (['[cat, df2]'], {'axis': '(0)'}), '([cat, df2], axis=0)\n', (3675, 3695), True, 'import pandas as pd\n'), ((3760, 3791), 'pandas.concat', 'pd.concat', (['[catX, dfX2]'], {'axis': '(1)'}), '([catX, dfX2], 
axis=1)\n', (3769, 3791), True, 'import pandas as pd\n'), ((4169, 4272), 'os.path.join', 'os.path.join', (['resultDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')"], {}), "(resultDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + '.xlsx')\n", (4181, 4272), False, 'import os\n'), ((4292, 4400), 'os.path.join', 'os.path.join', (['resultDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + 'axis1.xlsx')"], {}), "(resultDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + 'axis1.xlsx')\n", (4304, 4400), False, 'import os\n'), ((4702, 4806), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + '.xlsx')\n", (4714, 4806), False, 'import os\n'), ((5016, 5120), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_Similarities_Enriched_' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_Similarities_Enriched_' +\n category + modelName + '.xlsx')\n", (5028, 5120), False, 'import os\n'), ((6456, 6566), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + '_averageSimilarities_Enriched' + category + modelName + '.xlsx')"], {}), "(clusterDirectory, cluster + '_averageSimilarities_Enriched' +\n category + modelName + '.xlsx')\n", (6468, 6566), False, 'import os\n'), ((6836, 6935), 'os.path.join', 'os.path.join', (['clusterDirectory', "('Combined_AverageSimilarities' + modelName + category + '.xlsx')"], {}), "(clusterDirectory, 'Combined_AverageSimilarities' + modelName +\n category + '.xlsx')\n", (6848, 6935), False, 'import os\n'), ((8368, 8405), 'numpy.append', 'np.append', (['arrays', 'wrd_vector'], {'axis': '(0)'}), '(arrays, wrd_vector, axis=0)\n', (8377, 8405), True, 'import numpy as np\n'), ((8672, 8709), 'numpy.append', 'np.append', (['arrays', 'wrd_vector'], 
{'axis': '(0)'}), '(arrays, wrd_vector, axis=0)\n', (8681, 8709), True, 'import numpy as np\n'), ((8824, 8844), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(42)'}), '(n_components=42)\n', (8827, 8844), False, 'from sklearn.decomposition import PCA\n'), ((9010, 9061), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(0)', 'perplexity': '(10)'}), '(n_components=2, random_state=0, perplexity=10)\n', (9014, 9061), False, 'from sklearn.manifold import TSNE\n'), ((4877, 4981), 'os.path.join', 'os.path.join', (['clusterDirectory', "(cluster + modelName + '_Similarities_Enriched_' + category + '.xlsx')"], {}), "(clusterDirectory, cluster + modelName +\n '_Similarities_Enriched_' + category + '.xlsx')\n", (4889, 4981), False, 'import os\n')] |
import setuptools
__author__ = 'serena'
# Importing multiprocessing up front is a commonly used workaround for an
# atexit error that can occur during `python setup.py test`; it is a harmless
# no-op when the module is unavailable — TODO confirm still needed.
try:
    import multiprocessing  # noqa
except ImportError:
    pass
# All packaging metadata is delegated to pbr (which reads setup.cfg and git).
setuptools.setup(
    setup_requires=['pbr==2.0.0'],
    pbr=True)
| [
"setuptools.setup"
] | [((113, 170), 'setuptools.setup', 'setuptools.setup', ([], {'setup_requires': "['pbr==2.0.0']", 'pbr': '(True)'}), "(setup_requires=['pbr==2.0.0'], pbr=True)\n", (129, 170), False, 'import setuptools\n')] |
import os
import re
import html
import unidecode
import torch
HTML_CLEANER_REGEX = re.compile('<.*?>')


def clean_html(text):
    """Replace every HTML/XML tag in ``text`` with a single space."""
    return HTML_CLEANER_REGEX.sub(' ', text)
def binarize_labels(labels, hard=True):
    """Min-max normalize ``labels`` to [0, 1]; if ``hard``, threshold at 0.5.

    Args:
        labels: sequence of numeric labels.
        hard: when True, return integer 0/1 labels (normalized value > 0.5
            maps to 1); when False, return the normalized float labels.

    Returns:
        numpy array of normalized (and possibly thresholded) labels.  When
        all labels are identical the original values are returned unchanged,
        since normalization would divide by zero.
    """
    # BUG FIX: the enclosing module never imports numpy, so the original
    # raised NameError on `np` at call time; import locally to stay
    # self-contained.
    import numpy as np
    labels = np.array(labels)
    min_label = min(labels)
    label_range = max(labels) - min_label
    if label_range == 0:
        return labels
    labels = (labels - min_label) / label_range
    if hard:
        labels = (labels > .5).astype(int)
    return labels
def process_str(text, front_pad='\n ', end_pad=' ', maxlen=None, clean_markup=True,
                clean_unicode=True, encode='utf-8', limit_repeats=3):
    """
    Processes text according to the criteria specified in section 4 of
    https://arxiv.org/pdf/1704.01444.pdf (Radford et al.): strips HTML markup,
    transliterates unicode to ascii (via unidecode), unescapes HTML entities,
    optionally truncates, limits consecutive repeats, pads and encodes.

    Args:
        text: input string.
        front_pad / end_pad: padding added before/after the processed text.
        maxlen: if given, keep at most ``maxlen - len(front_pad) - len(end_pad)``
            tokens.  NOTE(review): this truncates by whitespace-token count,
            not by characters — confirm that is the intended unit.
        clean_markup: strip HTML tags first.
        clean_unicode: transliterate unicode to ascii.
        encode: encoding for the returned bytes; ``None`` returns a str.
        limit_repeats: drop tokens repeated more than this many times in a
            row (0 disables).

    Returns:
        The processed text as bytes (or str when ``encode`` is None).
    """
    if clean_markup:
        text = clean_html(text)
    if clean_unicode:
        text = unidecode.unidecode(text)
    text = html.unescape(text)
    text = text.split()
    if maxlen is not None:
        len2use = maxlen - len(front_pad) - len(end_pad)
        text = text[:len2use]
    if limit_repeats > 0:
        # BUG FIX: remove_repeats returns a new list (it does not mutate its
        # argument); the original discarded the result, making limit_repeats
        # a no-op.
        text = remove_repeats(text, limit_repeats, join=False)
    text = front_pad + (" ".join(text)) + end_pad
    if encode is not None:
        text = text.encode(encoding=encode)
    return text
def remove_repeats(string, n, join=True):
    """Drop elements repeated more than ``n`` times consecutively.

    Works on any iterable of comparable items (characters of a string, or a
    list of tokens).  Returns a joined string when ``join`` is True,
    otherwise the filtered list of items.
    """
    kept = []
    run_length = 0
    previous = ''
    for item in string:
        run_length = run_length + 1 if item == previous else 0
        previous = item
        if run_length < n:
            kept.append(item)
    return "".join(kept) if join else kept
def tokenize_str_batch(strings, rtn_maxlen=True, process=True, maxlen=None):
    """
    Tokenize a list of strings into a single padded ByteTensor.

    Args:
        strings: utf-8 strings to tokenize.
        rtn_maxlen: controls the second return value —
            True  -> ``(batch_tensor, max_length)``;
            False -> ``(batch_tensor, per_string_lengths)`` (useful for
                     dynamic-length RNNs);
            None  -> ``batch_tensor`` only.
        process: when True, run ``process_str`` on each string; otherwise
            just ascii-encode each string (ignoring non-ascii characters).
        maxlen: forwarded to ``process_str`` when ``process`` is True.

    Returns:
        See ``rtn_maxlen`` above; ``batch_tensor`` has shape
        ``[len(strings), max_length]``.
    """
    if process:
        encoded = [process_str(s, maxlen=maxlen) for s in strings]
    else:
        encoded = [s.encode('ascii', 'ignore') for s in strings]
    lens = [len(s) for s in encoded]
    longest = max(lens)
    batch_tensor = torch.ByteTensor(len(lens), longest)
    for row, s in enumerate(encoded):
        _tokenize_str(s, batch_tensor[row])
    if rtn_maxlen is None:
        return batch_tensor
    if rtn_maxlen:
        return batch_tensor, longest
    return batch_tensor, lens
def _tokenize_str(string, char_tensor=None):
"""
Parses a utf-8 encoded string and assigns to ByteTensor char_tensor.
If no char_tensor is provide one is created.
Typically used internally by `tokenize_str_batch`.
"""
if char_tensor is None:
char_tensor = torch.ByteTensor(len(string.encode()))
for i, char in enumerate(string):
char_tensor[i] = char
return char_tensor
| [
"re.sub",
"unidecode.unidecode",
"html.unescape",
"re.compile"
] | [((85, 104), 're.compile', 're.compile', (['"""<.*?>"""'], {}), "('<.*?>')\n", (95, 104), False, 'import re\n'), ((170, 207), 're.sub', 're.sub', (['HTML_CLEANER_REGEX', '""" """', 'text'], {}), "(HTML_CLEANER_REGEX, ' ', text)\n", (176, 207), False, 'import re\n'), ((1118, 1137), 'html.unescape', 'html.unescape', (['text'], {}), '(text)\n', (1131, 1137), False, 'import html\n'), ((1080, 1105), 'unidecode.unidecode', 'unidecode.unidecode', (['text'], {}), '(text)\n', (1099, 1105), False, 'import unidecode\n')] |
# -*- coding: utf-8 -*-
"""
Sphinx configuration for nexson.
Largely based on <NAME>'s conf.py in DendroPy.
"""
import sys
import os
import time
from sphinx.ext import autodoc
from nexson import __version__ as PROJECT_VERSION
# -- Sphinx Hackery ------------------------------------------------
# Following allows for a docstring of a method to be inserted "nakedly"
# (without the signature etc.) into the current context by, for example::
#
# .. autodocstringonly:: dendropy.dataio.newickreader.NewickReader.__init__
#
# Based on:
#
# http://stackoverflow.com/questions/7825263/including-docstring-in-sphinx-documentation
class DocStringOnlyMethodDocumenter(autodoc.MethodDocumenter):
    """autodoc documenter that inserts only an object's docstring — no
    signature or header — used via the ``.. autodocstringonly::`` directive
    (see the module comment above this class)."""
    objtype = "docstringonly"
    # do not indent the content
    content_indent = " "
    # do not add a header to the docstring
    def add_directive_header(self, sig):
        pass
    # def add_line(self, line, source, *lineno):
    #     """Append one line of generated reST to the output."""
    #     print self.indent + line
    #     self.directive.result.append(self.indent + line, source, *lineno)
class KeywordArgumentsOnlyMethodDocumenter(autodoc.MethodDocumenter):
    """autodoc documenter that emits only the ``:Keyword Arguments:`` portion
    of a method's documentation, without signature or header."""
    objtype = "keywordargumentsonly"
    priority = 0  # do not override normal autodocumenter
    # do not indent the content
    content_indent = " "
    def add_directive_header(self, sig):
        # Suppress the usual signature/header line entirely.
        pass
    def add_line(self, line, source, *lineno):
        # Start emitting once the ":Keyword Arguments:" marker is seen; the
        # marker itself is blanked out so only the argument list remains.
        marker = ":Keyword Arguments:"
        if marker in line:
            line = line.replace(marker, " ")
            self._emit_line = True
        if not getattr(self, "_emit_line", False):
            return
        self.directive.result.append(self.indent + line, source, *lineno)
def setup(app):
    """Sphinx extension entry point: register the custom autodocumenters."""
    # app.add_autodocumenter(DocStringOnlyMethodDocumenter)
    app.add_autodocumenter(KeywordArgumentsOnlyMethodDocumenter)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinxcontrib.napoleon', # requires: pip install sphinxcontrib-napoleon
# 'numpydoc', # requires: pip install numpydoc
]
# If 'both', then class docstring and class.__init__() docstring combined for
# class documentation.
# If 'init', then only class.__init__() docstring shown for class documentation
# (class docstring omitted).
# If not specified, then only class docstring shown for class documentation
# (__init__ docstring omitted).
autoclass_content = 'both' # or 'init'
# numpydoc settings
# numpydoc_show_class_members = False
# numpydoc_class_members_toctree = False
# Napoleon settings
# napoleon_google_docstring = True
# napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
# napoleon_use_admonition_for_examples = False
# napoleon_use_admonition_for_notes = False
# napoleon_use_admonition_for_references = False
# napoleon_use_ivar = False
# napoleon_use_param = False
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nexson'
copyright = u'2020-{}, Open Tree of Life developers'.format(time.strftime('%Y'))
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = PROJECT_VERSION
# The full version, including alpha/beta/rc tags.
release = PROJECT_VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
rst_prolog = """
.. |mth| replace:: <NAME>
.. _mth: https://phylo.bio.ku.edu/content/mark-t-holder
.. |Python| replace:: Python
.. _Python: http://www.python.org/
.. |Python27| replace:: Python 2.7
.. _Python 2.7: http://www.python.org/download/releases/2.7/
.. |Python2| replace:: Python 2
.. _Python 2: http://www.python.org/download/releases/2.7/
.. |Python3| replace:: Python 3
.. _Python 3: https://www.python.org/download/releases/3.4.0/
.. |setuptools| replace:: setuptools
.. _setuptools: http://pypi.python.org/pypi/setuptools
.. |pip| replace:: pip
.. _pip: http://pypi.python.org/pypi/pip
.. |Git| replace:: Git
.. _Git: http://git-scm.com/
"""
rst_prolog += """\
.. |nexson_source_archive_url| replace:: https://pypi.python.org/packages/source/P/nexson/nexson-%s.tar.gz
.. |nexson_source_archive| replace:: nexson source code archive
.. _nexson_source_archive: https//pypi.python.org/packages/source/P/nexson/nexson-%s.tar.gz
""" % (version, version)
rst_prolog += """\
.. |nexson_copyright| replace:: Copyright {copyright}. All rights reserved.
.. |
""".format(copyright=copyright)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_nexson_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "_static/dendropy_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**" : ["logo.html",
"cleardiv.html",
"searchbox.html",
"cleardiv.html",
"localtoc.html",
"cleardiv.html",
"relations.html",
"cleardiv.html",
"side_supplemental.html"],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'nexsondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'nexson.tex', u'nexson Documentation',
u'Open Tree of Life developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
# ('library/index', 'dendropy', u'DendroPy Library API Reference',
# [u'<NAME> and <NAME>'], 1),
# ('primer/index', 'dendropy-primer', u'DendroPy Primer',
# [u'<NAME> and <NAME>'], 1),
# ('programs/sumtrees', 'sumtrees', u'SumTrees User Manual',
# [u'<NAME> and <NAME>'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
# ('library/index', 'DendroPy', u'DendroPy Documentation',
# u'<NAME> and <NAME>', 'DendroPy', 'Python library for phylogenetic computing',
# 'Miscellaneous'),
# ('primer/index', 'DendroPy-Primer', u'DendroPy Primer',
# u'<NAME> and <NAME>', 'DendroPy', 'Python library for phylogenetic computing',
# 'Miscellaneous'),
# ('programs/sumtrees', 'SumTrees', u'SumTrees Documentation',
# u'<NAME> and <NAME>', 'DendroPy', 'Python library for phylogenetic computing',
# 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'nexson'
epub_author = u'Open Tree of Life developers'
epub_publisher = epub_author
epub_copyright = u'2020, {}'.format(epub_author)
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'nexson'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| [
"time.strftime"
] | [((4155, 4174), 'time.strftime', 'time.strftime', (['"""%Y"""'], {}), "('%Y')\n", (4168, 4174), False, 'import time\n')] |
from winrt.windows.media.control import GlobalSystemMediaTransportControlsSessionManager
from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions
async def get_current_session():
    """Return the system's current media transport session.

    May return a falsy value when no media session is active (callers check
    for this).  The session object exposes playback controls such as
    ``try_play_async()``, ``try_pause_async()``, ``try_toggle_play_pause()``,
    ``try_change_shuffle_active()``, ``try_skip_next()``,
    ``try_skip_previous()`` and ``try_stop()``.
    """
    manager = await GlobalSystemMediaTransportControlsSessionManager.request_async()
    return manager.get_current_session()
async def get_media_info():
    """Return the current session's media properties as a dict, or None.

    Keys are every public attribute name of the WinRT media-properties
    object (e.g. title/artist fields); values are the corresponding
    attribute values.  Returns None when no media session is active.
    """
    current_session = await get_current_session()
    if not current_session:
        return None
    media_props = await current_session.try_get_media_properties_async()
    # getattr() is the idiomatic form of dynamic attribute access (the
    # original called media_props.__getattribute__ directly).
    return {
        song_attr: getattr(media_props, song_attr)
        for song_attr in dir(media_props)
        if not song_attr.startswith('_')
    }
async def read_stream_into_buffer(thumbnail_ref) -> bytearray:
    """Read the full contents of a WinRT stream reference into a bytearray.

    Args:
        thumbnail_ref: a stream reference (e.g. a media thumbnail) exposing
            ``open_read_async()``.

    Returns:
        The stream's bytes as a ``bytearray``.
    """
    buffer = Buffer(5000000)  # 5 MB capacity — assumed to fit any thumbnail
    readable_stream = await thumbnail_ref.open_read_async()
    # BUG FIX: read_async returns an async operation and must be awaited;
    # without the await the buffer could be consumed before the read
    # completes, yielding empty/partial data.
    await readable_stream.read_async(buffer, buffer.capacity, InputStreamOptions.READ_AHEAD)
    buffer_reader = DataReader.from_buffer(buffer)
    thumbnail_buffer = buffer_reader.read_bytes(buffer.length)
    return bytearray(thumbnail_buffer)
| [
"winrt.windows.storage.streams.Buffer",
"winrt.windows.storage.streams.DataReader.from_buffer",
"winrt.windows.media.control.GlobalSystemMediaTransportControlsSessionManager.request_async"
] | [((1054, 1069), 'winrt.windows.storage.streams.Buffer', 'Buffer', (['(5000000)'], {}), '(5000000)\n', (1060, 1069), False, 'from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions\n'), ((1237, 1267), 'winrt.windows.storage.streams.DataReader.from_buffer', 'DataReader.from_buffer', (['buffer'], {}), '(buffer)\n', (1259, 1267), False, 'from winrt.windows.storage.streams import DataReader, Buffer, InputStreamOptions\n'), ((516, 580), 'winrt.windows.media.control.GlobalSystemMediaTransportControlsSessionManager.request_async', 'GlobalSystemMediaTransportControlsSessionManager.request_async', ([], {}), '()\n', (578, 580), False, 'from winrt.windows.media.control import GlobalSystemMediaTransportControlsSessionManager\n')] |
from django.conf import settings
from django_webtest import WebTest
from evap.evaluation.models import Course
from evap.evaluation.models import UserProfile
from evap.rewards.models import SemesterActivation
from evap.rewards.models import RewardPointRedemptionEvent
from evap.rewards.tools import reward_points_of_user
from evap.staff.tests import lastform
from django.core.urlresolvers import reverse
from model_mommy import mommy
class RewardTests(WebTest):
    """Functional tests for the rewards app: redemption-event deletion and
    creation/editing, point redemption, and point granting triggered by
    course voting (exercised through full django-webtest page flows)."""
    fixtures = ['minimal_test_data_rewards']
    csrf_checks = False
    def test_delete_redemption_events(self):
        """
        Submits a request that tries to delete an event where users already redeemed points -> should not work.
        Secondly it issues a GET Request and asserts that the page for deleting events is returned.
        Last it submits a request that should delete the event.
        """
        # try to delete event that can not be deleted, because people already redeemed points
        response = self.app.post(reverse("rewards:reward_point_redemption_event_delete", args=[1]), user="evap")
        self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
        response = response.follow()
        self.assertContains(response, "cannot be deleted")
        # NOTE(review): the deletion attempt above targeted pk=1, yet this
        # asserts pk=2 still exists — presumably it should check pk=1;
        # confirm against the fixture.
        self.assertTrue(RewardPointRedemptionEvent.objects.filter(pk=2).exists())
        # make sure that a GET Request does not delete an event
        response = self.app.get(reverse("rewards:reward_point_redemption_event_delete", args=[2]), user="evap")
        self.assertTemplateUsed(response, "rewards_reward_point_redemption_event_delete.html")
        self.assertTrue(RewardPointRedemptionEvent.objects.filter(pk=2).exists())
        # now delete for real
        response = self.app.post(reverse("rewards:reward_point_redemption_event_delete", args=[2]), user="evap")
        self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
        self.assertFalse(RewardPointRedemptionEvent.objects.filter(pk=2).exists())
    def test_redeem_reward_points(self):
        """
        Submits a request that redeems all available reward points and checks that this works.
        Also checks that it is not possible to redeem more points than the user actually has.
        """
        response = self.app.get(reverse("rewards:index"), user="student")
        self.assertEqual(response.status_code, 200)
        user = UserProfile.objects.get(pk=5)
        form = lastform(response)
        # redeem every point the user currently has for event 1
        form.set("points-1", reward_points_of_user(user))
        response = form.submit()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "You successfully redeemed your points.")
        self.assertEqual(0, reward_points_of_user(user))
        # user now has 0 points left, so redeeming any more must fail
        form.set("points-1", 1)
        form.set("points-2", 3)
        response = form.submit()
        self.assertIn(b"have enough reward points.", response.body)
    def test_create_redemption_event(self):
        """
        submits a newly created redemption event and checks that the event has been created
        """
        response = self.app.get(reverse("rewards:reward_point_redemption_event_create"), user="evap")
        form = lastform(response)
        form.set('name', 'Test3Event')
        form.set('date', '2014-12-10')
        form.set('redeem_end_date', '2014-11-20')
        response = form.submit()
        self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
        # fixture starts with 2 events; the new one makes 3
        self.assertEqual(RewardPointRedemptionEvent.objects.count(), 3)
    def test_edit_redemption_event(self):
        """
        submits a changed redemption event and tests whether it actually has changed
        """
        response = self.app.get(reverse("rewards:reward_point_redemption_event_edit", args=[2]), user="evap")
        form = lastform(response)
        name = form.get('name').value
        form.set('name', 'new name')
        response = form.submit()
        self.assertRedirects(response, reverse('rewards:reward_point_redemption_events'))
        self.assertNotEqual(RewardPointRedemptionEvent.objects.get(pk=2).name, name)
    def test_grant_reward_points(self):
        """
        submits several requests that trigger the reward point granting and checks that the reward point
        granting works as expected for the different requests.
        """
        user = UserProfile.objects.get(pk=5)
        reward_points_before_end = reward_points_of_user(user)
        response = self.app.get(reverse("student:vote", args=[9]), user="student")
        form = lastform(response)
        # answer every question on the voting form
        for key, value in form.fields.items():
            if key is not None and "question" in key:
                form.set(key, 6)
        response = form.submit()
        self.assertRedirects(response, reverse('student:index'))
        # semester is not activated --> number of reward points should not increase
        self.assertEqual(reward_points_before_end, reward_points_of_user(user))
        # reset course for another try
        course = Course.objects.get(pk=9)
        course.voters = []
        # activate semester
        activation = SemesterActivation.objects.get(semester=course.semester)
        activation.is_active = True
        activation.save()
        # create a new course
        new_course = mommy.make(Course, semester=course.semester)
        new_course.save()
        new_course.participants.add(user)
        new_course.save()
        response = form.submit()
        self.assertRedirects(response, reverse('student:index'))
        # user also has other courses this semester --> number of reward points should not increase
        self.assertEqual(reward_points_before_end, reward_points_of_user(user))
        course.voters = []
        course.save()
        new_course.participants.remove(user)
        new_course.save()
        # last course of user so he may get reward points
        response = form.submit()
        self.assertRedirects(response, reverse('student:index'))
        # if this test fails because of this assertion check that the user is allowed to receive reward points!
        self.assertEqual(reward_points_before_end + settings.REWARD_POINTS_PER_SEMESTER, reward_points_of_user(user))
        # test behaviour if user already got reward points
        course.voters = []
        course.save()
        response = form.submit()
        self.assertRedirects(response, reverse('student:index'))
        # points must NOT be granted a second time for the same semester
        self.assertEqual(reward_points_before_end + settings.REWARD_POINTS_PER_SEMESTER, reward_points_of_user(user))
| [
"evap.evaluation.models.Course.objects.get",
"evap.staff.tests.lastform",
"model_mommy.mommy.make",
"evap.rewards.models.SemesterActivation.objects.get",
"evap.evaluation.models.UserProfile.objects.get",
"evap.rewards.models.RewardPointRedemptionEvent.objects.count",
"django.core.urlresolvers.reverse",
... | [((2441, 2470), 'evap.evaluation.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'pk': '(5)'}), '(pk=5)\n', (2464, 2470), False, 'from evap.evaluation.models import UserProfile\n'), ((2486, 2504), 'evap.staff.tests.lastform', 'lastform', (['response'], {}), '(response)\n', (2494, 2504), False, 'from evap.staff.tests import lastform\n'), ((3234, 3252), 'evap.staff.tests.lastform', 'lastform', (['response'], {}), '(response)\n', (3242, 3252), False, 'from evap.staff.tests import lastform\n'), ((3859, 3877), 'evap.staff.tests.lastform', 'lastform', (['response'], {}), '(response)\n', (3867, 3877), False, 'from evap.staff.tests import lastform\n'), ((4418, 4447), 'evap.evaluation.models.UserProfile.objects.get', 'UserProfile.objects.get', ([], {'pk': '(5)'}), '(pk=5)\n', (4441, 4447), False, 'from evap.evaluation.models import UserProfile\n'), ((4483, 4510), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (4504, 4510), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((4610, 4628), 'evap.staff.tests.lastform', 'lastform', (['response'], {}), '(response)\n', (4618, 4628), False, 'from evap.staff.tests import lastform\n'), ((5084, 5108), 'evap.evaluation.models.Course.objects.get', 'Course.objects.get', ([], {'pk': '(9)'}), '(pk=9)\n', (5102, 5108), False, 'from evap.evaluation.models import Course\n'), ((5185, 5241), 'evap.rewards.models.SemesterActivation.objects.get', 'SemesterActivation.objects.get', ([], {'semester': 'course.semester'}), '(semester=course.semester)\n', (5215, 5241), False, 'from evap.rewards.models import SemesterActivation\n'), ((5355, 5399), 'model_mommy.mommy.make', 'mommy.make', (['Course'], {'semester': 'course.semester'}), '(Course, semester=course.semester)\n', (5365, 5399), False, 'from model_mommy import mommy\n'), ((1017, 1082), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_event_delete"""'], {'args': '[1]'}), 
"('rewards:reward_point_redemption_event_delete', args=[1])\n", (1024, 1082), False, 'from django.core.urlresolvers import reverse\n'), ((1136, 1185), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_events"""'], {}), "('rewards:reward_point_redemption_events')\n", (1143, 1185), False, 'from django.core.urlresolvers import reverse\n'), ((1462, 1527), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_event_delete"""'], {'args': '[2]'}), "('rewards:reward_point_redemption_event_delete', args=[2])\n", (1469, 1527), False, 'from django.core.urlresolvers import reverse\n'), ((1783, 1848), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_event_delete"""'], {'args': '[2]'}), "('rewards:reward_point_redemption_event_delete', args=[2])\n", (1790, 1848), False, 'from django.core.urlresolvers import reverse\n'), ((1902, 1951), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_events"""'], {}), "('rewards:reward_point_redemption_events')\n", (1909, 1951), False, 'from django.core.urlresolvers import reverse\n'), ((2331, 2355), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:index"""'], {}), "('rewards:index')\n", (2338, 2355), False, 'from django.core.urlresolvers import reverse\n'), ((2534, 2561), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (2555, 2561), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((2756, 2783), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (2777, 2783), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((3148, 3203), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_event_create"""'], {}), "('rewards:reward_point_redemption_event_create')\n", (3155, 3203), False, 'from django.core.urlresolvers import reverse\n'), ((3454, 3503), 
'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_events"""'], {}), "('rewards:reward_point_redemption_events')\n", (3461, 3503), False, 'from django.core.urlresolvers import reverse\n'), ((3530, 3572), 'evap.rewards.models.RewardPointRedemptionEvent.objects.count', 'RewardPointRedemptionEvent.objects.count', ([], {}), '()\n', (3570, 3572), False, 'from evap.rewards.models import RewardPointRedemptionEvent\n'), ((3765, 3828), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_event_edit"""'], {'args': '[2]'}), "('rewards:reward_point_redemption_event_edit', args=[2])\n", (3772, 3828), False, 'from django.core.urlresolvers import reverse\n'), ((4026, 4075), 'django.core.urlresolvers.reverse', 'reverse', (['"""rewards:reward_point_redemption_events"""'], {}), "('rewards:reward_point_redemption_events')\n", (4033, 4075), False, 'from django.core.urlresolvers import reverse\n'), ((4543, 4576), 'django.core.urlresolvers.reverse', 'reverse', (['"""student:vote"""'], {'args': '[9]'}), "('student:vote', args=[9])\n", (4550, 4576), False, 'from django.core.urlresolvers import reverse\n'), ((4836, 4860), 'django.core.urlresolvers.reverse', 'reverse', (['"""student:index"""'], {}), "('student:index')\n", (4843, 4860), False, 'from django.core.urlresolvers import reverse\n'), ((4998, 5025), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (5019, 5025), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((5566, 5590), 'django.core.urlresolvers.reverse', 'reverse', (['"""student:index"""'], {}), "('student:index')\n", (5573, 5590), False, 'from django.core.urlresolvers import reverse\n'), ((5744, 5771), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (5765, 5771), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((6025, 6049), 'django.core.urlresolvers.reverse', 'reverse', 
(['"""student:index"""'], {}), "('student:index')\n", (6032, 6049), False, 'from django.core.urlresolvers import reverse\n'), ((6252, 6279), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (6273, 6279), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((6462, 6486), 'django.core.urlresolvers.reverse', 'reverse', (['"""student:index"""'], {}), "('student:index')\n", (6469, 6486), False, 'from django.core.urlresolvers import reverse\n'), ((6577, 6604), 'evap.rewards.tools.reward_points_of_user', 'reward_points_of_user', (['user'], {}), '(user)\n', (6598, 6604), False, 'from evap.rewards.tools import reward_points_of_user\n'), ((4105, 4149), 'evap.rewards.models.RewardPointRedemptionEvent.objects.get', 'RewardPointRedemptionEvent.objects.get', ([], {'pk': '(2)'}), '(pk=2)\n', (4143, 4149), False, 'from evap.rewards.models import RewardPointRedemptionEvent\n'), ((1307, 1354), 'evap.rewards.models.RewardPointRedemptionEvent.objects.filter', 'RewardPointRedemptionEvent.objects.filter', ([], {'pk': '(2)'}), '(pk=2)\n', (1348, 1354), False, 'from evap.rewards.models import RewardPointRedemptionEvent\n'), ((1661, 1708), 'evap.rewards.models.RewardPointRedemptionEvent.objects.filter', 'RewardPointRedemptionEvent.objects.filter', ([], {'pk': '(2)'}), '(pk=2)\n', (1702, 1708), False, 'from evap.rewards.models import RewardPointRedemptionEvent\n'), ((1978, 2025), 'evap.rewards.models.RewardPointRedemptionEvent.objects.filter', 'RewardPointRedemptionEvent.objects.filter', ([], {'pk': '(2)'}), '(pk=2)\n', (2019, 2025), False, 'from evap.rewards.models import RewardPointRedemptionEvent\n')] |
from unittest import TestCase
from ddipy.constants import MISSING_PARAMETER
from ddipy.ddi_utils import BadRequest
from ddipy.seo_client import SeoClient
class TestSeoClient(TestCase):
    """Integration tests for SeoClient's SEO metadata endpoints.

    These tests hit the live OmicsDI service, so they require network
    access; assertions pin the expected metadata returned by each endpoint.
    """

    def test_seo_home(self):
        client = SeoClient()
        res = client.get_seo_home()
        assert len(res.graph) > 0

    def test_seo_search(self):
        client = SeoClient()
        res = client.get_seo_search()
        assert res.name == "Browse"

    def test_seo_api(self):
        client = SeoClient()
        res = client.get_seo_api()
        assert res.name == "API"

    def test_seo_database(self):
        client = SeoClient()
        res = client.get_seo_database()
        assert res.name == "Databases"

    def test_seo_dataset(self):
        client = SeoClient()
        res = client.get_seo_dataset("pride", "PXD000210")
        assert res.name == "Proteome analysis by charge state-selective separation of peptides: a multidimensional approach"
        # BUG FIX: the original wrapped these calls in try/except and only
        # asserted inside the except block, so the test silently passed when
        # no exception was raised at all.  assertRaises makes the expected
        # failure explicit and fails the test if BadRequest is not raised.
        with self.assertRaises(BadRequest) as ctx:
            client.get_seo_dataset("pride", "PXDqqqqqqqq")
        assert ctx.exception.status == 500
        with self.assertRaises(BadRequest) as ctx:
            client.get_seo_dataset(None, "PXDqqqqqqqq")
        assert ctx.exception.status == MISSING_PARAMETER

    def test_seo_about(self):
        client = SeoClient()
        res = client.get_seo_about()
        assert res.name == "About OmicsDI"
| [
"ddipy.seo_client.SeoClient"
] | [((235, 246), 'ddipy.seo_client.SeoClient', 'SeoClient', ([], {}), '()\n', (244, 246), False, 'from ddipy.seo_client import SeoClient\n'), ((366, 377), 'ddipy.seo_client.SeoClient', 'SeoClient', ([], {}), '()\n', (375, 377), False, 'from ddipy.seo_client import SeoClient\n'), ((498, 509), 'ddipy.seo_client.SeoClient', 'SeoClient', ([], {}), '()\n', (507, 509), False, 'from ddipy.seo_client import SeoClient\n'), ((629, 640), 'ddipy.seo_client.SeoClient', 'SeoClient', ([], {}), '()\n', (638, 640), False, 'from ddipy.seo_client import SeoClient\n'), ((770, 781), 'ddipy.seo_client.SeoClient', 'SeoClient', ([], {}), '()\n', (779, 781), False, 'from ddipy.seo_client import SeoClient\n'), ((1324, 1335), 'ddipy.seo_client.SeoClient', 'SeoClient', ([], {}), '()\n', (1333, 1335), False, 'from ddipy.seo_client import SeoClient\n')] |
import re
from textwrap import indent
import mistune
from jinja2 import Template
from .directives.injection import InjectionDirective
from .directives.renvoi import RenvoiDirective
from .directives.section import SectionDirective
from .directives.question import QuestionDirective
from .directives.toc import DirectiveToc
from .typographie import typographie
class FrenchTypographyMixin:
    """Renderer mixin that post-processes output with French typography rules."""

    def text(self, text_):
        rendered = super().text(text_)
        return typographie(rendered)

    def block_html(self, html):
        rendered = super().block_html(html)
        return typographie(rendered)
class ClassMixin:
    """Renderer mixin allowing a CSS class on a paragraph or list item.

    Example:
        * {.myClass} regular markdown list item
    """

    RE_CLASS = re.compile(
        r"""^
             (?P<before>.*?)
             (?:\s*\{\.(?P<class>[\w\- ]+?)\}\s*)
             (?P<after>.*)
             $
        """,
        re.MULTILINE | re.VERBOSE,
    )

    def paragraph(self, text):
        tagged = self._element_with_classes("p", text)
        if tagged:
            return tagged
        return super().paragraph(text)

    def list_item(self, text, level):
        tagged = self._element_with_classes("li", text)
        if tagged:
            return tagged
        return super().list_item(text, level)

    def _element_with_classes(self, name, text):
        """Return an HTML element carrying the extracted class, or None."""
        match = self.RE_CLASS.match(text)
        if match is None:
            return None
        css_class = match.group("class")
        pieces = [match.group("before"), match.group("after")]
        content = " ".join(piece for piece in pieces if piece)
        return f'<{name} class="{css_class}">{content}</{name}>\n'
class CustomHTMLRenderer(FrenchTypographyMixin, ClassMixin, mistune.HTMLRenderer):
    """mistune HTML renderer combining French typography post-processing
    (FrenchTypographyMixin) and CSS-class tagging (ClassMixin)."""

    pass
def create_markdown_parser(questions_index=None):
    """Build a mistune markdown parser wired with the project's directives.

    When *questions_index* is given, the renvoi and injection directives
    (which need the index to resolve cross-references) are enabled too.
    """
    plugins = [SectionDirective(), QuestionDirective(), DirectiveToc()]
    if questions_index is not None:
        plugins += [
            RenvoiDirective(questions_index=questions_index),
            InjectionDirective(questions_index=questions_index),
        ]
    return mistune.create_markdown(
        renderer=CustomHTMLRenderer(escape=False),
        plugins=plugins,
    )
class MarkdownContent:
    """A chunk of markdown text paired with the parser used to render it."""

    def __init__(self, text, markdown):
        self.text = text
        self.markdown = markdown

    def __str__(self):
        return self.render_block()

    def render_block(self):
        """Render the text as a block-level HTML fragment."""
        return self.markdown(self.text)

    def split(self, separator="\n---\n"):
        """Split on *separator*; wrap each stripped piece in a new instance."""
        pieces = self.text.split(separator)
        return [self.__class__(piece.strip(), self.markdown) for piece in pieces]

    def render_me(self, tag="div"):
        inner = str(self).strip()
        return f'<{tag} class="me visible">{inner}</{tag}>'

    def render_them(self, tag="div"):
        inner = str(self).strip()
        return f'<{tag} class="them" hidden>{inner}</{tag}>'
class MarkdownInlineContent(MarkdownContent):
    """Inline variant: renders without block wrapping and uses span tags."""

    def __str__(self):
        return self.render_inline()

    def render_inline(self):
        """Render the text inline and strip surrounding whitespace."""
        rendered = self.markdown.inline(self.text, {})
        return rendered.strip()

    def render_me(self):
        return super().render_me(tag="span")

    def render_them(self):
        return super().render_them(tag="span")
def render_markdown_file(file_path, markdown_parser):
    """Read *file_path*, expand it as a Jinja template, and wrap the result.

    The template is rendered with ``formulaire`` bound to
    :func:`render_formulaire` so documents can embed forms.
    """
    raw = file_path.read_text()
    expanded = Template(raw).render(formulaire=render_formulaire)
    return MarkdownContent(expanded, markdown_parser)
def render_formulaire(nom_formulaire, prefixe=""):
    """Render the named form template wrapped in a ``.formulaire`` div.

    The form template is loaded from THEMATIQUES_DIR/formulaires/<name>.md
    and rendered with a prefix that namespaces its fields.
    """
    from .thematiques import THEMATIQUES_DIR

    path = THEMATIQUES_DIR / "formulaires" / f"{nom_formulaire}.md"
    with path.open() as f:
        template = Template(f.read())

    # Namespace the prefix under the form's own name.
    prefixe = f"{nom_formulaire}-{prefixe}" if prefixe else nom_formulaire

    opening = f'<div class="formulaire" data-nom="{nom_formulaire}" data-prefixe="{prefixe}">\n\n'
    markdown = opening + template.render(prefixe=prefixe) + "\n\n</div>"
    # Indent so the div nests inside surrounding markdown; lstrip keeps the
    # opening tag flush with the insertion point.
    return indent(markdown, "    ").lstrip()
| [
"textwrap.indent",
"jinja2.Template",
"re.compile"
] | [((760, 949), 're.compile', 're.compile', (['"""^\n (?P<before>.*?)\n (?:\\\\s*\\\\{\\\\.(?P<class>[\\\\w\\\\- ]+?)\\\\}\\\\s*)\n (?P<after>.*)\n $\n """', '(re.MULTILINE | re.VERBOSE)'], {}), '(\n """^\n (?P<before>.*?)\n (?:\\\\s*\\\\{\\\\.(?P<class>[\\\\w\\\\- ]+?)\\\\}\\\\s*)\n (?P<after>.*)\n $\n """\n , re.MULTILINE | re.VERBOSE)\n', (770, 949), False, 'import re\n'), ((3215, 3231), 'jinja2.Template', 'Template', (['source'], {}), '(source)\n', (3223, 3231), False, 'from jinja2 import Template\n'), ((3866, 3890), 'textwrap.indent', 'indent', (['markdown', '""" """'], {}), "(markdown, ' ')\n", (3872, 3890), False, 'from textwrap import indent\n')] |
from datetime import datetime
import pytest
from intents import Sys
from intents.connectors._experimental.snips import entities
from intents.connectors._experimental.snips import prediction_format as pf
def test_date_mapping_from_service():
    """A snips/date slot parsed from a service payload maps to Sys.Date."""
    mapping = entities.DateMapping()
    # Raw parse payload in the shape returned by the Snips NLU engine.
    raw_result = {
        'input': 'My birthday is on august 24',
        'intent': {
            'intentName': 'UserSaysBirthday',
            'probability': 1.0
        },
        'slots': [
            {
                'range': {'start': 15, 'end': 27},
                'rawValue': 'on august 24',
                'value': {
                    'kind': 'InstantTime',
                    'value': '2021-08-24 00:00:00 +02:00',
                    'grain': 'Day',
                    'precision': 'Exact'
                },
                'entity': 'snips/date',
                'slotName': 'birthday_date'
            }
        ]
    }
    parsed = pf.from_dict(raw_result)
    slot_value = parsed.slots[0].value
    assert mapping.from_service(slot_value) == Sys.Date(2021, 8, 24)
def test_date_mapping_unexpected_grain():
    """An InstantTime value with an unexpected grain still maps to Sys.Date."""
    import warnings

    mapping = entities.DateMapping()
    value = {
        'kind': 'InstantTime',
        'value': '2021-08-24 00:00:00 +02:00',
        'grain': 'Month',
        'precision': 'Exact'
    }
    # BUG FIX: pytest.warns(None) is deprecated and raises TypeError on
    # pytest >= 7.0.  catch_warnings(record=True) keeps the original intent
    # of tolerating (but not requiring) a warning.  The mapping call is now
    # inside the capture block, since that is where a warning would be
    # emitted (presumably by from_service — confirm against the mapping).
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        entity = mapping.from_service(value)
    assert entity == Sys.Date(2021, 8, 24)
def test_date_mapping_unexpected_kind():
    """A value with an unexpected kind still maps to Sys.Date."""
    import warnings

    mapping = entities.DateMapping()
    value = {
        'kind': 'UNEXPECTED',
        'value': '2021-08-24 00:00:00 +02:00',
        'grain': 'Day',
        'precision': 'Exact'
    }
    # BUG FIX: pytest.warns(None) is deprecated and raises TypeError on
    # pytest >= 7.0.  catch_warnings(record=True) keeps the original intent
    # of tolerating (but not requiring) a warning.  The mapping call is now
    # inside the capture block, since that is where a warning would be
    # emitted (presumably by from_service — confirm against the mapping).
    with warnings.catch_warnings(record=True):
        warnings.simplefilter("always")
        entity = mapping.from_service(value)
    assert entity == Sys.Date(2021, 8, 24)
def test_date_mapping_to_service():
    """Both Sys.Date and datetime inputs serialize to an ISO date string."""
    mapping = entities.DateMapping()
    expected = "2021-08-08"
    assert mapping.to_service(Sys.Date(2021, 8, 8)) == expected
    assert mapping.to_service(datetime(year=2021, month=8, day=8)) == expected
| [
"datetime.datetime",
"intents.Sys.Date",
"intents.connectors._experimental.snips.prediction_format.from_dict",
"intents.connectors._experimental.snips.entities.DateMapping",
"pytest.warns"
] | [((258, 280), 'intents.connectors._experimental.snips.entities.DateMapping', 'entities.DateMapping', ([], {}), '()\n', (278, 280), False, 'from intents.connectors._experimental.snips import entities\n'), ((949, 980), 'intents.connectors._experimental.snips.prediction_format.from_dict', 'pf.from_dict', (['snips_date_result'], {}), '(snips_date_result)\n', (961, 980), True, 'from intents.connectors._experimental.snips import prediction_format as pf\n'), ((1144, 1166), 'intents.connectors._experimental.snips.entities.DateMapping', 'entities.DateMapping', ([], {}), '()\n', (1164, 1166), False, 'from intents.connectors._experimental.snips import entities\n'), ((1493, 1515), 'intents.connectors._experimental.snips.entities.DateMapping', 'entities.DateMapping', ([], {}), '()\n', (1513, 1515), False, 'from intents.connectors._experimental.snips import entities\n'), ((1834, 1856), 'intents.connectors._experimental.snips.entities.DateMapping', 'entities.DateMapping', ([], {}), '()\n', (1854, 1856), False, 'from intents.connectors._experimental.snips import entities\n'), ((1065, 1086), 'intents.Sys.Date', 'Sys.Date', (['(2021)', '(8)', '(24)'], {}), '(2021, 8, 24)\n', (1073, 1086), False, 'from intents import Sys\n'), ((1370, 1388), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (1382, 1388), False, 'import pytest\n'), ((1716, 1734), 'pytest.warns', 'pytest.warns', (['None'], {}), '(None)\n', (1728, 1734), False, 'import pytest\n'), ((1415, 1436), 'intents.Sys.Date', 'Sys.Date', (['(2021)', '(8)', '(24)'], {}), '(2021, 8, 24)\n', (1423, 1436), False, 'from intents import Sys\n'), ((1761, 1782), 'intents.Sys.Date', 'Sys.Date', (['(2021)', '(8)', '(24)'], {}), '(2021, 8, 24)\n', (1769, 1782), False, 'from intents import Sys\n'), ((1887, 1907), 'intents.Sys.Date', 'Sys.Date', (['(2021)', '(8)', '(8)'], {}), '(2021, 8, 8)\n', (1895, 1907), False, 'from intents import Sys\n'), ((1955, 1990), 'datetime.datetime', 'datetime', ([], {'year': '(2021)', 'month': '(8)', 
'day': '(8)'}), '(year=2021, month=8, day=8)\n', (1963, 1990), False, 'from datetime import datetime\n')] |
import pytest
from django.core.management import call_command
@pytest.mark.django_db
def test_common_runperiodic():
    """Smoke test: the ``runperiodic`` management command runs without raising."""
    call_command('runperiodic')
| [
"django.core.management.call_command"
] | [((122, 149), 'django.core.management.call_command', 'call_command', (['"""runperiodic"""'], {}), "('runperiodic')\n", (134, 149), False, 'from django.core.management import call_command\n')] |
#!/usr/bin/env python3
#
# horizon.py - by <NAME> - 2019-12-18
#
# Example Python program for Astronomy Engine:
# https://github.com/cosinekitty/astronomy
#
# This is a more advanced example. It shows how to use coordinate
# transforms and a binary search to find the two azimuths where the
# ecliptic intersects with an observer's horizon at a given date and time.
#
# To execute, run the command:
#
# python3 horizon.py latitude longitude [yyyy-mm-ddThh:mm:ssZ]
#
import sys
import astronomy
from astro_demo_common import ParseArgs
# Number of evenly spaced sample points taken around the ecliptic circle.
NUM_SAMPLES = 4


def ECLIPLON(i):
    """Return the ecliptic longitude, in degrees, of sample index *i*."""
    return (360.0 * i) / NUM_SAMPLES
def HorizontalCoords(ecliptic_longitude, time, rot_ecl_hor):
    """Convert an ecliptic longitude into the observer's horizontal frame.

    The point is taken on the ecliptic plane itself (ecliptic latitude 0)
    at unit distance, rotated with *rot_ecl_hor*, and returned as angular
    horizontal coordinates corrected for normal atmospheric refraction.
    """
    # Latitude 0 means "on the ecliptic plane"; any positive distance
    # works because only the direction matters here.
    on_ecliptic = astronomy.Spherical(0.0, ecliptic_longitude, 1.0)
    ecliptic_vector = astronomy.VectorFromSphere(on_ecliptic, time)
    horizontal_vector = astronomy.RotateVector(rot_ecl_hor, ecliptic_vector)
    return astronomy.HorizonFromVector(horizontal_vector, astronomy.Refraction.Normal)
def Search(time, rot_ecl_hor, e1, e2):
    """Binary-search the ecliptic longitude whose altitude crosses zero.

    The caller must supply e1, e2 such that the altitude ascends through
    zero between them.  Returns a (longitude, horizontal_coords) pair.
    """
    tolerance = 1.0e-6  # one-millionth of a degree is close enough
    while True:
        midpoint = (e1 + e2) / 2.0
        horizontal = HorizontalCoords(midpoint, time, rot_ecl_hor)
        if abs(e2 - e1) < tolerance:
            return (midpoint, horizontal)
        # Altitude still below the horizon: the crossing is in the upper half.
        if horizontal.lat < 0.0:
            e1 = midpoint
        else:
            e2 = midpoint
def FindEclipticCrossings(observer, time):
    """Print the azimuths where the ecliptic crosses the observer's horizon.

    Uses J2000 ecliptic coordinates (the mean plane of the Earth's orbit
    around the Sun, x-axis toward the March equinox).  Returns 0.
    """
    # Rotation matrix converting J2000 ecliptic vectors to horizontal
    # vectors for this observer and time.
    rot = astronomy.Rotation_ECL_HOR(time, observer)

    # Horizontal coordinates of evenly spaced samples around the ecliptic.
    hor = [HorizontalCoords(ECLIPLON(i), time, rot) for i in range(NUM_SAMPLES)]

    for i in range(NUM_SAMPLES):
        a1 = hor[i].lat
        a2 = hor[(i + 1) % NUM_SAMPLES].lat
        e1 = ECLIPLON(i)
        e2 = ECLIPLON(i + 1)
        if a1 * a2 > 0.0:
            # No sign change between the sample altitudes: no crossing here.
            continue
        # Orient the search so the altitude ascends through zero.
        if a2 > a1:
            (ex, h) = Search(time, rot, e1, e2)
        else:
            (ex, h) = Search(time, rot, e2, e1)
        direction = 'ascends' if 0.0 < h.lon < 180.0 else 'descends'
        print('Ecliptic longitude {:0.4f} {} through horizon az {:0.4f}, alt {:0.5g}'.format(ex, direction, h.lon, h.lat))
    return 0
if __name__ == '__main__':
    # Parse observer latitude/longitude (and optional UTC time) from the
    # command line, then exit with the search's status code.
    observer, time = ParseArgs(sys.argv)
    sys.exit(FindEclipticCrossings(observer, time))
| [
"astro_demo_common.ParseArgs",
"astronomy.Spherical",
"astronomy.RotateVector",
"astronomy.Rotation_ECL_HOR",
"astronomy.HorizonFromVector",
"astronomy.VectorFromSphere"
] | [((707, 756), 'astronomy.Spherical', 'astronomy.Spherical', (['(0.0)', 'ecliptic_longitude', '(1.0)'], {}), '(0.0, ecliptic_longitude, 1.0)\n', (726, 756), False, 'import astronomy\n'), ((1015, 1054), 'astronomy.VectorFromSphere', 'astronomy.VectorFromSphere', (['eclip', 'time'], {}), '(eclip, time)\n', (1041, 1054), False, 'import astronomy\n'), ((1149, 1193), 'astronomy.RotateVector', 'astronomy.RotateVector', (['rot_ecl_hor', 'ecl_vec'], {}), '(rot_ecl_hor, ecl_vec)\n', (1171, 1193), False, 'import astronomy\n'), ((1288, 1353), 'astronomy.HorizonFromVector', 'astronomy.HorizonFromVector', (['hor_vec', 'astronomy.Refraction.Normal'], {}), '(hor_vec, astronomy.Refraction.Normal)\n', (1315, 1353), False, 'import astronomy\n'), ((2452, 2494), 'astronomy.Rotation_ECL_HOR', 'astronomy.Rotation_ECL_HOR', (['time', 'observer'], {}), '(time, observer)\n', (2478, 2494), False, 'import astronomy\n'), ((3342, 3361), 'astro_demo_common.ParseArgs', 'ParseArgs', (['sys.argv'], {}), '(sys.argv)\n', (3351, 3361), False, 'from astro_demo_common import ParseArgs\n')] |