hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f726183b6b3459cd5406c87d8bc7c89200072c21 | 3,203 | py | Python | crnt4sbml/safety_wrap.py | PNNL-Comp-Mass-Spec/CRNT4SBML | 20406f452863f35f766b504fe2b3f3ab034b62fe | [
"Apache-2.0"
] | null | null | null | crnt4sbml/safety_wrap.py | PNNL-Comp-Mass-Spec/CRNT4SBML | 20406f452863f35f766b504fe2b3f3ab034b62fe | [
"Apache-2.0"
] | 1 | 2019-09-26T21:04:31.000Z | 2019-09-26T21:04:31.000Z | crnt4sbml/safety_wrap.py | PNNL-Comp-Mass-Spec/CRNT4SBML | 20406f452863f35f766b504fe2b3f3ab034b62fe | [
"Apache-2.0"
] | 1 | 2019-10-29T20:41:34.000Z | 2019-10-29T20:41:34.000Z | import os
import pickle
import numpy
import antimony
import roadrunner
import rrplugins
import sys
# Silence all of roadrunner's / rrplugins' own logging so that only this
# wrapper's pickled result files are produced for the parent process.
roadrunner.Logger.setLevel(roadrunner.Logger.LOG_ERROR)
roadrunner.Logger.disableLogging()
roadrunner.Logger.disableConsoleLogging()
roadrunner.Logger.disableFileLogging()
rrplugins.setLogLevel('error')
# Redirect the process-level stderr (fd 2) into a throwaway pipe —
# presumably to swallow chatter written directly by the native libraries
# (TODO confirm). The original fd is saved here and restored at the very
# bottom of this script.
stderr_fileno = sys.stderr.fileno()
stderr_save = os.dup(stderr_fileno)
stderr_pipe = os.pipe()
os.dup2(stderr_pipe[1], stderr_fileno)
os.close(stderr_pipe[1])  # fd 2 now refers to the pipe's write end
# functions taken from Tellurium!! Give them
# credit, they deserve it!
#################################################
def __check_antimony_return_code(code):
    """Raise an Exception carrying antimony's last error if *code* signals failure.

    Antimony's loader functions return a negative integer on error.
    (Adapted from Tellurium.)
    """
    if code >= 0:
        return
    raise Exception('Antimony: {}'.format(antimony.getLastError()))
def __antimony_to_sbml(ant):
    """Translate *ant* — an Antimony file path or model string — to SBML.

    Returns the SBML serialization of the main module. (Adapted from
    Tellurium.)
    """
    try:
        treat_as_file = os.path.isfile(ant)
    except ValueError:
        # os.path.isfile() can raise on odd inputs; fall back to treating
        # the argument as a raw model string.
        treat_as_file = False
    loader = antimony.loadAntimonyFile if treat_as_file else antimony.loadAntimonyString
    __check_antimony_return_code(loader(ant))
    main_module = antimony.getMainModuleName()
    return antimony.getSBMLString(main_module)
def __loada(ant):
    """Shorthand alias for :func:`__load_antimony_model`. (From Tellurium.)"""
    return __load_antimony_model(ant)
def __load_antimony_model(ant):
    """Build a RoadRunner instance from an Antimony path or model string."""
    return roadrunner.RoadRunner(__antimony_to_sbml(ant))
# Load the arguments pickled by the parent process:
#   [0] Antimony model string, [1] continuation scan direction,
#   [2] dict of AUTO2000 property overrides.
with open('input_arguments.pickle', 'rb') as pickle_file:
    input_arguments = pickle.loads(pickle_file.read())
ant_str = input_arguments[0]
direction = input_arguments[1]
auto = rrplugins.Plugin("tel_auto2000")
auto_parameters = input_arguments[2]
antimony_r = __loada(ant_str)
# # making the directory auto_fort_files if it does not exist
# if not os.path.isdir("./auto_fort_files"):
#     os.mkdir("./auto_fort_files")
# Configure the AUTO2000 plugin with the model and fixed defaults.
auto.setProperty("SBML", antimony_r.getCurrentSBML())
auto.setProperty("ScanDirection", direction)
auto.setProperty("PreSimulation", "True")
auto.setProperty("PreSimulationDuration", 1.0)
auto.setProperty('KeepTempFiles', True)
auto.setProperty("TempFolder", "auto_fort_files")
# Apply the user-provided AUTO property overrides on top of the defaults.
for i in auto_parameters.keys():
    auto.setProperty(i, auto_parameters[i])
# Run the continuation; any failure is reported to the parent through
# `flag` rather than by crashing this wrapper process.
try:
    auto.execute()
    # indices where special points are
    pts = auto.BifurcationPoints
    # labeling of special points
    lbls = auto.BifurcationLabels
    # all data for parameters and species found by continuation
    bi_data = auto.BifurcationData
    # converts bi_data to a numpy array, where the first
    # column is the principal continuation parameter and
    # the rest of the columns are the species
    # (attribute access, not a call — rrplugins appears to expose
    # toNumpy as a property; confirm against the rrplugins API)
    bi_data_np = bi_data.toNumpy
    flag = True
except Exception as e:
    # exception object itself is unused; failure is signaled via `flag`
    flag = False
    pts = []
    lbls = []
    bi_data_np = numpy.zeros(2)
ant_float_ids = antimony_r.model.getFloatingSpeciesIds()
# Hand results back to the parent: the numeric array via .npy, the rest
# via a pickle (any stale pickle is removed first).
numpy.save('bi_data_np.npy', bi_data_np)
output_arguments = [pts, lbls, ant_float_ids, flag]
if os.path.exists("output_arguments.pickle"):
    os.remove("output_arguments.pickle")
    with open('output_arguments.pickle', 'wb') as outf:
        outf.write(pickle.dumps(output_arguments))
else:
    with open('output_arguments.pickle', 'wb') as outf:
        outf.write(pickle.dumps(output_arguments))
# Tear down the stderr redirection installed at the top of the script.
os.close(stderr_pipe[0])
os.dup2(stderr_save, stderr_fileno)
os.close(stderr_save)
os.close(stderr_fileno)
| 27.612069 | 71 | 0.73119 | import os
import pickle
import numpy
import antimony
import roadrunner
import rrplugins
import sys
roadrunner.Logger.setLevel(roadrunner.Logger.LOG_ERROR)
roadrunner.Logger.disableLogging()
roadrunner.Logger.disableConsoleLogging()
roadrunner.Logger.disableFileLogging()
rrplugins.setLogLevel('error')
stderr_fileno = sys.stderr.fileno()
stderr_save = os.dup(stderr_fileno)
stderr_pipe = os.pipe()
os.dup2(stderr_pipe[1], stderr_fileno)
os.close(stderr_pipe[1])
to_parameters.keys():
auto.setProperty(i, auto_parameters[i])
try:
auto.execute()
pts = auto.BifurcationPoints
lbls = auto.BifurcationLabels
bi_data = auto.BifurcationData
bi_data_np = bi_data.toNumpy
flag = True
except Exception as e:
flag = False
pts = []
lbls = []
bi_data_np = numpy.zeros(2)
ant_float_ids = antimony_r.model.getFloatingSpeciesIds()
numpy.save('bi_data_np.npy', bi_data_np)
output_arguments = [pts, lbls, ant_float_ids, flag]
if os.path.exists("output_arguments.pickle"):
os.remove("output_arguments.pickle")
with open('output_arguments.pickle', 'wb') as outf:
outf.write(pickle.dumps(output_arguments))
else:
with open('output_arguments.pickle', 'wb') as outf:
outf.write(pickle.dumps(output_arguments))
os.close(stderr_pipe[0])
os.dup2(stderr_save, stderr_fileno)
os.close(stderr_save)
os.close(stderr_fileno)
| true | true |
f72619392a6697759ea4f17f8067b0b5a3548221 | 6,841 | py | Python | pdm/resolver/providers.py | linw1995/pdm | f2f67f17efd9cd8593ce06a4933cc2303890dcec | [
"MIT"
] | null | null | null | pdm/resolver/providers.py | linw1995/pdm | f2f67f17efd9cd8593ce06a4933cc2303890dcec | [
"MIT"
] | null | null | null | pdm/resolver/providers.py | linw1995/pdm | f2f67f17efd9cd8593ce06a4933cc2303890dcec | [
"MIT"
] | null | null | null | from typing import Any, Dict, Iterable, List, Optional, Union
from resolvelib import AbstractProvider
from resolvelib.resolvers import RequirementInformation
from pdm.models.candidates import Candidate
from pdm.models.repositories import BaseRepository
from pdm.models.requirements import Requirement
from pdm.models.specifiers import PySpecSet
from pdm.utils import url_without_fragments
class BaseProvider(AbstractProvider):
    """Default resolvelib provider answering queries from a pdm repository.

    Besides implementing the ``AbstractProvider`` interface, it records
    metadata about every candidate whose dependencies are fetched
    (summaries, ``requires_python`` constraints and the dependency lists)
    for later use — presumably by the lockfile writer (confirm at callers).
    """

    def __init__(
        self,
        repository: BaseRepository,
        requires_python: PySpecSet,
        allow_prereleases: Optional[bool] = None,
    ) -> None:
        self.repository = repository
        self.requires_python = requires_python  # Root python_requires value
        self.allow_prereleases = allow_prereleases  # Root allow_prereleases value
        # Caches populated as get_dependencies() is called during resolution.
        self.requires_python_collection: Dict[Optional[str], PySpecSet] = {}
        self.summary_collection: Dict[str, str] = {}
        self.fetched_dependencies: Dict[str, List[Requirement]] = {}

    def identify(self, req: Union[Requirement, Candidate]) -> Optional[str]:
        """Return the key grouping requirements and candidates together."""
        return req.identify()

    def get_preference(
        self,
        resolution: Candidate,
        candidates: List[Candidate],
        information: List[RequirementInformation],
    ) -> int:
        """Resolve names with fewer matching candidates first."""
        return len(candidates)

    def find_matches(self, requirements: List[Requirement]) -> Iterable[Candidate]:
        """Return candidates satisfying every requirement in *requirements*."""
        # A non-named (URL/path/file) requirement fixes the candidate set to
        # the single candidate it describes.
        file_req = next((req for req in requirements if not req.is_named), None)
        if file_req:
            can = Candidate(file_req, self.repository.environment)
            can.get_metadata()
            candidates = [can]
        else:
            candidates = self.repository.find_candidates(
                requirements[0], self.requires_python, self.allow_prereleases
            )
        return [
            can
            for can in candidates
            if all(self.is_satisfied_by(r, can) for r in requirements)
        ]

    def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
        """Tell whether *candidate* fulfills *requirement*.

        May lazily fetch the candidate's metadata (and, for preferred pins,
        its ``requires_python``) when that information is not known yet.
        """
        if not requirement.is_named:
            # URL requirements match on URL equality (fragments ignored),
            # not on version specifiers.
            return not candidate.req.is_named and url_without_fragments(
                candidate.req.url
            ) == url_without_fragments(requirement.url)
        if not candidate.version:
            candidate.get_metadata()
        if getattr(candidate, "_preferred", False) and not candidate._requires_python:
            # A pinned candidate may come without requires_python info;
            # recover it from the repository's dependency query.
            candidate.requires_python = str(
                self.repository.get_dependencies(candidate)[1]
            )
        # Prerelease policy cascade: requirement -> root setting -> allow.
        allow_prereleases = requirement.allow_prereleases
        if allow_prereleases is None:
            allow_prereleases = self.allow_prereleases
        if allow_prereleases is None:
            # if not specified, should allow what `find_candidates()` returns
            allow_prereleases = True
        requires_python = self.requires_python & requirement.requires_python
        return requirement.specifier.contains(
            candidate.version, allow_prereleases
        ) and requires_python.is_subset(candidate.requires_python)

    def get_dependencies(self, candidate: Candidate) -> List[Requirement]:
        """Fetch *candidate*'s dependencies, recording metadata as a side effect."""
        deps, requires_python, summary = self.repository.get_dependencies(candidate)
        # Filter out incompatible dependencies(e.g. functools32) early so that
        # we don't get errors when building wheels.
        valid_deps: List[Requirement] = []
        for dep in deps:
            if (
                dep.requires_python & requires_python & self.requires_python
            ).is_impossible:
                continue
            dep.requires_python &= candidate.req.requires_python
            valid_deps.append(dep)
        candidate_key = self.identify(candidate)
        self.fetched_dependencies[candidate_key] = valid_deps
        self.summary_collection[candidate.req.key] = summary
        self.requires_python_collection[candidate.req.key] = requires_python
        return valid_deps

    def get_hashes(self, candidate: Candidate) -> Optional[Dict[str, str]]:
        """Return file hashes for *candidate* if the repository provides them."""
        return self.repository.get_hashes(candidate)
class ReusePinProvider(BaseProvider):
    """Provider that offers already-locked candidates first when allowed.

    Backs "add", "remove" and "reuse upgrade": a candidate pinned in the
    lockfile is yielded ahead of repository matches, unless its name is
    being tracked for an upgrade.
    """

    def __init__(
        self,
        preferred_pins: Dict[str, Candidate],
        tracked_names: Iterable[str],
        *args: Any
    ) -> None:
        super().__init__(*args)
        self.preferred_pins = preferred_pins
        self.tracked_names = set(tracked_names)

    def find_matches(self, requirements: List[Requirement]) -> Iterable[Candidate]:
        """Yield the lockfile pin (if reusable) before the normal matches."""
        key = self.identify(requirements[0])
        reusable = key in self.preferred_pins and key not in self.tracked_names
        if reusable:
            pinned = self.preferred_pins[key]
            pinned._preferred = True  # mark so is_satisfied_by can recognize it
            yield pinned
        yield from super().find_matches(requirements)
class EagerUpdateProvider(ReusePinProvider):
    """Provider implementing the "eager" upgrade strategy.

    Unlike the "only-if-needed" default, an eager upgrade also refreshes the
    dependencies (recursively) of every package selected for upgrade: when a
    tracked package is resolved, its dependencies join the tracked set so
    their lockfile pins get discarded as well.
    """

    def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
        """Reject the lockfile pin of a tracked package.

        This pushes the resolver out of the preferred pin and into normal
        candidate selection for that package.
        """
        is_tracked = self.identify(requirement) in self.tracked_names
        if is_tracked and getattr(candidate, "_preferred", False):
            return False
        return super().is_satisfied_by(requirement, candidate)

    def get_dependencies(self, candidate: Candidate) -> List[Requirement]:
        """Fetch dependencies; a tracked package makes its deps tracked too."""
        dependencies = super().get_dependencies(candidate)
        if self.identify(candidate) in self.tracked_names:
            self.tracked_names.update(
                self.identify(dependency) for dependency in dependencies
            )
        return dependencies

    def get_preference(
        self,
        resolution: Candidate,
        candidates: List[Candidate],
        information: List[RequirementInformation],
    ) -> int:
        """Resolve tracked packages first so their pins are freed early."""
        if self.identify(candidates[0]) in self.tracked_names:
            return -1
        return len(candidates)
| 40.720238 | 86 | 0.673586 | from typing import Any, Dict, Iterable, List, Optional, Union
from resolvelib import AbstractProvider
from resolvelib.resolvers import RequirementInformation
from pdm.models.candidates import Candidate
from pdm.models.repositories import BaseRepository
from pdm.models.requirements import Requirement
from pdm.models.specifiers import PySpecSet
from pdm.utils import url_without_fragments
class BaseProvider(AbstractProvider):
    """Default resolvelib provider answering queries from a pdm repository.

    NOTE(review): this is a comment-stripped duplicate of the class that
    appears earlier in this dump. It records metadata about every candidate
    whose dependencies are fetched (summaries, ``requires_python``
    constraints and the dependency lists) for later use.
    """

    def __init__(
        self,
        repository: BaseRepository,
        requires_python: PySpecSet,
        allow_prereleases: Optional[bool] = None,
    ) -> None:
        self.repository = repository
        self.requires_python = requires_python  # root python_requires value
        self.allow_prereleases = allow_prereleases  # root prerelease policy
        # Caches populated as get_dependencies() is called during resolution.
        self.requires_python_collection: Dict[Optional[str], PySpecSet] = {}
        self.summary_collection: Dict[str, str] = {}
        self.fetched_dependencies: Dict[str, List[Requirement]] = {}

    def identify(self, req: Union[Requirement, Candidate]) -> Optional[str]:
        """Return the key grouping requirements and candidates together."""
        return req.identify()

    def get_preference(
        self,
        resolution: Candidate,
        candidates: List[Candidate],
        information: List[RequirementInformation],
    ) -> int:
        """Resolve names with fewer matching candidates first."""
        return len(candidates)

    def find_matches(self, requirements: List[Requirement]) -> Iterable[Candidate]:
        """Return candidates satisfying every requirement in *requirements*."""
        # A non-named (URL/path/file) requirement fixes the candidate set to
        # the single candidate it describes.
        file_req = next((req for req in requirements if not req.is_named), None)
        if file_req:
            can = Candidate(file_req, self.repository.environment)
            can.get_metadata()
            candidates = [can]
        else:
            candidates = self.repository.find_candidates(
                requirements[0], self.requires_python, self.allow_prereleases
            )
        return [
            can
            for can in candidates
            if all(self.is_satisfied_by(r, can) for r in requirements)
        ]

    def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
        """Tell whether *candidate* fulfills *requirement*.

        May lazily fetch the candidate's metadata (and, for preferred pins,
        its ``requires_python``) when that information is not known yet.
        """
        if not requirement.is_named:
            # URL requirements match on URL equality (fragments ignored).
            return not candidate.req.is_named and url_without_fragments(
                candidate.req.url
            ) == url_without_fragments(requirement.url)
        if not candidate.version:
            candidate.get_metadata()
        if getattr(candidate, "_preferred", False) and not candidate._requires_python:
            # A pinned candidate may come without requires_python info;
            # recover it from the repository's dependency query.
            candidate.requires_python = str(
                self.repository.get_dependencies(candidate)[1]
            )
        # Prerelease policy cascade: requirement -> root setting -> allow.
        allow_prereleases = requirement.allow_prereleases
        if allow_prereleases is None:
            allow_prereleases = self.allow_prereleases
        if allow_prereleases is None:
            allow_prereleases = True
        requires_python = self.requires_python & requirement.requires_python
        return requirement.specifier.contains(
            candidate.version, allow_prereleases
        ) and requires_python.is_subset(candidate.requires_python)

    def get_dependencies(self, candidate: Candidate) -> List[Requirement]:
        """Fetch *candidate*'s dependencies, recording metadata as a side effect."""
        deps, requires_python, summary = self.repository.get_dependencies(candidate)
        # Drop dependencies whose combined python constraint is unsatisfiable.
        valid_deps: List[Requirement] = []
        for dep in deps:
            if (
                dep.requires_python & requires_python & self.requires_python
            ).is_impossible:
                continue
            dep.requires_python &= candidate.req.requires_python
            valid_deps.append(dep)
        candidate_key = self.identify(candidate)
        self.fetched_dependencies[candidate_key] = valid_deps
        self.summary_collection[candidate.req.key] = summary
        self.requires_python_collection[candidate.req.key] = requires_python
        return valid_deps

    def get_hashes(self, candidate: Candidate) -> Optional[Dict[str, str]]:
        """Return file hashes for *candidate* if the repository provides them."""
        return self.repository.get_hashes(candidate)
class ReusePinProvider(BaseProvider):
    """Provider that offers already-locked candidates first when allowed.

    A candidate pinned in the lockfile is yielded ahead of repository
    matches, unless its name is being tracked for an upgrade.
    """

    def __init__(
        self,
        preferred_pins: Dict[str, Candidate],
        tracked_names: Iterable[str],
        *args: Any
    ) -> None:
        super().__init__(*args)
        self.preferred_pins = preferred_pins
        self.tracked_names = set(tracked_names)

    def find_matches(self, requirements: List[Requirement]) -> Iterable[Candidate]:
        """Yield the lockfile pin (if reusable) before the normal matches."""
        key = self.identify(requirements[0])
        reusable = key in self.preferred_pins and key not in self.tracked_names
        if reusable:
            pinned = self.preferred_pins[key]
            pinned._preferred = True  # mark so is_satisfied_by can recognize it
            yield pinned
        yield from super().find_matches(requirements)
class EagerUpdateProvider(ReusePinProvider):
    """Provider implementing the "eager" upgrade strategy.

    An eager upgrade also refreshes the dependencies (recursively) of every
    package selected for upgrade: when a tracked package is resolved, its
    dependencies join the tracked set so their pins get discarded as well.
    """

    def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
        """Reject the lockfile pin of a tracked package.

        This pushes the resolver out of the preferred pin and into normal
        candidate selection for that package.
        """
        is_tracked = self.identify(requirement) in self.tracked_names
        if is_tracked and getattr(candidate, "_preferred", False):
            return False
        return super().is_satisfied_by(requirement, candidate)

    def get_dependencies(self, candidate: Candidate) -> List[Requirement]:
        """Fetch dependencies; a tracked package makes its deps tracked too."""
        dependencies = super().get_dependencies(candidate)
        if self.identify(candidate) in self.tracked_names:
            self.tracked_names.update(
                self.identify(dependency) for dependency in dependencies
            )
        return dependencies

    def get_preference(
        self,
        resolution: Candidate,
        candidates: List[Candidate],
        information: List[RequirementInformation],
    ) -> int:
        """Resolve tracked packages first so their pins are freed early."""
        if self.identify(candidates[0]) in self.tracked_names:
            return -1
        return len(candidates)
f726193a37608aae08e652a88fa96a6e2af5182f | 427 | py | Python | Python1/python1/word_counter.py | ceeblet/OST_PythonCertificationTrack | 042e0ce964bc88b3f4132dcbd7e06c5f504eae34 | [
"MIT"
] | null | null | null | Python1/python1/word_counter.py | ceeblet/OST_PythonCertificationTrack | 042e0ce964bc88b3f4132dcbd7e06c5f504eae34 | [
"MIT"
] | null | null | null | Python1/python1/word_counter.py | ceeblet/OST_PythonCertificationTrack | 042e0ce964bc88b3f4132dcbd7e06c5f504eae34 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
"""Count the number of different words in a text."""
text = """\
Baa, Baa, Black sheep,
Have you any wool?
Yes sir, yes sir,
Three bags full;
One for the master,
And one for the dame,
And one for the little boy
Who lives down the lane."""
for punc in ",?;.":
text = text.replace(punc, "")
print(text)
words = set(text.lower().split())
print("There are", len(words), "distinct words in the text.") | 23.722222 | 61 | 0.672131 |
text = """\
Baa, Baa, Black sheep,
Have you any wool?
Yes sir, yes sir,
Three bags full;
One for the master,
And one for the dame,
And one for the little boy
Who lives down the lane."""
for punc in ",?;.":
text = text.replace(punc, "")
print(text)
words = set(text.lower().split())
print("There are", len(words), "distinct words in the text.") | true | true |
f72619f4db7804ed8962e86a9e8a707a354a4d0e | 32,049 | py | Python | ros2interface/test/test_cli.py | craigh92/ros2cli | 6c1af39c728145942346b40e998b8a3984f1b6c1 | [
"Apache-2.0"
] | null | null | null | ros2interface/test/test_cli.py | craigh92/ros2cli | 6c1af39c728145942346b40e998b8a3984f1b6c1 | [
"Apache-2.0"
] | null | null | null | ros2interface/test/test_cli.py | craigh92/ros2cli | 6c1af39c728145942346b40e998b8a3984f1b6c1 | [
"Apache-2.0"
] | 1 | 2021-01-20T03:26:07.000Z | 2021-01-20T03:26:07.000Z | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import itertools
import re
import sys
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import pytest
# Skip cli tests on Windows while they exhibit pathological behavior
# https://github.com/ros2/build_farmer/issues/248
# (module-level skip: none of the tests below run on Windows at all)
if sys.platform.startswith('win'):
    pytest.skip(
        'CLI tests can block for a pathological amount of time on Windows.',
        allow_module_level=True)
# Spot-check samples: the CLI output is expected to contain at least these
# interfaces, all of which ship with the test_msgs package.
some_messages_from_test_msgs = [
    'test_msgs/msg/BasicTypes',
    'test_msgs/msg/Constants',
    'test_msgs/msg/Strings',
]
some_services_from_test_msgs = [
    'test_msgs/srv/Arrays',
    'test_msgs/srv/BasicTypes',
    'test_msgs/srv/Empty',
]
some_actions_from_test_msgs = [
    'test_msgs/action/Fibonacci'
]
# Union of the samples above, used by the aggregate `list`/`package` tests.
some_interfaces = (
    some_messages_from_test_msgs +
    some_services_from_test_msgs +
    some_actions_from_test_msgs
)
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
    """Launch nothing extra; just signal that the tests may start."""
    ready_action = launch_testing.actions.ReadyToTest()
    return LaunchDescription([ready_action])
class TestROS2InterfaceCLI(unittest.TestCase):
@classmethod
def setUpClass(
    cls,
    launch_service,
    proc_info,
    proc_output
):
    """Install a class-level helper that runs `ros2 interface ...` commands.

    The helper is a context manager that launches the CLI as a test
    process and yields the launch_testing process tool for it.
    """
    @contextlib.contextmanager
    # Fix: the default used to be a shared mutable list (`[]`); an empty
    # tuple avoids the mutable-default pitfall and unpacks identically.
    def launch_interface_command(self, arguments, prepend_arguments=(), shell=False):
        interface_command_action = ExecuteProcess(
            cmd=[*prepend_arguments, 'ros2', 'interface', *arguments],
            additional_env={'PYTHONUNBUFFERED': '1'},
            name='ros2interface-cli',
            shell=shell,
            output='screen'
        )
        with launch_testing.tools.launch_process(
            launch_service, interface_command_action, proc_info, proc_output
        ) as interface_command:
            yield interface_command
    cls.launch_interface_command = launch_interface_command
def test_list_interfaces(self):
    """Every non-heading line of `interface list` names an interface."""
    with self.launch_interface_command(arguments=['list']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    strip_headings = launch_testing.tools.basic_output_filter(
        filtered_prefixes=['Messages:', 'Services:', 'Actions:']
    )
    lines = strip_headings(cmd.output).splitlines()
    name_pattern = re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+')
    assert launch_testing.tools.expect_output(
        expected_lines=[name_pattern] * len(lines),
        lines=lines,
        strict=True
    )
    assert launch_testing.tools.expect_output(
        expected_lines=some_interfaces,
        lines=lines,
        strict=False
    )
def test_list_messages(self):
    """`interface list -m` prints a heading plus only message names."""
    with self.launch_interface_command(arguments=['list', '-m']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    lines = cmd.output.splitlines()
    name_pattern = re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+')
    assert launch_testing.tools.expect_output(
        expected_lines=['Messages:'] + [name_pattern] * (len(lines) - 1),
        lines=lines,
        strict=True
    )
    assert launch_testing.tools.expect_output(
        expected_lines=some_messages_from_test_msgs,
        lines=lines,
        strict=False
    )
def test_list_services(self):
    """`interface list -s` prints a heading plus only service names."""
    with self.launch_interface_command(arguments=['list', '-s']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    lines = cmd.output.splitlines()
    name_pattern = re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+')
    assert launch_testing.tools.expect_output(
        expected_lines=['Services:'] + [name_pattern] * (len(lines) - 1),
        lines=lines,
        strict=True
    )
    assert launch_testing.tools.expect_output(
        expected_lines=some_services_from_test_msgs,
        lines=lines,
        strict=False
    )
def test_list_actions(self):
    """`interface list -a` prints a heading plus only action names."""
    with self.launch_interface_command(arguments=['list', '-a']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    lines = cmd.output.splitlines()
    name_pattern = re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+')
    assert launch_testing.tools.expect_output(
        expected_lines=['Actions:'] + [name_pattern] * (len(lines) - 1),
        lines=lines,
        strict=True
    )
    assert launch_testing.tools.expect_output(
        expected_lines=some_actions_from_test_msgs,
        lines=lines,
        strict=False
    )
def test_package_on_nonexistent_package(self):
    """`interface package` fails cleanly for an unknown package name."""
    with self.launch_interface_command(
        arguments=['package', 'not_a_package']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    # Unknown package is an error, not a crash: exit code 1 and one message.
    assert cmd.exit_code == 1
    assert launch_testing.tools.expect_output(
        expected_lines=["Unknown package 'not_a_package'"],
        text=cmd.output,
        strict=True
    )
def test_package_on_test_msgs(self):
    """`interface package test_msgs` lists only test_msgs interfaces."""
    with self.launch_interface_command(
        arguments=['package', 'test_msgs']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    lines = cmd.output.splitlines()
    name_pattern = re.compile(r'test_msgs/(msg|srv|action)/[A-z0-9_]+')
    assert launch_testing.tools.expect_output(
        expected_lines=[name_pattern] * len(lines),
        lines=lines,
        strict=True
    )
    assert all(interface in lines for interface in some_interfaces)
def test_packages(self):
    """test_msgs shows up in the `interface packages` listing."""
    with self.launch_interface_command(arguments=['packages']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert 'test_msgs' in cmd.output.splitlines()
def test_packages_with_messages(self):
    """test_msgs shows up among packages that define messages."""
    with self.launch_interface_command(arguments=['packages', '-m']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert 'test_msgs' in cmd.output.splitlines()
def test_packages_with_services(self):
    """test_msgs shows up among packages that define services."""
    with self.launch_interface_command(arguments=['packages', '-s']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert 'test_msgs' in cmd.output.splitlines()
def test_packages_with_actions(self):
    """test_msgs shows up among packages that define actions."""
    with self.launch_interface_command(arguments=['packages', '-a']) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert 'test_msgs' in cmd.output.splitlines()
def test_show_message(self):
    """`interface show` prints every field of test_msgs/msg/BasicTypes."""
    expected_fields = [
        'bool bool_value',
        'byte byte_value',
        'char char_value',
        'float32 float32_value',
        'float64 float64_value',
        'int8 int8_value',
        'uint8 uint8_value',
        'int16 int16_value',
        'uint16 uint16_value',
        'int32 int32_value',
        'uint32 uint32_value',
        'int64 int64_value',
        'uint64 uint64_value',
    ]
    with self.launch_interface_command(
        arguments=['show', 'test_msgs/msg/BasicTypes']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert launch_testing.tools.expect_output(
        expected_lines=expected_fields,
        text=cmd.output,
        strict=True
    )
def test_show_all_comments_for_message(self):
    """`--all-comments` keeps the comments of nested interface definitions."""
    expected_lines = [
        'builtin_interfaces/Duration duration_value',
        '\t# Duration defines a period between two time points.',
        '\t# Messages of this datatype are of ROS Time following this design:',
        '\t# https://design.ros2.org/articles/clock_and_time.html',
        '',
        '\t# Seconds component, range is valid over any possible int32 value.',
        '\tint32 sec',
        '',
        '\t# Nanoseconds component in the range of [0, 10e9).',
        '\tuint32 nanosec',
        'builtin_interfaces/Time time_value',
        '\t# This message communicates ROS Time defined here:',
        '\t# https://design.ros2.org/articles/clock_and_time.html',
        '',
        '\t# The seconds component, valid over all int32 values.',
        '\tint32 sec',
        '',
        '\t# The nanoseconds component, valid in the range [0, 10e9).',
        '\tuint32 nanosec',
    ]
    with self.launch_interface_command(
        arguments=['show', 'test_msgs/msg/Builtins', '--all-comments']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert launch_testing.tools.expect_output(
        expected_lines=expected_lines,
        text=cmd.output,
        strict=True
    )
def test_show_no_comments_for_message(self):
    """`--no-comments` strips all comments, leaving only field lines."""
    expected_lines = [
        'builtin_interfaces/Duration duration_value',
        '\tint32 sec',
        '\tuint32 nanosec',
        'builtin_interfaces/Time time_value',
        '\tint32 sec',
        '\tuint32 nanosec',
    ]
    with self.launch_interface_command(
        arguments=['show', 'test_msgs/msg/Builtins', '--no-comments']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert launch_testing.tools.expect_output(
        expected_lines=expected_lines,
        text=cmd.output,
        strict=True
    )
def test_show_service(self):
    """`interface show` prints request and response of srv/BasicTypes.

    Request and response of this service carry the identical field set,
    separated by the '---' marker.
    """
    typed_fields = [
        'bool bool_value',
        'byte byte_value',
        'char char_value',
        'float32 float32_value',
        'float64 float64_value',
        'int8 int8_value',
        'uint8 uint8_value',
        'int16 int16_value',
        'uint16 uint16_value',
        'int32 int32_value',
        'uint32 uint32_value',
        'int64 int64_value',
        'uint64 uint64_value',
        'string string_value',
    ]
    expected_lines = typed_fields + ['---'] + typed_fields
    with self.launch_interface_command(
        arguments=['show', 'test_msgs/srv/BasicTypes']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert launch_testing.tools.expect_output(
        expected_lines=expected_lines,
        text=cmd.output,
        strict=True
    )
def test_show_action(self):
    """`interface show` prints goal, result and feedback of an action."""
    expected_lines = [
        '#goal definition',
        'int32 order',
        '---',
        '#result definition',
        'int32[] sequence',
        '---',
        '#feedback',
        'int32[] sequence',
    ]
    with self.launch_interface_command(
        arguments=['show', 'test_msgs/action/Fibonacci']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert launch_testing.tools.expect_output(
        expected_lines=expected_lines,
        text=cmd.output,
        strict=True
    )
def test_show_nested_message(self):
    """Nested message fields are expanded, indented by one tab."""
    nested_fields = [
        'bool bool_value',
        'byte byte_value',
        'char char_value',
        'float32 float32_value',
        'float64 float64_value',
        'int8 int8_value',
        'uint8 uint8_value',
        'int16 int16_value',
        'uint16 uint16_value',
        'int32 int32_value',
        'uint32 uint32_value',
        'int64 int64_value',
        'uint64 uint64_value',
    ]
    expected_lines = ['BasicTypes basic_types_value'] + [
        '\t' + field for field in nested_fields
    ]
    with self.launch_interface_command(
        arguments=['show', 'test_msgs/msg/Nested']
    ) as cmd:
        assert cmd.wait_for_shutdown(timeout=2)
    assert cmd.exit_code == launch_testing.asserts.EXIT_OK
    assert launch_testing.tools.expect_output(
        expected_lines=expected_lines,
        text=cmd.output,
        strict=True
    )
def test_show_nested_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/NestedMessage']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'# goal definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'# result definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'# feedback',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_no_comments_for_nested_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/NestedMessage', '--no-comments']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_all_comments_for_nested_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/NestedMessage', '--all-comments']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'# goal definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\t# Duration defines a period between two time points.',
'\t\t# Messages of this datatype are of ROS Time following this design:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# Seconds component, range is valid over any possible int32 value.',
'\t\tint32 sec',
'',
'\t\t# Nanoseconds component in the range of [0, 10e9).',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\t# This message communicates ROS Time defined here:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# The seconds component, valid over all int32 values.',
'\t\tint32 sec',
'',
'\t\t# The nanoseconds component, valid in the range [0, 10e9).',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
'---',
'# result definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\t# Duration defines a period between two time points.',
'\t\t# Messages of this datatype are of ROS Time following this design:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# Seconds component, range is valid over any possible int32 value.',
'\t\tint32 sec',
'',
'\t\t# Nanoseconds component in the range of [0, 10e9).',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\t# This message communicates ROS Time defined here:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# The seconds component, valid over all int32 values.',
'\t\tint32 sec',
'',
'\t\t# The nanoseconds component, valid in the range [0, 10e9).',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
'---',
'# feedback',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\t# Duration defines a period between two time points.',
'\t\t# Messages of this datatype are of ROS Time following this design:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# Seconds component, range is valid over any possible int32 value.',
'\t\tint32 sec',
'',
'\t\t# Nanoseconds component in the range of [0, 10e9).',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\t# This message communicates ROS Time defined here:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# The seconds component, valid over all int32 values.',
'\t\tint32 sec',
'',
'\t\t# The nanoseconds component, valid in the range [0, 10e9).',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_not_a_package(self):
with self.launch_interface_command(
arguments=['show', 'not_a_package/msg/String']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == 1
assert launch_testing.tools.expect_output(
expected_lines=["Unknown package 'not_a_package'"],
text=interface_command.output,
strict=True
)
def test_show_not_an_interface(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/msg/NotAMessageTypeName']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == 1
assert launch_testing.tools.expect_output(
expected_lines=[re.compile(
r"Could not find the interface '.+NotAMessageTypeName\.idl'"
)],
text=interface_command.output,
strict=True
)
def test_show_stdin(self):
with self.launch_interface_command(
arguments=['show', '-'],
prepend_arguments=[
sys.executable, '-c', r'"print(\"test_msgs/msg/BasicTypes\")"', '|'
],
shell=True
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'bool bool_value',
'byte byte_value',
'char char_value',
'float32 float32_value',
'float64 float64_value',
'int8 int8_value',
'uint8 uint8_value',
'int16 int16_value',
'uint16 uint16_value',
'int32 int32_value',
'uint32 uint32_value',
'int64 int64_value',
'uint64 uint64_value',
],
text=interface_command.output,
strict=True
)
| 40.931034 | 90 | 0.55568 |
import contextlib
import itertools
import re
import sys
import unittest
from launch import LaunchDescription
from launch.actions import ExecuteProcess
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import launch_testing.markers
import launch_testing.tools
import pytest
# These CLI tests spawn real subprocesses; that is known to block for a very
# long time on Windows, so skip the whole module there.
if sys.platform.startswith('win'):
    pytest.skip(
        'CLI tests can block for a pathological amount of time on Windows.',
        allow_module_level=True)
# Small, stable samples of interfaces shipped with test_msgs. The list/package
# tests only require these to be *included* in the output, not an exact match.
some_messages_from_test_msgs = [
    'test_msgs/msg/BasicTypes',
    'test_msgs/msg/Constants',
    'test_msgs/msg/Strings',
]
some_services_from_test_msgs = [
    'test_msgs/srv/Arrays',
    'test_msgs/srv/BasicTypes',
    'test_msgs/srv/Empty',
]
some_actions_from_test_msgs = [
    'test_msgs/action/Fibonacci'
]
# Union of the samples above, used by the `list` and `package` tests.
some_interfaces = (
    some_messages_from_test_msgs +
    some_services_from_test_msgs +
    some_actions_from_test_msgs
)
@pytest.mark.rostest
@launch_testing.markers.keep_alive
def generate_test_description():
    """Provide an (otherwise empty) launch description so tests can run."""
    ready_action = launch_testing.actions.ReadyToTest()
    return LaunchDescription([ready_action])
class TestROS2InterfaceCLI(unittest.TestCase):
@classmethod
def setUpClass(
cls,
launch_service,
proc_info,
proc_output
):
@contextlib.contextmanager
def launch_interface_command(self, arguments, prepend_arguments=[], shell=False):
interface_command_action = ExecuteProcess(
cmd=[*prepend_arguments, 'ros2', 'interface', *arguments],
additional_env={'PYTHONUNBUFFERED': '1'},
name='ros2interface-cli',
shell=shell,
output='screen'
)
with launch_testing.tools.launch_process(
launch_service, interface_command_action, proc_info, proc_output
) as interface_command:
yield interface_command
cls.launch_interface_command = launch_interface_command
def test_list_interfaces(self):
with self.launch_interface_command(arguments=['list']) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
filter_ = launch_testing.tools.basic_output_filter(
filtered_prefixes=['Messages:', 'Services:', 'Actions:']
)
output_lines = filter_(interface_command.output).splitlines()
assert launch_testing.tools.expect_output(
expected_lines=itertools.repeat(
re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+'), len(output_lines)
),
lines=output_lines,
strict=True
)
assert launch_testing.tools.expect_output(
expected_lines=some_interfaces,
lines=output_lines,
strict=False
)
def test_list_messages(self):
with self.launch_interface_command(arguments=['list', '-m']) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert launch_testing.tools.expect_output(
expected_lines=itertools.chain(
['Messages:'], itertools.repeat(
re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+'), len(output_lines) - 1
)
),
lines=output_lines,
strict=True
)
assert launch_testing.tools.expect_output(
expected_lines=some_messages_from_test_msgs,
lines=output_lines,
strict=False
)
def test_list_services(self):
with self.launch_interface_command(arguments=['list', '-s']) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert launch_testing.tools.expect_output(
expected_lines=itertools.chain(
['Services:'], itertools.repeat(
re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+'), len(output_lines) - 1
)
),
lines=output_lines,
strict=True
)
assert launch_testing.tools.expect_output(
expected_lines=some_services_from_test_msgs,
lines=output_lines,
strict=False
)
def test_list_actions(self):
with self.launch_interface_command(arguments=['list', '-a']) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert launch_testing.tools.expect_output(
expected_lines=itertools.chain(
['Actions:'], itertools.repeat(
re.compile(r'\s*[A-z0-9_]+(/[A-z0-9_]+)+'), len(output_lines) - 1
)
),
lines=output_lines,
strict=True
)
assert launch_testing.tools.expect_output(
expected_lines=some_actions_from_test_msgs,
lines=output_lines,
strict=False
)
def test_package_on_nonexistent_package(self):
with self.launch_interface_command(
arguments=['package', 'not_a_package']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == 1
assert launch_testing.tools.expect_output(
expected_lines=["Unknown package 'not_a_package'"],
text=interface_command.output,
strict=True
)
def test_package_on_test_msgs(self):
with self.launch_interface_command(
arguments=['package', 'test_msgs']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert launch_testing.tools.expect_output(
expected_lines=itertools.repeat(
re.compile(r'test_msgs/(msg|srv|action)/[A-z0-9_]+'), len(output_lines)
),
lines=output_lines,
strict=True
)
assert all(interface in output_lines for interface in some_interfaces)
def test_packages(self):
with self.launch_interface_command(arguments=['packages']) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert 'test_msgs' in output_lines
def test_packages_with_messages(self):
with self.launch_interface_command(
arguments=['packages', '-m']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert 'test_msgs' in output_lines
def test_packages_with_services(self):
with self.launch_interface_command(
arguments=['packages', '-s']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert 'test_msgs' in output_lines
def test_packages_with_actions(self):
with self.launch_interface_command(
arguments=['packages', '-a']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
output_lines = interface_command.output.splitlines()
assert 'test_msgs' in output_lines
def test_show_message(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/msg/BasicTypes']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'bool bool_value',
'byte byte_value',
'char char_value',
'float32 float32_value',
'float64 float64_value',
'int8 int8_value',
'uint8 uint8_value',
'int16 int16_value',
'uint16 uint16_value',
'int32 int32_value',
'uint32 uint32_value',
'int64 int64_value',
'uint64 uint64_value',
],
text=interface_command.output,
strict=True
)
def test_show_all_comments_for_message(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/msg/Builtins', '--all-comments']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'builtin_interfaces/Duration duration_value',
'\t# Duration defines a period between two time points.',
'\t# Messages of this datatype are of ROS Time following this design:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# Seconds component, range is valid over any possible int32 value.',
'\tint32 sec',
'',
'\t# Nanoseconds component in the range of [0, 10e9).',
'\tuint32 nanosec',
'builtin_interfaces/Time time_value',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_no_comments_for_message(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/msg/Builtins', '--no-comments']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'builtin_interfaces/Duration duration_value',
'\tint32 sec',
'\tuint32 nanosec',
'builtin_interfaces/Time time_value',
'\tint32 sec',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_service(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/srv/BasicTypes']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'bool bool_value',
'byte byte_value',
'char char_value',
'float32 float32_value',
'float64 float64_value',
'int8 int8_value',
'uint8 uint8_value',
'int16 int16_value',
'uint16 uint16_value',
'int32 int32_value',
'uint32 uint32_value',
'int64 int64_value',
'uint64 uint64_value',
'string string_value',
'---',
'bool bool_value',
'byte byte_value',
'char char_value',
'float32 float32_value',
'float64 float64_value',
'int8 int8_value',
'uint8 uint8_value',
'int16 int16_value',
'uint16 uint16_value',
'int32 int32_value',
'uint32 uint32_value',
'int64 int64_value',
'uint64 uint64_value',
'string string_value',
],
text=interface_command.output,
strict=True
)
def test_show_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/Fibonacci']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'#goal definition',
'int32 order',
'---',
'#result definition',
'int32[] sequence',
'---',
'#feedback',
'int32[] sequence',
],
text=interface_command.output,
strict=True
)
def test_show_nested_message(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/msg/Nested']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'BasicTypes basic_types_value',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
],
text=interface_command.output,
strict=True
)
def test_show_nested_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/NestedMessage']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'# goal definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'# result definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'# feedback',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_no_comments_for_nested_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/NestedMessage', '--no-comments']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
'---',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\tint32 sec',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\tint32 sec',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_all_comments_for_nested_action(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/action/NestedMessage', '--all-comments']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
assert launch_testing.tools.expect_output(
expected_lines=[
'# goal definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\t# Duration defines a period between two time points.',
'\t\t# Messages of this datatype are of ROS Time following this design:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# Seconds component, range is valid over any possible int32 value.',
'\t\tint32 sec',
'',
'\t\t# Nanoseconds component in the range of [0, 10e9).',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\t# This message communicates ROS Time defined here:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# The seconds component, valid over all int32 values.',
'\t\tint32 sec',
'',
'\t\t# The nanoseconds component, valid in the range [0, 10e9).',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
'---',
'# result definition',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\t# Duration defines a period between two time points.',
'\t\t# Messages of this datatype are of ROS Time following this design:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# Seconds component, range is valid over any possible int32 value.',
'\t\tint32 sec',
'',
'\t\t# Nanoseconds component in the range of [0, 10e9).',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\t# This message communicates ROS Time defined here:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# The seconds component, valid over all int32 values.',
'\t\tint32 sec',
'',
'\t\t# The nanoseconds component, valid in the range [0, 10e9).',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
'---',
'# feedback',
'Builtins nested_field_no_pkg',
'\tbuiltin_interfaces/Duration duration_value',
'\t\t# Duration defines a period between two time points.',
'\t\t# Messages of this datatype are of ROS Time following this design:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# Seconds component, range is valid over any possible int32 value.',
'\t\tint32 sec',
'',
'\t\t# Nanoseconds component in the range of [0, 10e9).',
'\t\tuint32 nanosec',
'\tbuiltin_interfaces/Time time_value',
'\t\t# This message communicates ROS Time defined here:',
'\t\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t\t# The seconds component, valid over all int32 values.',
'\t\tint32 sec',
'',
'\t\t# The nanoseconds component, valid in the range [0, 10e9).',
'\t\tuint32 nanosec',
'test_msgs/BasicTypes nested_field',
'\tbool bool_value',
'\tbyte byte_value',
'\tchar char_value',
'\tfloat32 float32_value',
'\tfloat64 float64_value',
'\tint8 int8_value',
'\tuint8 uint8_value',
'\tint16 int16_value',
'\tuint16 uint16_value',
'\tint32 int32_value',
'\tuint32 uint32_value',
'\tint64 int64_value',
'\tuint64 uint64_value',
'builtin_interfaces/Time nested_different_pkg',
'\t# This message communicates ROS Time defined here:',
'\t# https://design.ros2.org/articles/clock_and_time.html',
'',
'\t# The seconds component, valid over all int32 values.',
'\tint32 sec',
'',
'\t# The nanoseconds component, valid in the range [0, 10e9).',
'\tuint32 nanosec',
],
text=interface_command.output,
strict=True
)
def test_show_not_a_package(self):
with self.launch_interface_command(
arguments=['show', 'not_a_package/msg/String']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == 1
assert launch_testing.tools.expect_output(
expected_lines=["Unknown package 'not_a_package'"],
text=interface_command.output,
strict=True
)
def test_show_not_an_interface(self):
with self.launch_interface_command(
arguments=['show', 'test_msgs/msg/NotAMessageTypeName']
) as interface_command:
assert interface_command.wait_for_shutdown(timeout=2)
assert interface_command.exit_code == 1
assert launch_testing.tools.expect_output(
expected_lines=[re.compile(
r"Could not find the interface '.+NotAMessageTypeName\.idl'"
)],
text=interface_command.output,
strict=True
)
    def test_show_stdin(self):
        # 'show -' reads the interface name from stdin; feed it by piping the
        # output of a Python one-liner through a shell pipeline.
        # NOTE: the nested quoting in prepend_arguments is shell-sensitive —
        # keep it byte-for-byte as is.
        with self.launch_interface_command(
            arguments=['show', '-'],
            prepend_arguments=[
                sys.executable, '-c', r'"print(\"test_msgs/msg/BasicTypes\")"', '|'
            ],
            shell=True
        ) as interface_command:
            assert interface_command.wait_for_shutdown(timeout=2)
            assert interface_command.exit_code == launch_testing.asserts.EXIT_OK
            # Expect the full flattened field list of test_msgs/msg/BasicTypes.
            assert launch_testing.tools.expect_output(
                expected_lines=[
                    'bool bool_value',
                    'byte byte_value',
                    'char char_value',
                    'float32 float32_value',
                    'float64 float64_value',
                    'int8 int8_value',
                    'uint8 uint8_value',
                    'int16 int16_value',
                    'uint16 uint16_value',
                    'int32 int32_value',
                    'uint32 uint32_value',
                    'int64 int64_value',
                    'uint64 uint64_value',
                ],
                text=interface_command.output,
                strict=True
            )
| true | true |
f7261a33bda347e1351ccdc5511b79b1e79f4d69 | 10,223 | py | Python | we.py | TimeTraveller-San/FairGAN | 526c2937714fc322714db54dc6a3f392f2c88e18 | [
"MIT"
] | 8 | 2020-03-06T12:53:53.000Z | 2021-08-31T18:11:36.000Z | we.py | TimeTraveller-San/FairGAN | 526c2937714fc322714db54dc6a3f392f2c88e18 | [
"MIT"
] | null | null | null | we.py | TimeTraveller-San/FairGAN | 526c2937714fc322714db54dc6a3f392f2c88e18 | [
"MIT"
] | null | null | null | from __future__ import print_function, division
import re
import sys
import numpy as np
import scipy.sparse
import codecs
from sklearn.decomposition import PCA
# Python 2/3 compatibility shim: make `open` unicode-aware on Python 2 and
# alias `unicode` to the native text type on Python 3.
if sys.version_info[0] < 3:
    import io
    open = io.open
else:
    unicode = str
"""
Tools for debiasing word embeddings
Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings
Tolga Bolukbasi, Kai-Wei Chang, James Zou, Venkatesh Saligrama, and Adam Kalai
2016
"""
DEFAULT_NUM_WORDS = 27000
# Short names for the pre-trained embedding files used in the experiments.
FILENAMES = {"g_wiki": "glove.6B.300d.small.txt",
             "g_twitter": "glove.twitter.27B.200d.small.txt",
             "g_crawl": "glove.840B.300d.small.txt",
             "w2v": "GoogleNews-word2vec.small.txt",
             "w2v_large": "GoogleNews-word2vec.txt"}
def dedup(seq):
    """Return the items of *seq* with duplicates removed, first-seen order kept."""
    # dict preserves insertion order (Python 3.7+), so this is an ordered dedup.
    return list(dict.fromkeys(seq))
def safe_word(w):
    """Return True for plain lowercase/underscore words worth keeping.

    Rejects words containing digits, uppercase letters or punctuation,
    words of 20+ characters, and words made entirely of underscores
    (including the empty string).

    Fix: previously this returned a ``re.Match`` object (or None/False),
    which only behaved like a boolean by accident; it now returns a real
    bool while remaining truthy/falsy in exactly the same cases.
    """
    return bool(re.match(r"^[a-z_]*$", w)
                and len(w) < 20
                and not re.match(r"^_*$", w))
def to_utf8(text, errors='strict', encoding='utf8'):
    """Return *text* as a UTF-8 encoded bytestring.

    Unicode input is encoded directly; byte input is first decoded using
    *encoding* so the round trip guarantees the output is valid UTF-8.
    """
    if not isinstance(text, unicode):
        text = unicode(text, encoding, errors=errors)
    return text.encode('utf8')
def load_embeddings_from_np(filename):
    """Load `<filename>.vocab` (one word per line) and `<filename>.wv.npy`.

    Returns (vocab, vectors): a list of words and the matching numpy matrix.
    """
    print('loading ...')
    with codecs.open(filename + '.vocab', 'r', 'utf-8') as vocab_file:
        vocab = [entry.strip() for entry in vocab_file]
    vectors = np.load(filename + '.wv.npy')
    return vocab, vectors
class WordEmbedding:
    """Word-embedding matrix plus the helpers used by the debiasing tooling.

    Loads vectors from a gensim .bin file, a word2vec-style .txt file, or a
    numpy vocab/.wv.npy pair, keeps rows unit-normalized, and offers lookup,
    neighbor, and analogy utilities.
    """
    def __init__(self, fname):
        # Cached arguments of the last compute_neighbors_if_necessary() call.
        self.thresh = None
        self.max_words = None
        # Human-readable provenance string, extended by mutating operations.
        self.desc = fname
        print("*** Reading data from " + fname)
        if fname.endswith(".bin"):
            import gensim.models
            model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)
            words = sorted([w for w in model.vocab], key=lambda w: model.vocab[w].index)
            vecs = [model[w] for w in words]
        elif fname.endswith(".txt"):
            print("Loading w2vec format")
            vecs = []
            words = []
            with open(fname, "r") as f:
                lines = f.readlines()
                for line in lines:
                    tokens = line.split()
                    # The last 300 tokens are the vector; anything before is
                    # the (possibly multi-token) word, re-joined with "_".
                    v = np.array([float(x) for x in tokens[-300:]])
                    w = "_".join([str(x) for x in tokens[:-300]])
                    if len(v) != 300:
                        print(f"Weird line: {tokens} | {len(v)}")
                        continue
                    words.append(w)
                    vecs.append(v)
        else:
            print("Loading numpy format")
            words, vecs = load_embeddings_from_np(fname)
        self.vecs = np.array(vecs, dtype='float32')
        print(self.vecs.shape)
        self.words = words
        self.reindex()
        # Normalize rows unless they are already (near) unit length.
        norms = np.linalg.norm(self.vecs, axis=1)
        if max(norms)-min(norms) > 0.0001:
            self.normalize()
    def reindex(self):
        # Rebuild word<->row lookups; must run after any change to words/vecs.
        self.index = {w: i for i, w in enumerate(self.words)}
        self.rindex = {i: w for i, w in enumerate(self.words)}
        self.n, self.d = self.vecs.shape
        assert self.n == len(self.words) == len(self.index)
        self._neighbors = None
        print(self.n, "words of dimension", self.d, ":", ", ".join(self.words[:4] + ["..."] + self.words[-4:]))
    def v(self, word):
        """Return the vector for *word* (KeyError if unknown)."""
        return self.vecs[self.index[word]]
    def diff(self, word1, word2):
        """Return the unit-normalized difference v(word1) - v(word2)."""
        v = self.vecs[self.index[word1]] - self.vecs[self.index[word2]]
        return v/np.linalg.norm(v)
    def normalize(self):
        """Scale every row to unit L2 norm."""
        self.desc += ", normalize"
        self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]
        self.reindex()
    def shrink(self, numwords):
        """Keep only the first *numwords* rows (assumed most frequent)."""
        self.desc += ", shrink " + str(numwords)
        self.filter_words(lambda w: self.index[w]<numwords)
    def filter_words(self, test):
        """
        Keep some words based on test, e.g. lambda x: x.lower()==x
        """
        self.desc += ", filter"
        kept_indices, words = zip(*[[i, w] for i, w in enumerate(self.words) if test(w)])
        self.words = list(words)
        self.vecs = self.vecs[kept_indices, :]
        self.reindex()
    def save(self, filename):
        """Write the embedding as plain text: one "word v1 v2 ..." per line."""
        with open(filename, "w") as f:
            f.write("\n".join([w+" " + " ".join([str(x) for x in v]) for w, v in zip(self.words, self.vecs)]))
        print("Wrote", self.n, "words to", filename)
    def save_w2v(self, filename, binary=True):
        """Write the embedding in word2vec format (binary by default)."""
        with open(filename, 'wb') as fout:
            fout.write(to_utf8("%s %s\n" % self.vecs.shape))
            # store in sorted order: most frequent words at the top
            for i, word in enumerate(self.words):
                row = self.vecs[i]
                if binary:
                    # NOTE(review): ndarray.tostring() is deprecated in favor
                    # of tobytes(); behavior is identical.
                    fout.write(to_utf8(word) + b" " + row.tostring())
                else:
                    fout.write(to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
    def remove_directions(self, directions): #directions better be orthogonal
        """Project all vectors onto the complement of each direction.

        Each direction is either a raw numpy vector or a (word1, word2) pair.
        """
        self.desc += ", removed"
        for direction in directions:
            self.desc += " "
            if type(direction) is np.ndarray:
                v = direction / np.linalg.norm(direction)
                self.desc += "vector "
            else:
                w1, w2 = direction
                v = self.diff(w1, w2)
                self.desc += w1 + "-" + w2
            self.vecs = self.vecs - self.vecs.dot(v)[:, np.newaxis].dot(v[np.newaxis, :])
        self.normalize()
    def compute_neighbors_if_necessary(self, thresh, max_words):
        """Cache unit difference vectors for all sufficiently close word pairs.

        Memoized per (thresh, max_words) in self._neighbors.
        """
        thresh = float(thresh) # dang python 2.7!
        if self._neighbors is not None and self.thresh == thresh and self.max_words == max_words:
            return
        print("Computing neighbors")
        self.thresh = thresh
        self.max_words = max_words
        vecs = self.vecs[:max_words]
        dots = vecs.dot(vecs.T)
        # Keep only pairs whose dot product is at least 1 - thresh/2.
        dots = scipy.sparse.csr_matrix(dots * (dots >= 1-thresh/2))
        from collections import Counter
        rows, cols = dots.nonzero()
        nums = list(Counter(rows).values())
        print("Mean:", np.mean(nums)-1)
        print("Median:", np.median(nums)-1)
        rows, cols, vecs = zip(*[(i, j, vecs[i]-vecs[j]) for i, j, x in zip(rows, cols, dots.data) if i<j])
        self._neighbors = rows, cols, np.array([v/np.linalg.norm(v) for v in vecs])
    def neighbors(self, word, thresh=1):
        """Return words whose |dot| with v(word) exceeds 1 - thresh/2.

        NOTE(review): results are keyed by |dot| in a dict, so distinct
        words with exactly equal scores collapse to a single entry.
        """
        dots = self.vecs.dot(self.v(word))
        dd = dict(zip([abs(dot) for dot in dots], [i for i in range(len(dots))]))
        ns=[]
        for dot in sorted(dd, reverse=True):
            if dot>1-thresh/2:
                ns.append(self.words[int(dd[dot])])
        return ns[1:] #Since first word is the word itself
    def neighborsNoSort(self, word, thresh=1):
        # NOTE(review): currently identical to neighbors() (it also sorts);
        # the name suggests an unsorted variant was intended.
        dots = self.vecs.dot(self.v(word))
        dd = dict(zip([abs(dot) for dot in dots], [i for i in range(len(dots))]))
        ns=[]
        for dot in sorted(dd, reverse=True):
            if dot>1-thresh/2:
                ns.append(self.words[int(dd[dot])])
        return ns[1:] #Since first word is the word itself
    def more_words_like_these(self, words, topn=50, max_freq=100000):
        """Return the *topn* words (within the max_freq most frequent) closest to sum(v(w))."""
        v = sum(self.v(w) for w in words)
        dots = self.vecs[:max_freq].dot(v)
        thresh = sorted(dots)[-topn]
        words = [w for w, dot in zip(self.words, dots) if dot>=thresh]
        return sorted(words, key=lambda w: self.v(w).dot(v))[-topn:][::-1]
    def best_analogies_dist_thresh(self, v, thresh=1, topn=500, max_words=50000):
        """Metric is cos(a-c, b-d) if |b-d|^2 < thresh, otherwise 0
        """
        vecs, vocab = self.vecs[:max_words], self.words[:max_words]
        self.compute_neighbors_if_necessary(thresh, max_words)
        rows, cols, vecs = self._neighbors
        scores = vecs.dot(v/np.linalg.norm(v))
        # Visit candidate pairs by decreasing |score|, greedily keeping each
        # left/right word at most once.
        pi = np.argsort(-abs(scores))
        ans = []
        usedL = set()
        usedR = set()
        for i in pi:
            if abs(scores[i])<0.001:
                break
            row = rows[i] if scores[i] > 0 else cols[i]
            col = cols[i] if scores[i] > 0 else rows[i]
            if row in usedL or col in usedR:
                continue
            usedL.add(row)
            usedR.add(col)
            ans.append((vocab[row], vocab[col], abs(scores[i])))
            if len(ans)==topn:
                break
        return ans
def viz(analogies):
    """Pretty-print (word_a, word_b, score) analogy triples, one per line."""
    formatted = []
    for rank, (left, right, score) in enumerate(analogies):
        formatted.append(str(rank).rjust(4) + left.rjust(29) + " | "
                         + right.ljust(29) + str(score)[:4])
    print("\n".join(formatted))
def text_plot_words(xs, ys, words, width = 90, height = 40, filename=None):
    """Crude ASCII scatter plot placing each word at its (x, y) position.

    Coordinates are rescaled to the character grid; a word is skipped when it
    would overlap one already placed. Writes to *filename* if given,
    otherwise prints the grid to stdout.
    """
    PADDING = 10 # num chars on left and right in case words spill over
    res = [[' ' for i in range(width)] for j in range(height)]
    def rescale(nums):
        # Map values linearly onto [0, 1].
        a = min(nums)
        b = max(nums)
        return [(x-a)/(b-a) for x in nums]
    print("x:", (min(xs), max(xs)), "y:",(min(ys),max(ys)))
    xs = rescale(xs)
    ys = rescale(ys)
    for (x, y, word) in zip(xs, ys, words):
        i = int(x*(width - 1 - PADDING))
        j = int(y*(height-1))
        row = res[j]
        # Skip the word if any cell it would occupy (plus one char of
        # breathing room on each side) is already taken.
        z = list(row[i2] != ' ' for i2 in range(max(i-1, 0), min(width, i + len(word) + 1)))
        if any(z):
            continue
        for k in range(len(word)):
            if i+k>=width:
                break
            row[i+k] = word[k]
    string = "\n".join("".join(r) for r in res)
    # return string
    if filename:
        with open(filename, "w", encoding="utf8") as f:
            f.write(string)
        print("Wrote to", filename)
    else:
        print(string)
def doPCA(pairs, embedding, num_components = 10):
    """Fit a PCA on the centered difference vectors of (word, word) pairs.

    Each pair contributes two rows: each word's vector minus the pair's
    midpoint. The leading component approximates the shared (bias) axis.
    """
    rows = []
    for left, right in pairs:
        midpoint = (embedding.v(left) + embedding.v(right))/2
        rows.append(embedding.v(left) - midpoint)
        rows.append(embedding.v(right) - midpoint)
    pca = PCA(n_components = num_components)
    pca.fit(np.array(rows))
    return pca
def drop(u, v):
    """Return the component of *u* orthogonal to *v* (vector rejection)."""
    projection = v * (u.dot(v) / v.dot(v))
    return u - projection
| 36.773381 | 128 | 0.555806 | from __future__ import print_function, division
import re
import sys
import numpy as np
import scipy.sparse
import codecs
from sklearn.decomposition import PCA
# Python 2/3 compatibility shim: make `open` unicode-aware on Python 2 and
# alias `unicode` to the native text type on Python 3.
if sys.version_info[0] < 3:
    import io
    open = io.open
else:
    unicode = str
DEFAULT_NUM_WORDS = 27000
# Short names for the pre-trained embedding files used in the experiments.
FILENAMES = {"g_wiki": "glove.6B.300d.small.txt",
             "g_twitter": "glove.twitter.27B.200d.small.txt",
             "g_crawl": "glove.840B.300d.small.txt",
             "w2v": "GoogleNews-word2vec.small.txt",
             "w2v_large": "GoogleNews-word2vec.txt"}
def dedup(seq):
    """Drop repeated items from *seq*, preserving first-occurrence order."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def safe_word(w):
    """Return True for plain lowercase/underscore words worth keeping.

    Rejects words containing digits, uppercase letters or punctuation,
    words of 20+ characters, and words made entirely of underscores
    (including the empty string).

    Fix: previously this returned a ``re.Match`` object (or None/False)
    rather than a real bool; truthiness is unchanged.
    """
    return bool(re.match(r"^[a-z_]*$", w)
                and len(w) < 20
                and not re.match(r"^_*$", w))
def to_utf8(text, errors='strict', encoding='utf8'):
    """Return *text* as a UTF-8 bytestring, decoding byte input via *encoding* first."""
    if not isinstance(text, unicode):
        # Full bytes -> unicode -> utf8 round trip guarantees valid UTF-8.
        text = unicode(text, encoding, errors=errors)
    return text.encode('utf8')
def load_embeddings_from_np(filename):
    """Read `<filename>.vocab` and `<filename>.wv.npy`; return (vocab, vectors)."""
    print('loading ...')
    with codecs.open(filename + '.vocab', 'r', 'utf-8') as handle:
        vocab = [token.strip() for token in handle]
    return vocab, np.load(filename + '.wv.npy')
class WordEmbedding:
    """Word-embedding matrix plus the helpers used by the debiasing tooling.

    Loads vectors from a gensim .bin file, a word2vec-style .txt file, or a
    numpy vocab/.wv.npy pair, keeps rows unit-normalized, and offers lookup,
    neighbor, and analogy utilities.
    """
    def __init__(self, fname):
        # Cached arguments of the last compute_neighbors_if_necessary() call.
        self.thresh = None
        self.max_words = None
        # Human-readable provenance string, extended by mutating operations.
        self.desc = fname
        print("*** Reading data from " + fname)
        if fname.endswith(".bin"):
            import gensim.models
            model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=True)
            words = sorted([w for w in model.vocab], key=lambda w: model.vocab[w].index)
            vecs = [model[w] for w in words]
        elif fname.endswith(".txt"):
            print("Loading w2vec format")
            vecs = []
            words = []
            with open(fname, "r") as f:
                lines = f.readlines()
                for line in lines:
                    tokens = line.split()
                    # The last 300 tokens are the vector; anything before is
                    # the (possibly multi-token) word, re-joined with "_".
                    v = np.array([float(x) for x in tokens[-300:]])
                    w = "_".join([str(x) for x in tokens[:-300]])
                    if len(v) != 300:
                        print(f"Weird line: {tokens} | {len(v)}")
                        continue
                    words.append(w)
                    vecs.append(v)
        else:
            print("Loading numpy format")
            words, vecs = load_embeddings_from_np(fname)
        self.vecs = np.array(vecs, dtype='float32')
        print(self.vecs.shape)
        self.words = words
        self.reindex()
        # Normalize rows unless they are already (near) unit length.
        norms = np.linalg.norm(self.vecs, axis=1)
        if max(norms)-min(norms) > 0.0001:
            self.normalize()
    def reindex(self):
        # Rebuild word<->row lookups; must run after any change to words/vecs.
        self.index = {w: i for i, w in enumerate(self.words)}
        self.rindex = {i: w for i, w in enumerate(self.words)}
        self.n, self.d = self.vecs.shape
        assert self.n == len(self.words) == len(self.index)
        self._neighbors = None
        print(self.n, "words of dimension", self.d, ":", ", ".join(self.words[:4] + ["..."] + self.words[-4:]))
    def v(self, word):
        """Return the vector for *word* (KeyError if unknown)."""
        return self.vecs[self.index[word]]
    def diff(self, word1, word2):
        """Return the unit-normalized difference v(word1) - v(word2)."""
        v = self.vecs[self.index[word1]] - self.vecs[self.index[word2]]
        return v/np.linalg.norm(v)
    def normalize(self):
        """Scale every row to unit L2 norm."""
        self.desc += ", normalize"
        self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]
        self.reindex()
    def shrink(self, numwords):
        """Keep only the first *numwords* rows (assumed most frequent)."""
        self.desc += ", shrink " + str(numwords)
        self.filter_words(lambda w: self.index[w]<numwords)
    def filter_words(self, test):
        """
        Keep some words based on test, e.g. lambda x: x.lower()==x
        """
        self.desc += ", filter"
        kept_indices, words = zip(*[[i, w] for i, w in enumerate(self.words) if test(w)])
        self.words = list(words)
        self.vecs = self.vecs[kept_indices, :]
        self.reindex()
    def save(self, filename):
        """Write the embedding as plain text: one "word v1 v2 ..." per line."""
        with open(filename, "w") as f:
            f.write("\n".join([w+" " + " ".join([str(x) for x in v]) for w, v in zip(self.words, self.vecs)]))
        print("Wrote", self.n, "words to", filename)
    def save_w2v(self, filename, binary=True):
        """Write the embedding in word2vec format (binary by default)."""
        with open(filename, 'wb') as fout:
            fout.write(to_utf8("%s %s\n" % self.vecs.shape))
            # store in sorted order: most frequent words at the top
            for i, word in enumerate(self.words):
                row = self.vecs[i]
                if binary:
                    # NOTE(review): ndarray.tostring() is deprecated in favor
                    # of tobytes(); behavior is identical.
                    fout.write(to_utf8(word) + b" " + row.tostring())
                else:
                    fout.write(to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
    def remove_directions(self, directions): #directions better be orthogonal
        """Project all vectors onto the complement of each direction.

        Each direction is either a raw numpy vector or a (word1, word2) pair.
        """
        self.desc += ", removed"
        for direction in directions:
            self.desc += " "
            if type(direction) is np.ndarray:
                v = direction / np.linalg.norm(direction)
                self.desc += "vector "
            else:
                w1, w2 = direction
                v = self.diff(w1, w2)
                self.desc += w1 + "-" + w2
            self.vecs = self.vecs - self.vecs.dot(v)[:, np.newaxis].dot(v[np.newaxis, :])
        self.normalize()
    def compute_neighbors_if_necessary(self, thresh, max_words):
        """Cache unit difference vectors for all sufficiently close word pairs.

        Memoized per (thresh, max_words) in self._neighbors.
        """
        thresh = float(thresh) # dang python 2.7!
        if self._neighbors is not None and self.thresh == thresh and self.max_words == max_words:
            return
        print("Computing neighbors")
        self.thresh = thresh
        self.max_words = max_words
        vecs = self.vecs[:max_words]
        dots = vecs.dot(vecs.T)
        # Keep only pairs whose dot product is at least 1 - thresh/2.
        dots = scipy.sparse.csr_matrix(dots * (dots >= 1-thresh/2))
        from collections import Counter
        rows, cols = dots.nonzero()
        nums = list(Counter(rows).values())
        print("Mean:", np.mean(nums)-1)
        print("Median:", np.median(nums)-1)
        rows, cols, vecs = zip(*[(i, j, vecs[i]-vecs[j]) for i, j, x in zip(rows, cols, dots.data) if i<j])
        self._neighbors = rows, cols, np.array([v/np.linalg.norm(v) for v in vecs])
    def neighbors(self, word, thresh=1):
        """Return words whose |dot| with v(word) exceeds 1 - thresh/2.

        NOTE(review): results are keyed by |dot| in a dict, so distinct
        words with exactly equal scores collapse to a single entry.
        """
        dots = self.vecs.dot(self.v(word))
        dd = dict(zip([abs(dot) for dot in dots], [i for i in range(len(dots))]))
        ns=[]
        for dot in sorted(dd, reverse=True):
            if dot>1-thresh/2:
                ns.append(self.words[int(dd[dot])])
        return ns[1:] #Since first word is the word itself
    def neighborsNoSort(self, word, thresh=1):
        # NOTE(review): currently identical to neighbors() (it also sorts);
        # the name suggests an unsorted variant was intended.
        dots = self.vecs.dot(self.v(word))
        dd = dict(zip([abs(dot) for dot in dots], [i for i in range(len(dots))]))
        ns=[]
        for dot in sorted(dd, reverse=True):
            if dot>1-thresh/2:
                ns.append(self.words[int(dd[dot])])
        return ns[1:] #Since first word is the word itself
    def more_words_like_these(self, words, topn=50, max_freq=100000):
        """Return the *topn* words (within the max_freq most frequent) closest to sum(v(w))."""
        v = sum(self.v(w) for w in words)
        dots = self.vecs[:max_freq].dot(v)
        thresh = sorted(dots)[-topn]
        words = [w for w, dot in zip(self.words, dots) if dot>=thresh]
        return sorted(words, key=lambda w: self.v(w).dot(v))[-topn:][::-1]
    def best_analogies_dist_thresh(self, v, thresh=1, topn=500, max_words=50000):
        """Metric is cos(a-c, b-d) if |b-d|^2 < thresh, otherwise 0
        """
        vecs, vocab = self.vecs[:max_words], self.words[:max_words]
        self.compute_neighbors_if_necessary(thresh, max_words)
        rows, cols, vecs = self._neighbors
        scores = vecs.dot(v/np.linalg.norm(v))
        # Visit candidate pairs by decreasing |score|, greedily keeping each
        # left/right word at most once.
        pi = np.argsort(-abs(scores))
        ans = []
        usedL = set()
        usedR = set()
        for i in pi:
            if abs(scores[i])<0.001:
                break
            row = rows[i] if scores[i] > 0 else cols[i]
            col = cols[i] if scores[i] > 0 else rows[i]
            if row in usedL or col in usedR:
                continue
            usedL.add(row)
            usedR.add(col)
            ans.append((vocab[row], vocab[col], abs(scores[i])))
            if len(ans)==topn:
                break
        return ans
def viz(analogies):
    """Pretty-print (word_a, word_b, score) analogy triples, one per line."""
    rendered = [
        str(idx).rjust(4) + pair[0].rjust(29) + " | " + pair[1].ljust(29) + str(pair[2])[:4]
        for idx, pair in enumerate(analogies)
    ]
    print("\n".join(rendered))
def text_plot_words(xs, ys, words, width = 90, height = 40, filename=None):
    """Crude ASCII scatter plot placing each word at its (x, y) position.

    Coordinates are rescaled to the character grid; a word is skipped when it
    would overlap one already placed. Writes to *filename* if given,
    otherwise prints the grid to stdout.
    """
    PADDING = 10 # num chars on left and right in case words spill over
    res = [[' ' for i in range(width)] for j in range(height)]
    def rescale(nums):
        # Map values linearly onto [0, 1].
        a = min(nums)
        b = max(nums)
        return [(x-a)/(b-a) for x in nums]
    print("x:", (min(xs), max(xs)), "y:",(min(ys),max(ys)))
    xs = rescale(xs)
    ys = rescale(ys)
    for (x, y, word) in zip(xs, ys, words):
        i = int(x*(width - 1 - PADDING))
        j = int(y*(height-1))
        row = res[j]
        # Skip the word if any cell it would occupy (plus one char of
        # breathing room on each side) is already taken.
        z = list(row[i2] != ' ' for i2 in range(max(i-1, 0), min(width, i + len(word) + 1)))
        if any(z):
            continue
        for k in range(len(word)):
            if i+k>=width:
                break
            row[i+k] = word[k]
    string = "\n".join("".join(r) for r in res)
    # return string
    if filename:
        with open(filename, "w", encoding="utf8") as f:
            f.write(string)
        print("Wrote to", filename)
    else:
        print(string)
def doPCA(pairs, embedding, num_components = 10):
    """Fit a PCA on centered difference vectors of (word, word) pairs.

    Each pair contributes both words' vectors minus their midpoint; the
    leading component approximates the shared (bias) axis.
    """
    samples = []
    for first, second in pairs:
        mid = (embedding.v(first) + embedding.v(second))/2
        samples.append(embedding.v(first) - mid)
        samples.append(embedding.v(second) - mid)
    pca = PCA(n_components = num_components)
    pca.fit(np.array(samples))
    return pca
def drop(u, v):
    """Return *u* with its projection onto *v* removed (vector rejection)."""
    scale = u.dot(v) / v.dot(v)
    return u - v * scale
| true | true |
f7261adeb044fff4db6dbfd9fb7364d1c5977719 | 2,556 | py | Python | PDK_Generator/inverse_design_y_branch/lumopt/geometries/parameterized_geometry.py | seanlam97/PDK_Generator | 15c1f4f56575f8e21ea874443d06ef740ccb5aa5 | [
"MIT"
] | null | null | null | PDK_Generator/inverse_design_y_branch/lumopt/geometries/parameterized_geometry.py | seanlam97/PDK_Generator | 15c1f4f56575f8e21ea874443d06ef740ccb5aa5 | [
"MIT"
] | 3 | 2021-08-24T23:31:42.000Z | 2021-08-25T16:45:54.000Z | PDK_Generator/inverse_design_y_branch/lumopt/geometries/parameterized_geometry.py | seanlam97/PDK_Generator | 15c1f4f56575f8e21ea874443d06ef740ccb5aa5 | [
"MIT"
] | null | null | null | import numpy as np
import inspect
from lumopt.geometries.geometry import Geometry
class ParameterizedGeometry(Geometry):
    """
    Defines a parametrized geometry using any of the built-in geometric structures available in the FDTD CAD.
    Users must provide a Python function with the signature ('params', 'fdtd', 'only_update'). The function
    must take the optimization parameters and a handle to the FDTD CAD to build the geometry under optimization
    (material assignments included). The flag 'only_update' is used to avoid frequent recreations of the parameterized
    geometry: when the flag is true, it is assumed that the geometry was already added at least once to the CAD.
    Parameters
    ----------
    :param func: function with the signature ('params', 'fdtd', 'only_update', **kwargs).
    :param initial_params: flat array with the initial optimization parameter values.
    :param bounds: bounding ranges (min/max pairs) for each optimization parameter.
    :param dx: step size for computing the figure of merit gradient using permittivity perturbations.
    """
    def __init__(self, func, initial_params, bounds, dx, deps_num_threads=1):
        self.deps_num_threads=deps_num_threads
        self.func = func
        self.current_params = np.array(initial_params).flatten()
        self.bounds = bounds
        self.dx = float(dx)
        # Validate that 'func' is a plain function taking exactly the three
        # expected positional arguments.
        if inspect.isfunction(self.func):
            bound_args = inspect.signature(self.func).bind('params', 'fdtd', 'only_update')
            if bound_args.args != ('params', 'fdtd', 'only_update'):
                raise UserWarning("user defined function does not take three positional arguments.")
        else:
            raise UserWarning("argument 'func' must be a Python function.")
        if self.dx <= 0.0:
            raise UserWarning("step size must be positive.")
        # Optimization trajectory: one flat parameter vector per update.
        self.params_hist = list(self.current_params)
    def update_geometry(self, params, sim):
        # Record the new parameter vector; the CAD itself is updated lazily
        # by add_geo().
        self.current_params = params
        self.params_hist.append(params)
    def get_current_params(self):
        return self.current_params
    def calculate_gradients(self, gradient_fields):
        # Shape-derivative gradients are not available for arbitrary user
        # geometries; the optimizer must fall back to dx-based perturbations.
        raise UserWarning("unsupported gradient calculation method.")
    def add_geo(self, sim, params, only_update):
        # (Re)build the geometry in the CAD; must be in layout mode first.
        sim.fdtd.switchtolayout()
        if params is None:
            return self.func(self.current_params, sim.fdtd, only_update)
        else:
            return self.func(params, sim.fdtd, only_update)
| 45.642857 | 122 | 0.673709 | import numpy as np
import inspect
from lumopt.geometries.geometry import Geometry
class ParameterizedGeometry(Geometry):
    """User-defined parametrized geometry for the FDTD CAD.

    *func* must have the signature ('params', 'fdtd', 'only_update') and is
    responsible for building (or updating, when only_update is true) the
    geometry under optimization, material assignments included.
    """
    def __init__(self, func, initial_params, bounds, dx, deps_num_threads=1):
        self.deps_num_threads=deps_num_threads
        self.func = func
        self.current_params = np.array(initial_params).flatten()
        self.bounds = bounds
        self.dx = float(dx)
        # Validate that 'func' is a plain function taking exactly the three
        # expected positional arguments.
        if inspect.isfunction(self.func):
            bound_args = inspect.signature(self.func).bind('params', 'fdtd', 'only_update')
            if bound_args.args != ('params', 'fdtd', 'only_update'):
                raise UserWarning("user defined function does not take three positional arguments.")
        else:
            raise UserWarning("argument 'func' must be a Python function.")
        if self.dx <= 0.0:
            raise UserWarning("step size must be positive.")
        # Optimization trajectory: one flat parameter vector per update.
        self.params_hist = list(self.current_params)
    def update_geometry(self, params, sim):
        # Record the new parameter vector; the CAD itself is updated lazily
        # by add_geo().
        self.current_params = params
        self.params_hist.append(params)
    def get_current_params(self):
        return self.current_params
    def calculate_gradients(self, gradient_fields):
        # Shape-derivative gradients are not available for arbitrary user
        # geometries; the optimizer must fall back to dx-based perturbations.
        raise UserWarning("unsupported gradient calculation method.")
    def add_geo(self, sim, params, only_update):
        # (Re)build the geometry in the CAD; must be in layout mode first.
        sim.fdtd.switchtolayout()
        if params is None:
            return self.func(self.current_params, sim.fdtd, only_update)
        else:
            return self.func(params, sim.fdtd, only_update)
| true | true |
f7261d7103b3c953a4264b678c7599501789f591 | 1,580 | py | Python | 2020/python/day-15-cleaned.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | 1 | 2021-12-04T18:09:44.000Z | 2021-12-04T18:09:44.000Z | 2020/python/day-15-cleaned.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | 2020/python/day-15-cleaned.py | tadhg-ohiggins/advent-of-code | d0f113955940e69cbe0953607f62862f8a8bb830 | [
"CC0-1.0"
] | null | null | null | from functools import partial
from itertools import count
from typing import List
from tutils import lmap, splitstrip, load_and_process_input
DAY = "15"
INPUT = f"input-{DAY}.txt"
# Known-good puzzle answers, used as regression checks in cli_main().
ANSWER1 = 240
ANSWER2 = 505
# Worked examples from the puzzle statement:
# (starting numbers, ordinal to find, expected value).
testdata = [
    ([0, 3, 6], 2020, 436),
    ([1, 3, 2], 2020, 1),
    ([2, 1, 3], 2020, 10),
    ([1, 2, 3], 2020, 27),
    ([2, 3, 1], 2020, 78),
    ([3, 2, 1], 2020, 438),
    ([3, 1, 2], 2020, 1836),
]
def find_nth_number(numbers: List[int], ordinal: int) -> int:
    """Return the `ordinal`-th (1-indexed) number of the memory game.

    Each turn speaks 0 if the previous number was new, otherwise the gap
    between its last two occurrences (AoC 2020 day 15 / Van Eck sequence).

    Fixes over the previous implementation:
    - no longer mutates the caller's list;
    - O(#distinct) memory instead of storing every spoken number;
    - handles ``ordinal <= len(numbers)`` (previously an infinite loop);
    - respects earlier occurrences of a repeated final seed.
    """
    if ordinal <= len(numbers):
        return numbers[ordinal - 1]
    # Last turn (0-indexed) each value was spoken, excluding the current one.
    seen = {x: i for i, x in enumerate(numbers[:-1])}
    current = numbers[-1]
    for turn in range(len(numbers) - 1, ordinal - 1):
        spoken = turn - seen[current] if current in seen else 0
        seen[current] = turn
        current = spoken
    return current
def process(numbers: List[int], ordinal: int) -> int:
    """Thin wrapper around find_nth_number, kept for symmetry with other days."""
    return find_nth_number(numbers, ordinal)
def cli_main() -> None:
    """Load the puzzle input, verify the worked examples, print both answers."""
    # Pipeline applied to the raw input file: strip, split on commas, to ints.
    input_funcs = [
        partial(str.strip),
        partial(splitstrip, sep=","),
        partial(lmap, int),
    ]
    numbers = load_and_process_input(INPUT, input_funcs)
    # Sanity-check the solver against the puzzle's worked examples first.
    for nums, ordinal, tanswer in testdata:
        testanswer = find_nth_number(nums, ordinal)
        assert testanswer == tanswer
    # Pass copies so the parsed input list stays intact between parts.
    answer_one = process(numbers[:], 2020)
    assert answer_one == ANSWER1
    print("Answer one:", answer_one)
    answer_two = process(numbers[:], 30000000)
    assert answer_two == ANSWER2
    print("Answer two:", answer_two)
if __name__ == "__main__":
    cli_main()
| 26.333333 | 63 | 0.621519 | from functools import partial
from itertools import count
from typing import List
from tutils import lmap, splitstrip, load_and_process_input
DAY = "15"
INPUT = f"input-{DAY}.txt"
# Known-good puzzle answers, used as regression checks in cli_main().
ANSWER1 = 240
ANSWER2 = 505
# Worked examples from the puzzle statement:
# (starting numbers, ordinal to find, expected value).
testdata = [
    ([0, 3, 6], 2020, 436),
    ([1, 3, 2], 2020, 1),
    ([2, 1, 3], 2020, 10),
    ([1, 2, 3], 2020, 27),
    ([2, 3, 1], 2020, 78),
    ([3, 2, 1], 2020, 438),
    ([3, 1, 2], 2020, 1836),
]
def find_nth_number(numbers: List[int], ordinal: int) -> int:
    """Return the `ordinal`-th (1-indexed) number of the memory game.

    Each turn speaks 0 if the previous number was new, otherwise the gap
    between its last two occurrences (AoC 2020 day 15 / Van Eck sequence).

    Fixes over the previous implementation: no caller-list mutation,
    O(#distinct) memory, and no infinite loop when ordinal <= len(numbers).
    """
    if ordinal <= len(numbers):
        return numbers[ordinal - 1]
    # Last turn (0-indexed) each value was spoken, excluding the current one.
    seen = {x: i for i, x in enumerate(numbers[:-1])}
    current = numbers[-1]
    for turn in range(len(numbers) - 1, ordinal - 1):
        spoken = turn - seen[current] if current in seen else 0
        seen[current] = turn
        current = spoken
    return current
def process(numbers: List[int], ordinal: int) -> int:
    """Thin wrapper around find_nth_number, kept for symmetry with other days."""
    return find_nth_number(numbers, ordinal)
def cli_main() -> None:
    """Load the puzzle input, verify the worked examples, print both answers."""
    # Pipeline applied to the raw input file: strip, split on commas, to ints.
    input_funcs = [
        partial(str.strip),
        partial(splitstrip, sep=","),
        partial(lmap, int),
    ]
    numbers = load_and_process_input(INPUT, input_funcs)
    # Sanity-check the solver against the puzzle's worked examples first.
    for nums, ordinal, tanswer in testdata:
        testanswer = find_nth_number(nums, ordinal)
        assert testanswer == tanswer
    # Pass copies so the parsed input list stays intact between parts.
    answer_one = process(numbers[:], 2020)
    assert answer_one == ANSWER1
    print("Answer one:", answer_one)
    answer_two = process(numbers[:], 30000000)
    assert answer_two == ANSWER2
    print("Answer two:", answer_two)
if __name__ == "__main__":
    cli_main()
| true | true |
f7261da738c0c2709539722e8b91f6ba23d28fd1 | 8,211 | py | Python | bridgedb/test/test_smtp.py | jugheadjones10/bridgedb | 94d6bca4b22458c156898785d8f6ccedf562d884 | [
"BSD-3-Clause-Clear"
] | null | null | null | bridgedb/test/test_smtp.py | jugheadjones10/bridgedb | 94d6bca4b22458c156898785d8f6ccedf562d884 | [
"BSD-3-Clause-Clear"
] | null | null | null | bridgedb/test/test_smtp.py | jugheadjones10/bridgedb | 94d6bca4b22458c156898785d8f6ccedf562d884 | [
"BSD-3-Clause-Clear"
] | null | null | null | """integration tests for BridgeDB ."""
from __future__ import print_function
import smtplib
import asyncore
import threading
import queue
import random
import os
from smtpd import SMTPServer
from twisted.trial import unittest
from twisted.trial.unittest import FailTest
from twisted.trial.unittest import SkipTest
from bridgedb.test.util import processExists
from bridgedb.test.util import getBridgeDBPID
# ------------- SMTP Client Config
SMTP_DEBUG_LEVEL = 0 # set to 1 to see SMTP message exchange
BRIDGEDB_SMTP_SERVER_ADDRESS = "localhost"
BRIDGEDB_SMTP_SERVER_PORT = 6725
# %d is parameterised with a random integer to make the sender unique
FROM_ADDRESS_TEMPLATE = "test%d@127.0.0.1"
# Minimum value used to parameterise FROM_ADDRESS_TEMPLATE
MIN_FROM_ADDRESS = 1
# Max value used to parameterise FROM_ADDRESS_TEMPLATE. Needs to be pretty big
# to reduce the chance of collisions
MAX_FROM_ADDRESS = 10**8
TO_ADDRESS = "bridges@torproject.org"
# Bridge-request email body; parameterised with (from address, to address).
MESSAGE_TEMPLATE = """From: %s
To: %s
Subject: testing
get bridges"""
# ------------- SMTP Server Setup
# Setup an SMTP server which we use to check for responses
# from bridgedb. This needs to be done before sending the actual mail
LOCAL_SMTP_SERVER_ADDRESS = 'localhost'
LOCAL_SMTP_SERVER_PORT = 2525 # Must be the same as bridgedb's EMAIL_SMTP_PORT
class EmailServer(SMTPServer):
    """Local SMTP sink used to capture BridgeDB's email responses.

    Received messages are pushed onto a thread-safe queue; the asyncore
    loop is pumped by a daemon thread until stop() is called.
    """
    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        ''' Overridden from SMTP server, called whenever a message is received'''
        self.message_queue.put(data)
    def thread_proc(self):
        ''' This function runs in thread, and will continue looping
        until the _stop Event object is set by the stop() function'''
        while self._stop.is_set() == False:
            asyncore.loop(timeout=0.0, count=1)
        # Must close, or asyncore will hold on to the socket and subsequent
        # tests will fail with 'Address not in use'.
        self.close()
    def start(self):
        # Begin accepting mail: create the inbox queue and pump asyncore
        # from a background thread.
        self.message_queue = queue.Queue()
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self.thread_proc)
        # Ensures that if any tests do fail, then threads will exit when the
        # parent exits.
        self._thread.setDaemon(True)
        self._thread.start()
    @classmethod
    def startServer(cls):
        # Convenience factory: bind the local sink and start its thread.
        #print("Starting SMTP server on %s:%s"
        #      % (LOCAL_SMTP_SERVER_ADDRESS, LOCAL_SMTP_SERVER_PORT))
        server = EmailServer((LOCAL_SMTP_SERVER_ADDRESS,
                              LOCAL_SMTP_SERVER_PORT),
                             None)
        server.start()
        return server
    def stop(self):
        # Signal thread_proc to stop:
        self._stop.set()
        # Wait for thread_proc to return (shouldn't take long)
        self._thread.join()
        assert self._thread.is_alive() == False, "Thread is alive and kicking"
    def getAndCheckMessageContains(self, text, timeoutInSecs=2.0):
        # Wait for one message and assert it contains *text*; a timeout is
        # tolerated (see the CI-flakiness note below).
        try:
            message = self.message_queue.get(block=True, timeout=timeoutInSecs)
        # Queue.Empty, according to its documentation, is only supposed to be
        # raised when Queue.get(block=False) or Queue.get_nowait() are called.
        # I've no idea why it's getting raised here, when we're blocking for
        # it, but nonetheless it causes occasional, non-deterministic CI
        # failures:
        #
        # https://travis-ci.org/isislovecruft/bridgedb/jobs/58996136#L3281
        except queue.Empty:
            pass
        else:
            assert message.find(text) != -1, ("Message did not contain text '%s'."
                                              "Full message is:\n %s"
                                              % (text, message))
    def checkNoMessageReceived(self, timeoutInSecs=2.0):
        # Succeed only if the queue stays empty for the whole timeout.
        try:
            self.message_queue.get(block=True, timeout=timeoutInSecs)
        except queue.Empty:
            return True
        assert False, "Found a message in the queue, but expected none"
def sendMail(fromAddress):
    """Deliver one bridge-request email to BridgeDB's SMTP listener.

    Opens a fresh SMTP connection, sends MESSAGE_TEMPLATE filled in with
    *fromAddress*, asserts no recipient was refused, and disconnects.
    """
    smtp = smtplib.SMTP(BRIDGEDB_SMTP_SERVER_ADDRESS,
                        BRIDGEDB_SMTP_SERVER_PORT)
    smtp.set_debuglevel(SMTP_DEBUG_LEVEL)
    body = MESSAGE_TEMPLATE % (fromAddress, TO_ADDRESS)
    refused = smtp.sendmail(fromAddress, TO_ADDRESS, body)
    assert refused == {}, "Failed to send mail"
    smtp.quit()
class SMTPTests(unittest.TestCase):
    """Integration tests driving BridgeDB's email distributor end-to-end.

    Each test sends mail to the running BridgeDB process and inspects the
    replies captured by the local EmailServer sink.
    """
    def setUp(self):
        '''Called at the start of each test, ensures that the SMTP server is
        running.
        '''
        here = os.getcwd()
        # NOTE(review): str.rstrip() strips a *character set*, not a suffix —
        # this also removes any trailing 'p'/'m'/'e'/... characters; confirm
        # the intended behavior is removesuffix('_trial_temp').
        topdir = here.rstrip('_trial_temp')
        self.rundir = os.path.join(topdir, 'run')
        self.pidfile = os.path.join(self.rundir, 'bridgedb.pid')
        self.pid = getBridgeDBPID(self.pidfile)
        self.server = EmailServer.startServer()
    def tearDown(self):
        '''Called after each test, ensures that the SMTP server is cleaned up.
        '''
        self.server.stop()
    def test_getBridges(self):
        # On CI a missing BridgeDB process is a hard failure; locally we skip.
        if os.environ.get("CI"):
            if not self.pid or not processExists(self.pid):
                raise FailTest("Could not start BridgeDB process on CI server!")
        if not self.pid or not processExists(self.pid):
            raise SkipTest("Can't run test: no BridgeDB process running.")
        # send the mail to bridgedb, choosing a random email address
        sendMail(fromAddress=FROM_ADDRESS_TEMPLATE
                 % random.randint(MIN_FROM_ADDRESS, MAX_FROM_ADDRESS))
        # then check that our local SMTP server received a response
        # and that response contained some bridges
        self.server.getAndCheckMessageContains(b"Here are your bridges")
    def test_getBridges_rateLimitExceeded(self):
        # On CI a missing BridgeDB process is a hard failure; locally we skip.
        if os.environ.get("CI"):
            if not self.pid or not processExists(self.pid):
                raise FailTest("Could not start BridgeDB process on CI server!")
        if not self.pid or not processExists(self.pid):
            raise SkipTest("Can't run test: no BridgeDB process running.")
        # send the mail to bridgedb, choosing a random email address
        FROM_ADDRESS = FROM_ADDRESS_TEMPLATE % random.randint(
            MIN_FROM_ADDRESS, MAX_FROM_ADDRESS)
        sendMail(FROM_ADDRESS)
        # then check that our local SMTP server received a response
        # and that response contained some bridges
        self.server.getAndCheckMessageContains(b"Here are your bridges")
        # send another request from the same email address
        sendMail(FROM_ADDRESS)
        # this time, the email response should not contain any bridges
        self.server.getAndCheckMessageContains(
            b"You have exceeded the rate limit. Please slow down!")
        # then we send another request from the same email address
        sendMail(FROM_ADDRESS)
        # now there should be no response at all (wait 1 second to make sure)
        self.server.checkNoMessageReceived(timeoutInSecs=1.0)
    def test_getBridges_stressTest(self):
        '''Sends a large number of emails in a short period of time, and checks
        that a response is received for each message.
        '''
        if os.environ.get("CI"):
            if not self.pid or not processExists(self.pid):
                raise FailTest("Could not start BridgeDB process on CI server!")
        if not self.pid or not processExists(self.pid):
            raise SkipTest("Can't run test: no BridgeDB process running.")
        NUM_MAILS = 100
        for i in range(NUM_MAILS):
            # Note: if by chance two emails with the same FROM_ADDRESS are
            # generated, this test will fail Setting 'MAX_FROM_ADDRESS' to be
            # a high value reduces the probability of this occuring, but does
            # not rule it out
            sendMail(fromAddress=FROM_ADDRESS_TEMPLATE
                     % random.randint(MIN_FROM_ADDRESS, MAX_FROM_ADDRESS))
        for i in range(NUM_MAILS):
            self.server.getAndCheckMessageContains(b"Here are your bridges")
| 39.666667 | 82 | 0.657776 |
from __future__ import print_function
import smtplib
import asyncore
import threading
import queue
import random
import os
from smtpd import SMTPServer
from twisted.trial import unittest
from twisted.trial.unittest import FailTest
from twisted.trial.unittest import SkipTest
from bridgedb.test.util import processExists
from bridgedb.test.util import getBridgeDBPID
SMTP_DEBUG_LEVEL = 0
BRIDGEDB_SMTP_SERVER_ADDRESS = "localhost"
BRIDGEDB_SMTP_SERVER_PORT = 6725
FROM_ADDRESS_TEMPLATE = "test%d@127.0.0.1"
MIN_FROM_ADDRESS = 1
MAX_FROM_ADDRESS = 10**8
TO_ADDRESS = "bridges@torproject.org"
MESSAGE_TEMPLATE = """From: %s
To: %s
Subject: testing
get bridges"""
LOCAL_SMTP_SERVER_ADDRESS = 'localhost'
LOCAL_SMTP_SERVER_PORT = 2525
class EmailServer(SMTPServer):
    """Local asynchronous SMTP sink used to capture BridgeDB's replies.

    Every delivered message body is pushed onto a thread-safe queue so
    that test code can block on it with a timeout.
    """

    def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
        """Store one delivered message body for later inspection."""
        self.message_queue.put(data)

    def thread_proc(self):
        """Run the asyncore loop until :meth:`stop` signals shutdown."""
        while not self._stop.is_set():
            asyncore.loop(timeout=0.0, count=1)
        # Must close, or asyncore will hold on to the socket and subsequent
        # tests will fail with 'Address not in use'.
        self.close()

    def start(self):
        """Create the queue and stop event, then launch the server thread."""
        self.message_queue = queue.Queue()
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self.thread_proc)
        # Daemonise so a failing test cannot hang the process at exit.
        # (attribute form replaces the deprecated Thread.setDaemon())
        self._thread.daemon = True
        self._thread.start()

    @classmethod
    def startServer(cls):
        """Construct, start, and return a listening server instance."""
        # Use cls() rather than the hard-coded class name so subclasses
        # inherit a working factory.
        server = cls((LOCAL_SMTP_SERVER_ADDRESS,
                      LOCAL_SMTP_SERVER_PORT),
                     None)
        server.start()
        return server

    def stop(self):
        """Signal the server thread to stop and wait for it to exit."""
        self._stop.set()
        # Join should return quickly: thread_proc polls the event each pass.
        self._thread.join()
        assert not self._thread.is_alive(), "Thread is alive and kicking"

    def getAndCheckMessageContains(self, text, timeoutInSecs=2.0):
        """Wait for a message and assert that it contains ``text``.

        A timeout is deliberately *not* treated as a failure: message
        delivery has shown rare non-deterministic delays on CI, so an
        absent message is ignored rather than producing a flaky error.
        """
        try:
            message = self.message_queue.get(block=True, timeout=timeoutInSecs)
        except queue.Empty:
            pass
        else:
            assert message.find(text) != -1, ("Message did not contain text '%s'."
                                              "Full message is:\n %s"
                                              % (text, message))

    def checkNoMessageReceived(self, timeoutInSecs=2.0):
        """Assert that no message arrives within ``timeoutInSecs`` seconds."""
        try:
            self.message_queue.get(block=True, timeout=timeoutInSecs)
        except queue.Empty:
            return True
        assert False, "Found a message in the queue, but expected none"
def sendMail(fromAddress):
    """Deliver a single 'get bridges' request to BridgeDB over SMTP.

    Connects to the BridgeDB email distributor, sends one message built
    from MESSAGE_TEMPLATE, asserts that no recipient was refused, and
    closes the connection.
    """
    smtp = smtplib.SMTP(BRIDGEDB_SMTP_SERVER_ADDRESS,
                        BRIDGEDB_SMTP_SERVER_PORT)
    smtp.set_debuglevel(SMTP_DEBUG_LEVEL)
    body = MESSAGE_TEMPLATE % (fromAddress, TO_ADDRESS)
    refused = smtp.sendmail(fromAddress, TO_ADDRESS, body)
    assert refused == {}, "Failed to send mail"
    smtp.quit()
class SMTPTests(unittest.TestCase):
    """End-to-end tests for BridgeDB's email (SMTP) bridge distributor."""

    def setUp(self):
        """Locate the running BridgeDB instance and start the SMTP sink."""
        here = os.getcwd()
        # str.rstrip() removes a *character set*, not a suffix, so the old
        # here.rstrip('_trial_temp') could also eat trailing characters of
        # the project directory name.  Strip the literal suffix instead.
        if here.endswith('_trial_temp'):
            topdir = here[:-len('_trial_temp')]
        else:
            topdir = here
        self.rundir = os.path.join(topdir, 'run')
        self.pidfile = os.path.join(self.rundir, 'bridgedb.pid')
        self.pid = getBridgeDBPID(self.pidfile)
        self.server = EmailServer.startServer()

    def tearDown(self):
        """Stop the local SMTP sink after each test."""
        self.server.stop()

    def _requireBridgeDB(self):
        """Fail (on CI) or skip (elsewhere) when BridgeDB is not running.

        This guard used to be copy-pasted at the top of every test method;
        it lives here once instead.
        """
        if os.environ.get("CI"):
            if not self.pid or not processExists(self.pid):
                raise FailTest("Could not start BridgeDB process on CI server!")
        if not self.pid or not processExists(self.pid):
            raise SkipTest("Can't run test: no BridgeDB process running.")

    def test_getBridges(self):
        """One emailed request should be answered with bridge lines."""
        self._requireBridgeDB()
        sendMail(fromAddress=FROM_ADDRESS_TEMPLATE
                 % random.randint(MIN_FROM_ADDRESS, MAX_FROM_ADDRESS))
        self.server.getAndCheckMessageContains(b"Here are your bridges")

    def test_getBridges_rateLimitExceeded(self):
        """Repeated requests from one sender must hit the rate limiter."""
        self._requireBridgeDB()
        FROM_ADDRESS = FROM_ADDRESS_TEMPLATE % random.randint(
            MIN_FROM_ADDRESS, MAX_FROM_ADDRESS)
        # First request: bridges are handed out normally.
        sendMail(FROM_ADDRESS)
        self.server.getAndCheckMessageContains(b"Here are your bridges")
        # Second request: the distributor answers with a rate-limit notice.
        sendMail(FROM_ADDRESS)
        self.server.getAndCheckMessageContains(
            b"You have exceeded the rate limit. Please slow down!")
        # Third request: dropped outright, so expect silence for one second.
        sendMail(FROM_ADDRESS)
        self.server.checkNoMessageReceived(timeoutInSecs=1.0)

    def test_getBridges_stressTest(self):
        """Many rapid requests should each receive a bridge reply."""
        self._requireBridgeDB()
        NUM_MAILS = 100
        # Draw the sender suffixes *without replacement*: two identical
        # FROM addresses would trip the rate limiter, lose one reply, and
        # fail the test (random.randint made that rare but possible).
        suffixes = random.sample(
            range(MIN_FROM_ADDRESS, MAX_FROM_ADDRESS + 1), NUM_MAILS)
        for suffix in suffixes:
            sendMail(fromAddress=FROM_ADDRESS_TEMPLATE % suffix)
        for _ in range(NUM_MAILS):
            self.server.getAndCheckMessageContains(b"Here are your bridges")
| true | true |
f7261db3cd3b74253335a7d73475dfcccd0a55ec | 1,641 | py | Python | unpackMFC.py | dare0021/KerasBasedSpeechClassifier | aa507b2a2eb39963bd9ed3279288dbe6c877166c | [
"MIT"
] | 1 | 2016-12-28T07:18:52.000Z | 2016-12-28T07:18:52.000Z | unpackMFC.py | dare0021/KerasBasedSpeechClassifier | aa507b2a2eb39963bd9ed3279288dbe6c877166c | [
"MIT"
] | 2 | 2016-09-30T05:41:06.000Z | 2017-03-28T11:58:58.000Z | unpackMFC.py | dare0021/KerasBasedSpeechClassifier | aa507b2a2eb39963bd9ed3279288dbe6c877166c | [
"MIT"
] | null | null | null | import numpy as np
import struct
# CMU Sphinx 4 mfc file opener
# takes file path as input
# Sphinx uses feature vectors of length 13 by default
def run(input, featureVectorSize):
	"""Read a CMU Sphinx-4 .mfc feature file into a 2-D numpy array.

	The file layout is one big-endian int32 (the total float count)
	followed by that many big-endian float32 feature values.

	input             -- path to the .mfc file
	featureVectorSize -- floats per frame (Sphinx default: 13)

	Returns a float array of shape (frames, featureVectorSize).
	Raises AssertionError when the value count is not a whole number of
	feature vectors.
	"""
	# Binary mode plus a context manager: the old code opened the file in
	# text mode (corrupts \r\n bytes on Windows) and never closed it.
	with open(input, 'rb') as mfc:
		size = struct.unpack('>i', mfc.read(4))[0]
		# Exact integer divisibility test; the old float subtraction could
		# suffer rounding error for large feature counts.
		if size % featureVectorSize != 0:
			print("ERR: unpackMFC.run().featureVectorSize is inconsistent with the feature count read from the file given.")
			print("File given: %s" % (input,))
			print("Feature count read: %s" % (size,))
			print("featureVectorSize: %s" % (featureVectorSize,))
			assert False
		payload = mfc.read(4 * size)
	# One bulk conversion instead of `size` individual struct.unpack calls.
	out = np.frombuffer(payload, dtype='>f4').astype(np.float64)
	return out.reshape(size // featureVectorSize, featureVectorSize)
# Returns windowed result with 0 padding
# e.g. for frames 1,2,3,4,5: [[1,2,3], [4,5,0]]
# windowSize is in frames
# a frame is 10ms
# recommended value: 1~3 sec
def returnWindowed(input, featureVectorSize, windowSize):
	"""Load an .mfc file and slice it into zero-padded windows of frames.

	e.g. for frames 1,2,3,4,5 and windowSize 3: [[1,2,3], [4,5,0]]
	windowSize is in frames; a frame is 10ms (1~3 sec recommended).
	Returns an array shaped (windows, windowSize, featureVectorSize).
	"""
	frames = run(input, featureVectorSize)
	# Ceiling division: number of windows needed to cover every frame.
	numcells = -(-len(frames) // windowSize)
	flat = frames.flatten()
	pad = numcells * windowSize * featureVectorSize - len(flat)
	flat = np.append(flat, np.zeros(shape=(pad)))
	return flat.reshape(numcells, windowSize, featureVectorSize)
def runForAll(input, featureVectorSize, windowSize):
	"""Apply the reader to every file path in ``input``.

	Windows each file when windowSize > 1; otherwise returns the raw
	per-frame features.  The result is a list with one array per path.
	"""
	if windowSize > 1:
		return [returnWindowed(path, featureVectorSize, windowSize)
				for path in input]
	return [run(path, featureVectorSize) for path in input]
# print returnWindowed("../SPK_DB/mfc13OnlySilences2e5/C002_M4_INDE_025.wav.mfc", 13, 100).shape
| 35.673913 | 113 | 0.724558 | import numpy as np
import struct
def run(input, featureVectorSize):
file = open(input, 'r')
size = struct.unpack('>i', ''.join(file.read(4)))[0]
if ((float)(size)) / featureVectorSize - (float)(size // featureVectorSize) != 0:
print "ERR: unpackMFC.run().featureVectorSize is inconsistent with the feature count read from the file given."
print "File given: ", input
print "Feature count read: ", size
print "featureVectorSize: ", featureVectorSize
assert False
out = np.zeros(shape=(size/featureVectorSize, featureVectorSize))
for i in range(size):
out[i//featureVectorSize][i%featureVectorSize] = struct.unpack('>f', ''.join(file.read(4)))[0]
return out
def returnWindowed(input, featureVectorSize, windowSize):
raw = run(input, featureVectorSize)
numcells = len(raw) // windowSize
if len(raw) % windowSize > 0:
numcells += 1
raw = raw.flatten()
raw = np.append(raw, np.zeros(shape=(numcells*windowSize*featureVectorSize - len(raw))))
return raw.reshape(numcells, windowSize, featureVectorSize)
def runForAll(input, featureVectorSize, windowSize):
out = []
if windowSize > 1:
for i in input:
out.append(returnWindowed(i, featureVectorSize, windowSize))
else:
for i in input:
out.append(run(i, featureVectorSize))
return out
| false | true |
f7261de6c7c541d6de5d6ee3125ac35b2d456e80 | 7,792 | py | Python | test/functional/combine_logs.py | Techcoingithub/techcoin | 6914faea0496d16d85f4f11fc1ae2ba05e9143b8 | [
"MIT"
] | null | null | null | test/functional/combine_logs.py | Techcoingithub/techcoin | 6914faea0496d16d85f4f11fc1ae2ba05e9143b8 | [
"MIT"
] | 1 | 2021-11-30T18:41:44.000Z | 2022-01-17T17:55:26.000Z | test/functional/combine_logs.py | Techcoingithub/techcoin | 6914faea0496d16d85f4f11fc1ae2ba05e9143b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Combine logs from multiple techcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import pathlib
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "techcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Parse arguments, merge every log file, and render as text or HTML."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'testdir', nargs='?', default='',
        help=('temporary test directory to combine logs from. '
              'Defaults to the most recent'))
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args = parser.parse_args()

    # The two output modes are mutually exclusive.
    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    testdir = args.testdir or find_latest_test_dir()
    if not testdir:
        print("No test directories found")
        sys.exit(1)
    if not args.testdir:
        print("Opening latest test directory: {}".format(testdir), file=sys.stderr)

    # Per-source ANSI colors; unknown sources map to the empty string.
    colors = defaultdict(str)
    if args.color:
        colors.update({
            "test": "\033[0;36m",   # CYAN
            "node0": "\033[0;34m",  # BLUE
            "node1": "\033[0;32m",  # GREEN
            "node2": "\033[0;31m",  # RED
            "node3": "\033[0;33m",  # YELLOW
            "reset": "\033[0m",     # reset font color
        })

    log_events = read_logs(testdir)
    if args.html:
        print_logs_html(log_events)
    else:
        print_logs_plain(log_events, colors)
        print_node_warnings(testdir, colors)
def read_logs(tmp_dir):
    """Merge the framework log with every node's debug.log, time-ordered.

    Each input file is turned into a stream of LogEvents by
    get_log_events(); the streams are then heap-merged by timestamp.
    """
    # Work out which chain subdirectory (e.g. 'regtest') holds debug.log.
    matches = pathlib.Path(tmp_dir).glob('node0/**/debug.log')
    found = next(matches, None)
    if found is None:
        chain = 'regtest'  # fallback when no debug.log exists yet
    else:
        # A second debug.log under node0 would be a bug in the test setup.
        assert next(matches, None) is None
        chain = re.search(r'node0/(.+?)/debug\.log$', found.as_posix()).group(1)
    files = [("test", "%s/test_framework.log" % tmp_dir)]
    # Nodes are numbered consecutively; stop at the first missing one.
    for i in itertools.count():
        logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
        if not os.path.isfile(logfile):
            break
        files.append(("node%d" % i, logfile))
    return heapq.merge(*(get_log_events(source, f) for source, f in files))
def print_node_warnings(tmp_dir, colors):
    """Collect and print every node's captured stdout/stderr warnings."""
    collected = []
    for stream in ('stdout', 'stderr'):
        for i in itertools.count():
            folder = "{}/node{}/{}".format(tmp_dir, i, stream)
            if not os.path.isdir(folder):
                break  # node i does not exist, so higher indices won't either
            for (_, _, fns) in os.walk(folder):
                for fn in fns:
                    text = pathlib.Path('{}/{}'.format(folder, fn)).read_text().strip()
                    if text:
                        collected.append(("node{} {}".format(i, stream), text))
    print()
    for source, text in collected:
        print("{} {} {} {}".format(colors[source.split()[0]], source, text, colors["reset"]))
def find_latest_test_dir():
    """Return the most recently modified tmpfile test directory, or None."""
    tmpdir = tempfile.gettempdir()
    candidates = []
    for name in os.listdir(tmpdir):
        fullpath = os.path.join(tmpdir, name)
        # A valid candidate is a readable directory with the test prefix.
        if (name.startswith(TMPDIR_PREFIX)
                and os.path.isdir(fullpath)
                and os.access(fullpath, os.R_OK)):
            candidates.append(fullpath)
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)
def get_log_events(source, logfile):
    """Generator yielding individual, possibly multi-line log events.

    A line starting with a timestamp begins a new event; any other line is
    appended to the event in progress.  Timestamps without microseconds
    are normalised to six-digit precision so the merge sort stays stable.

    source  -- label attached to every yielded LogEvent (e.g. 'node0')
    logfile -- path of the log file to read
    """
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            event = ''
            timestamp = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                # if this line has a timestamp, it's the start of a new log event.
                time_match = TIMESTAMP_PATTERN.match(line)
                if time_match:
                    if event:
                        yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                    timestamp = time_match.group()
                    if time_match.group(1) is None:
                        # timestamp does not have microseconds. Add zeroes.
                        timestamp_micro = timestamp.replace("Z", ".000000Z")
                        line = line.replace(timestamp, timestamp_micro)
                        timestamp = timestamp_micro
                    event = line
                # if it doesn't have a timestamp, it's a continuation line of the previous log.
                else:
                    # Add the line. Prefix with space equivalent to the source + timestamp so log lines are aligned
                    event += " " + line
            # Flush the final event -- but only when the file actually held
            # one; the old unconditional yield emitted a bogus empty
            # LogEvent for empty (or all-blank) files.
            if event:
                yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs_plain(log_events, colors):
    """Write each log event to stdout, colourised by its source."""
    for event in log_events:
        source = event.source
        tint = colors[source.rstrip()]
        reset = colors["reset"]
        lines = event.event.splitlines()
        print("{0} {1: <5} {2} {3}".format(tint, source, lines[0], reset))
        # Continuation lines are printed without the source column.
        for continuation in lines[1:]:
            print("{0}{1}{2}".format(tint, continuation, reset))
def print_logs_html(log_events):
    """Render the merged log events as HTML via the jinja2 template."""
    try:
        import jinja2
    except ImportError:
        print("jinja2 not found. Try `pip install jinja2`")
        sys.exit(1)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
    template = env.get_template('combined_log_template.html')
    print(template.render(title="Combined Logs from testcase",
                          log_events=[event._asdict() for event in log_events]))
# Allow the module to be executed directly as a command-line tool.
if __name__ == '__main__':
    main()
| 38.574257 | 196 | 0.617941 |
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import pathlib
import re
import sys
import tempfile
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "techcoin_func_test_"
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'testdir', nargs='?', default='',
help=('temporary test directory to combine logs from. '
'Defaults to the most recent'))
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args = parser.parse_args()
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
testdir = args.testdir or find_latest_test_dir()
if not testdir:
print("No test directories found")
sys.exit(1)
if not args.testdir:
print("Opening latest test directory: {}".format(testdir), file=sys.stderr)
colors = defaultdict(lambda: '')
if args.color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
log_events = read_logs(testdir)
if args.html:
print_logs_html(log_events)
else:
print_logs_plain(log_events, colors)
print_node_warnings(testdir, colors)
def read_logs(tmp_dir):
# Find out what the folder is called that holds the debug.log file
glob = pathlib.Path(tmp_dir).glob('node0/**/debug.log')
path = next(glob, None)
if path:
assert next(glob, None) is None # more than one debug.log, should never happen
chain = re.search(r'node0/(.+?)/debug\.log$', path.as_posix()).group(1) # extract the chain name
else:
chain = 'regtest' # fallback to regtest (should only happen when none exists)
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def print_node_warnings(tmp_dir, colors):
warnings = []
for stream in ['stdout', 'stderr']:
for i in itertools.count():
folder = "{}/node{}/{}".format(tmp_dir, i, stream)
if not os.path.isdir(folder):
break
for (_, _, fns) in os.walk(folder):
for fn in fns:
warning = pathlib.Path('{}/{}'.format(folder, fn)).read_text().strip()
if warning:
warnings.append(("node{} {}".format(i, stream), warning))
print()
for w in warnings:
print("{} {} {} {}".format(colors[w[0].split()[0]], w[0], w[1], colors["reset"]))
def find_latest_test_dir():
tmpdir = tempfile.gettempdir()
def join_tmp(basename):
return os.path.join(tmpdir, basename)
def is_valid_test_tmpdir(basename):
fullpath = join_tmp(basename)
return (
os.path.isdir(fullpath)
and basename.startswith(TMPDIR_PREFIX)
and os.access(fullpath, os.R_OK)
)
testdir_paths = [
join_tmp(name) for name in os.listdir(tmpdir) if is_valid_test_tmpdir(name)
]
return max(testdir_paths, key=os.path.getmtime) if testdir_paths else None
def get_log_events(source, logfile):
try:
with open(logfile, 'r', encoding='utf-8') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
timestamp = time_match.group()
if time_match.group(1) is None:
timestamp_micro = timestamp.replace("Z", ".000000Z")
line = line.replace(timestamp, timestamp_micro)
timestamp = timestamp_micro
event = line
else:
event += " " + line
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs_plain(log_events, colors):
for event in log_events:
lines = event.event.splitlines()
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, lines[0], colors["reset"]))
if len(lines) > 1:
for line in lines[1:]:
print("{0}{1}{2}".format(colors[event.source.rstrip()], line, colors["reset"]))
def print_logs_html(log_events):
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
| true | true |
f7261e37ae3b0a363433aacc10d91805d93f9dcf | 391 | py | Python | src/create_github_project/commands/versions/__init__.py | nkomiya/create-github-project | e60028a3edf0fdee2f76ffd26296e01588235324 | [
"MIT"
] | 1 | 2021-08-20T08:28:45.000Z | 2021-08-20T08:28:45.000Z | src/create_github_project/commands/versions/__init__.py | nkomiya/create-github-project | e60028a3edf0fdee2f76ffd26296e01588235324 | [
"MIT"
] | 37 | 2021-07-24T11:47:28.000Z | 2021-08-08T19:23:24.000Z | src/create_github_project/commands/versions/__init__.py | nkomiya/create-github-project | e60028a3edf0fdee2f76ffd26296e01588235324 | [
"MIT"
] | null | null | null | import click
from .current import current
from .update import update
__all__ = [
'build'
]
@click.group(help='Manage tool versions.')
def versions() -> None:
    # Empty body: this group only exists to hold the subcommands that
    # build() registers on it; click dispatches directly to them.
    pass
def build(cmd: click.Group) -> None:
    """Attach the ``versions`` command group to the parent command.

    Args:
        cmd (click.Group): parent command
    """
    for subcommand in (current, update):
        versions.add_command(subcommand)
    cmd.add_command(versions)
| 14.481481 | 42 | 0.657289 | import click
from .current import current
from .update import update
__all__ = [
'build'
]
@click.group(help='Manage tool versions.')
def versions() -> None:
pass
def build(cmd: click.Group) -> None:
versions.add_command(current)
versions.add_command(update)
cmd.add_command(versions)
| true | true |
f7261ecb34d10b321947c71f2d29ac44db652fb3 | 20,071 | py | Python | vis/visualize_court.py | szhaofelicia/sgan | ead42d4bb3b1278c4c9ffcae8fa9c2dc036a52ff | [
"MIT"
] | 3 | 2022-01-02T16:58:39.000Z | 2022-02-07T08:29:48.000Z | vis/visualize_court.py | szhaofelicia/sgan | ead42d4bb3b1278c4c9ffcae8fa9c2dc036a52ff | [
"MIT"
] | null | null | null | vis/visualize_court.py | szhaofelicia/sgan | ead42d4bb3b1278c4c9ffcae8fa9c2dc036a52ff | [
"MIT"
] | null | null | null | import numpy as np
# import plotly
import plotly.graph_objects as go
def draw_plotly_half_court(fig, fig_width=600, margins=10):
    """Draw an NBA half court onto ``fig`` as plotly layout shapes.

    Parameters
    ----------
    fig : plotly figure
        Figure whose layout is updated in place.
    fig_width : int
        Output width in pixels; the height is derived so the court keeps
        its aspect ratio.
    margins : int
        Padding, in court units, added around the playing area.

    Returns
    -------
    bool
        Always True.

    Notes
    -----
    Court coordinates appear to be tenths of feet (the 500-unit width
    matches a 50 ft NBA court) with the basket centred near (0, 0) --
    TODO confirm against the data plotted on this figure.
    """
    # From: https://community.plot.ly/t/arc-shape-with-path/7205/5
    def ellipse_arc(x_center=0.0, y_center=0.0, a=10.5, b=10.5, start_angle=0.0, end_angle=2 * np.pi, N=200, closed=False):
        # Build an SVG path string approximating an elliptical arc with N
        # straight segments; ``closed`` appends 'Z' to close the path.
        t = np.linspace(start_angle, end_angle, N)
        x = x_center + a * np.cos(t)
        y = y_center + b * np.sin(t)
        path = f'M {x[0]}, {y[0]}'
        for k in range(1, len(t)):
            path += f'L{x[k]}, {y[k]}'
        if closed:
            path += ' Z'
        return path
    fig_height = fig_width * (470 + 2 * margins) / (500 + 2 * margins)
    fig.update_layout(width=fig_width, height=fig_height)
    # Set axes ranges
    fig.update_xaxes(range=[-250 - margins, 250 + margins])
    fig.update_yaxes(range=[-52.5 - margins, 417.5 + margins])
    threept_break_y = 89.47765084
    three_line_col = "#777777"
    main_line_col = "#777777"
    fig.update_layout(
        # Line Horizontal
        margin=dict(l=20, r=20, t=20, b=20),
        paper_bgcolor="white",
        plot_bgcolor="white",
        yaxis=dict(
            scaleanchor="x",
            scaleratio=1,
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks='',
            showticklabels=False,
            fixedrange=True,
        ),
        xaxis=dict(
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks='',
            showticklabels=False,
            fixedrange=True,
        ),
        shapes=[
            # half_layout=[
            dict(
                type="rect", x0=-250, y0=-52.5, x1=250, y1=417.5,
                line=dict(color=main_line_col, width=1),
                # fillcolor='#333333',
                layer='below'
            ), ## sideline rect
            dict(
                type="rect", x0=-80, y0=-52.5, x1=80, y1=137.5,
                line=dict(color=main_line_col, width=1),
                # fillcolor='#333333',
                layer='below'
            ),# lane line rect
            dict(
                type="rect", x0=-60, y0=-52.5, x1=60, y1=137.5,
                line=dict(color=main_line_col, width=1),
                # fillcolor='#333333',
                layer='below'
            ), # foul line rect
            dict(
                type="circle", x0=-60, y0=77.5, x1=60, y1=197.5, xref="x", yref="y",
                line=dict(color=main_line_col, width=1),
                # fillcolor='#dddddd',
                layer='below'
            ), # free-throw circle
            dict(
                type="line", x0=-60, y0=137.5, x1=60, y1=137.5,
                line=dict(color=main_line_col, width=1),
                layer='below'
            ), # foul line
            dict(
                type="rect", x0=-2, y0=-7.25, x1=2, y1=-12.5,
                line=dict(color="#ec7607", width=1),
                fillcolor='#ec7607',
            ), # hoop rect
            dict(
                type="circle", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref="x", yref="y",
                line=dict(color="#ec7607", width=1),
            ), # hoop circle
            dict(
                type="line", x0=-30, y0=-12.5, x1=30, y1=-12.5,
                line=dict(color="#ec7607", width=1),
            ), # backboard
            dict(type="path",
                 path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),
                 line=dict(color=main_line_col, width=1), layer='below'), # no-change semi-circle
            dict(type="path",
                 path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101, end_angle=np.pi - 0.386283101),
                 line=dict(color=main_line_col, width=1), layer='below'), # three-point line:arc
            dict(
                type="line", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,
                line=dict(color=three_line_col, width=1), layer='below'
            ), # three-point line:left edge
            # dict(
            #     type="line", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,
            #     line=dict(color=three_line_col, width=1), layer='below'
            # ),
            dict(
                type="line", x0=220, y0=-52.5, x1=220, y1=threept_break_y,
                line=dict(color=three_line_col, width=1), layer='below'
            ), # three-point line:right edge
            dict(
                type="line", x0=-250, y0=227.5, x1=-220, y1=227.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # midcourt area marker:left
            dict(
                type="line", x0=250, y0=227.5, x1=220, y1=227.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # midcourt area marker:right
            dict(
                type="line", x0=-90, y0=17.5, x1=-80, y1=17.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=-90, y0=27.5, x1=-80, y1=27.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=-90, y0=57.5, x1=-80, y1=57.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=-90, y0=87.5, x1=-80, y1=87.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=90, y0=17.5, x1=80, y1=17.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=90, y0=27.5, x1=80, y1=27.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=90, y0=57.5, x1=80, y1=57.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(
                type="line", x0=90, y0=87.5, x1=80, y1=87.5,
                line=dict(color=main_line_col, width=1), layer='below'
            ), # lane line marker
            dict(type="path",
                 path=ellipse_arc(y_center=417.5, a=60, b=60, start_angle=-0, end_angle=-np.pi),
                 line=dict(color=main_line_col, width=1), layer='below'), # center circle: half
        ]
    )
    return True
def draw_plotly_whole_court(fig, fig_width=600, margins=10):
    """Draw a full-length basketball court (two half courts end to end) on *fig*.

    The bounding rectangle spans the whole court (500 x 940 court units) and a
    half center circle sits at mid-court, but only the lower half's markings
    (hoop, lane, free-throw circle, three-point line, markers) are drawn — the
    mirrored upper-half shapes were never enabled in the original.  The figure
    is sized from *fig_width* preserving the court aspect ratio, with *margins*
    court units of padding on every side.  Returns True.
    """
    # From: https://community.plot.ly/t/arc-shape-with-path/7205/5
    def ellipse_arc(x_center=0.0, y_center=0.0, a=10.5, b=10.5,
                    start_angle=0.0, end_angle=2 * np.pi, N=200, closed=False):
        # Sample N points along the ellipse between the two angles and emit an
        # SVG path string that plotly renders as a "path" shape.
        t = np.linspace(start_angle, end_angle, N)
        x = x_center + a * np.cos(t)
        y = y_center + b * np.sin(t)
        path = f'M {x[0]}, {y[0]}'
        for k in range(1, len(t)):
            path += f'L{x[k]}, {y[k]}'
        if closed:
            path += ' Z'
        return path

    # Keep the 500-wide by 2*470-tall court aspect ratio (plus margins).
    fig_height = fig_width * (470 * 2 + 2 * margins) / (500 + 2 * margins)
    fig.update_layout(width=fig_width, height=fig_height)

    # Set axes ranges to the court extents.
    fig.update_xaxes(range=[-250 - margins, 250 + margins])
    fig.update_yaxes(range=[-52.5 - margins, 417.5 + 470 + margins])

    threept_break_y = 89.47765084  # y where the 3pt arc meets its straight edges
    three_line_col = "#777777"
    main_line_col = "#777777"

    def line(x0, y0, x1, y1, color=main_line_col):
        # 1px background line segment in court coordinates.
        return dict(type="line", x0=x0, y0=y0, x1=x1, y1=y1,
                    line=dict(color=color, width=1), layer='below')

    # Short hash marks flanking the lane: left side first, then right,
    # each at the same four heights (matches the original shape order).
    lane_markers = [line(sign * 90, y, sign * 80, y)
                    for sign in (-1, 1)
                    for y in (17.5, 27.5, 57.5, 87.5)]

    fig.update_layout(
        margin=dict(l=20, r=20, t=20, b=20),
        paper_bgcolor="white",
        plot_bgcolor="white",
        yaxis=dict(
            scaleanchor="x",
            scaleratio=1,
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks='',
            showticklabels=False,
            fixedrange=True,
        ),
        xaxis=dict(
            showgrid=False,
            zeroline=False,
            showline=False,
            ticks='',
            showticklabels=False,
            fixedrange=True,
        ),
        shapes=[
            dict(
                type="rect", x0=-250, y0=-52.5, x1=250, y1=417.5 + 470,
                line=dict(color=main_line_col, width=1),
                layer='below'
            ),  # sideline rect (full court)
            dict(
                type="rect", x0=-80, y0=-52.5, x1=80, y1=137.5,
                line=dict(color=main_line_col, width=1),
                layer='below'
            ),  # lane line rect
            dict(
                type="rect", x0=-60, y0=-52.5, x1=60, y1=137.5,
                line=dict(color=main_line_col, width=1),
                layer='below'
            ),  # foul line rect
            dict(
                type="circle", x0=-60, y0=77.5, x1=60, y1=197.5, xref="x", yref="y",
                line=dict(color=main_line_col, width=1),
                layer='below'
            ),  # free-throw circle
            line(-60, 137.5, 60, 137.5),  # foul line
            dict(
                type="rect", x0=-2, y0=-7.25, x1=2, y1=-12.5,
                line=dict(color="#ec7607", width=1),
                fillcolor='#ec7607',
            ),  # hoop rect
            dict(
                type="circle", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref="x", yref="y",
                line=dict(color="#ec7607", width=1),
            ),  # hoop circle
            dict(
                type="line", x0=-30, y0=-12.5, x1=30, y1=-12.5,
                line=dict(color="#ec7607", width=1),
            ),  # backboard
            dict(type="path",
                 path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),
                 line=dict(color=main_line_col, width=1), layer='below'),  # no-change semi-circle
            dict(type="path",
                 path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101,
                                  end_angle=np.pi - 0.386283101),
                 line=dict(color=main_line_col, width=1), layer='below'),  # three-point line: arc
            line(-220, -52.5, -220, threept_break_y, three_line_col),  # three-point line: left edge
            line(220, -52.5, 220, threept_break_y, three_line_col),  # three-point line: right edge
            line(-250, 227.5, -220, 227.5),  # midcourt area marker: left
            line(250, 227.5, 220, 227.5),  # midcourt area marker: right
            *lane_markers,  # lane line markers (4 left + 4 right)
            dict(type="path",
                 path=ellipse_arc(y_center=417.5, a=60, b=60, start_angle=-0, end_angle=-np.pi),
                 line=dict(color=main_line_col, width=1), layer='below'),  # center circle: half
        ]
    )
    return True
# --- Script entry: render an empty full court and export it as SVG. ---

# Marker/colorbar settings (only consumed by the currently disabled
# hexbin scatter overlay; kept so re-enabling it is a copy-paste away).
max_freq = 0.002
colorscale = 'YlOrRd'
marker_cmin = 0.1
marker_cmax = 0.6
ticktexts = [f"{marker_cmin * 100}%-", "", f"{marker_cmax * 100}%+"]

fig = go.Figure()
draw_plotly_whole_court(fig)

# Write the bare court to disk for later compositing.
vis_dir = '/media/felicia/Data/sgan_results/vis/'
fig.write_image(vis_dir + "court.svg")
| 40.061876 | 123 | 0.478202 | import numpy as np
import plotly.graph_objects as go
def draw_plotly_half_court(fig, fig_width=600, margins=10):
def ellipse_arc(x_center=0.0, y_center=0.0, a=10.5, b=10.5, start_angle=0.0, end_angle=2 * np.pi, N=200, closed=False):
t = np.linspace(start_angle, end_angle, N)
x = x_center + a * np.cos(t)
y = y_center + b * np.sin(t)
path = f'M {x[0]}, {y[0]}'
for k in range(1, len(t)):
path += f'L{x[k]}, {y[k]}'
if closed:
path += ' Z'
return path
fig_height = fig_width * (470 + 2 * margins) / (500 + 2 * margins)
fig.update_layout(width=fig_width, height=fig_height)
fig.update_xaxes(range=[-250 - margins, 250 + margins])
fig.update_yaxes(range=[-52.5 - margins, 417.5 + margins])
threept_break_y = 89.47765084
three_line_col = "#777777"
main_line_col = "#777777"
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
paper_bgcolor="white",
plot_bgcolor="white",
yaxis=dict(
scaleanchor="x",
scaleratio=1,
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
fixedrange=True,
),
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
fixedrange=True,
),
shapes=[
dict(
type="rect", x0=-250, y0=-52.5, x1=250, y1=417.5,
line=dict(color=main_line_col, width=1),
layer='below'
), ct(
type="rect", x0=-80, y0=-52.5, x1=80, y1=137.5,
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="rect", x0=-60, y0=-52.5, x1=60, y1=137.5,
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="circle", x0=-60, y0=77.5, x1=60, y1=197.5, xref="x", yref="y",
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="line", x0=-60, y0=137.5, x1=60, y1=137.5,
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="rect", x0=-2, y0=-7.25, x1=2, y1=-12.5,
line=dict(color="#ec7607", width=1),
fillcolor='#ec7607',
),
dict(
type="circle", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref="x", yref="y",
line=dict(color="#ec7607", width=1),
),
dict(
type="line", x0=-30, y0=-12.5, x1=30, y1=-12.5,
line=dict(color="#ec7607", width=1),
),
dict(type="path",
path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),
line=dict(color=main_line_col, width=1), layer='below'),
dict(type="path",
path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101, end_angle=np.pi - 0.386283101),
line=dict(color=main_line_col, width=1), layer='below'),
dict(
type="line", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,
line=dict(color=three_line_col, width=1), layer='below'
),
dict(
type="line", x0=220, y0=-52.5, x1=220, y1=threept_break_y,
line=dict(color=three_line_col, width=1), layer='below'
),
dict(
type="line", x0=-250, y0=227.5, x1=-220, y1=227.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=250, y0=227.5, x1=220, y1=227.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=17.5, x1=-80, y1=17.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=27.5, x1=-80, y1=27.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=57.5, x1=-80, y1=57.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=87.5, x1=-80, y1=87.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=17.5, x1=80, y1=17.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=27.5, x1=80, y1=27.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=57.5, x1=80, y1=57.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=87.5, x1=80, y1=87.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(type="path",
path=ellipse_arc(y_center=417.5, a=60, b=60, start_angle=-0, end_angle=-np.pi),
line=dict(color=main_line_col, width=1), layer='below'),
]
)
return True
def draw_plotly_whole_court(fig, fig_width=600, margins=10):
def ellipse_arc(x_center=0.0, y_center=0.0, a=10.5, b=10.5, start_angle=0.0, end_angle=2 * np.pi, N=200, closed=False):
t = np.linspace(start_angle, end_angle, N)
x = x_center + a * np.cos(t)
y = y_center + b * np.sin(t)
path = f'M {x[0]}, {y[0]}'
for k in range(1, len(t)):
path += f'L{x[k]}, {y[k]}'
if closed:
path += ' Z'
return path
fig_height = fig_width * (470*2 + 2 * margins) / (500 + 2 * margins)
fig.update_layout(width=fig_width, height=fig_height)
fig.update_xaxes(range=[-250 - margins, 250 + margins])
fig.update_yaxes(range=[-52.5 - margins, 417.5+470 + margins])
threept_break_y = 89.47765084
three_line_col = "#777777"
main_line_col = "#777777"
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
paper_bgcolor="white",
plot_bgcolor="white",
yaxis=dict(
scaleanchor="x",
scaleratio=1,
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
fixedrange=True,
),
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False,
ticks='',
showticklabels=False,
fixedrange=True,
),
shapes=[
dict(
type="rect", x0=-250, y0=-52.5, x1=250, y1=417.5+470,
line=dict(color=main_line_col, width=1),
layer='below'
),
type="rect", x0=-80, y0=-52.5, x1=80, y1=137.5,
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="rect", x0=-60, y0=-52.5, x1=60, y1=137.5,
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="circle", x0=-60, y0=77.5, x1=60, y1=197.5, xref="x", yref="y",
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="line", x0=-60, y0=137.5, x1=60, y1=137.5,
line=dict(color=main_line_col, width=1),
layer='below'
),
dict(
type="rect", x0=-2, y0=-7.25, x1=2, y1=-12.5,
line=dict(color="#ec7607", width=1),
fillcolor='#ec7607',
),
dict(
type="circle", x0=-7.5, y0=-7.5, x1=7.5, y1=7.5, xref="x", yref="y",
line=dict(color="#ec7607", width=1),
),
dict(
type="line", x0=-30, y0=-12.5, x1=30, y1=-12.5,
line=dict(color="#ec7607", width=1),
),
dict(type="path",
path=ellipse_arc(a=40, b=40, start_angle=0, end_angle=np.pi),
line=dict(color=main_line_col, width=1), layer='below'),
dict(type="path",
path=ellipse_arc(a=237.5, b=237.5, start_angle=0.386283101, end_angle=np.pi - 0.386283101),
line=dict(color=main_line_col, width=1), layer='below'),
dict(
type="line", x0=-220, y0=-52.5, x1=-220, y1=threept_break_y,
line=dict(color=three_line_col, width=1), layer='below'
),
dict(
type="line", x0=220, y0=-52.5, x1=220, y1=threept_break_y,
line=dict(color=three_line_col, width=1), layer='below'
),
dict(
type="line", x0=-250, y0=227.5, x1=-220, y1=227.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=250, y0=227.5, x1=220, y1=227.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=17.5, x1=-80, y1=17.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=27.5, x1=-80, y1=27.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=57.5, x1=-80, y1=57.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=-90, y0=87.5, x1=-80, y1=87.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=17.5, x1=80, y1=17.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=27.5, x1=80, y1=27.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=57.5, x1=80, y1=57.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(
type="line", x0=90, y0=87.5, x1=80, y1=87.5,
line=dict(color=main_line_col, width=1), layer='below'
),
dict(type="path",
path=ellipse_arc(y_center=417.5, a=60, b=60, start_angle=-0, end_angle=-np.pi),
line=dict(color=main_line_col, width=1), layer='below'),
return True
max_freq = 0.002
colorscale = 'YlOrRd'
marker_cmin = 0.1
marker_cmax = 0.6
ticktexts = [str(marker_cmin*100)+'%-', "", str(marker_cmax*100)+'%+']
fig = go.Figure()
draw_plotly_whole_court(fig)
vis_dir='/media/felicia/Data/sgan_results/vis/'
fig.write_image(vis_dir+"court.svg")
| true | true |
f7261f4f8b69b121e8b4085d991fa58a731f368d | 1,412 | py | Python | cuestionario/urls.py | LisandroCanteros/Grupo2_COM06_Info2021 | 86ad9e08db4e8935bf397b6e4db0b3d9d72cb320 | [
"MIT"
] | null | null | null | cuestionario/urls.py | LisandroCanteros/Grupo2_COM06_Info2021 | 86ad9e08db4e8935bf397b6e4db0b3d9d72cb320 | [
"MIT"
] | null | null | null | cuestionario/urls.py | LisandroCanteros/Grupo2_COM06_Info2021 | 86ad9e08db4e8935bf397b6e4db0b3d9d72cb320 | [
"MIT"
] | 1 | 2021-09-05T23:29:56.000Z | 2021-09-05T23:29:56.000Z | from django.urls import path
from .views import (
CategoriasListView, pagina_principal,
CuestionarioListView, cuestionario_vista, cuestionario_datos,
guardar_resultados, resultado, nuevo_cuestionario, editar_cuestionario, eliminar_cuestionario, nueva_pregunta, nueva_respuesta, agregar_categoria,
)
urlpatterns =[
path("", pagina_principal, name="pagina_principal"),
path("nuevo_cuestionario/", nuevo_cuestionario, name="nuevo_cuestionario"),
path("editar_cuestionario/<str:pk>", editar_cuestionario, name="editar_cuestionario"),
path("eliminar_cuestionario/<str:pk>", eliminar_cuestionario, name="eliminar_cuestionario"),
path("agregar_categoria", agregar_categoria, name="agregar_categoria"),
path("agregar_pregunta/", nueva_pregunta, name="agregar_pregunta"),
path("agregar_respuesta/", nueva_respuesta, name="agregar_respuesta"),
path("categorias/", CategoriasListView.as_view(), name="categorias"),
path("categorias/<str:categoria>/", CuestionarioListView.as_view(), name="categoria-actual"),
path("categorias/<str:categoria>/<int:pk>/", cuestionario_vista, name="cuestionarios"),
path("categorias/<str:categoria>/<int:pk>/jugar", cuestionario_datos, name="jugar"),
path("categorias/<str:categoria>/<int:pk>/guardar/", guardar_resultados, name="guardar_cuestionarios"),
path("resultado/<int:pk>/", resultado, name='resultado'),
]
| 45.548387 | 150 | 0.753541 | from django.urls import path
from .views import (
CategoriasListView, pagina_principal,
CuestionarioListView, cuestionario_vista, cuestionario_datos,
guardar_resultados, resultado, nuevo_cuestionario, editar_cuestionario, eliminar_cuestionario, nueva_pregunta, nueva_respuesta, agregar_categoria,
)
urlpatterns =[
path("", pagina_principal, name="pagina_principal"),
path("nuevo_cuestionario/", nuevo_cuestionario, name="nuevo_cuestionario"),
path("editar_cuestionario/<str:pk>", editar_cuestionario, name="editar_cuestionario"),
path("eliminar_cuestionario/<str:pk>", eliminar_cuestionario, name="eliminar_cuestionario"),
path("agregar_categoria", agregar_categoria, name="agregar_categoria"),
path("agregar_pregunta/", nueva_pregunta, name="agregar_pregunta"),
path("agregar_respuesta/", nueva_respuesta, name="agregar_respuesta"),
path("categorias/", CategoriasListView.as_view(), name="categorias"),
path("categorias/<str:categoria>/", CuestionarioListView.as_view(), name="categoria-actual"),
path("categorias/<str:categoria>/<int:pk>/", cuestionario_vista, name="cuestionarios"),
path("categorias/<str:categoria>/<int:pk>/jugar", cuestionario_datos, name="jugar"),
path("categorias/<str:categoria>/<int:pk>/guardar/", guardar_resultados, name="guardar_cuestionarios"),
path("resultado/<int:pk>/", resultado, name='resultado'),
]
| true | true |
f7262028bb67932989ce287fc4342964d85098d4 | 843 | py | Python | merak/commands/__init__.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 16 | 2021-01-22T04:09:30.000Z | 2022-03-17T10:38:34.000Z | merak/commands/__init__.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 6 | 2021-04-12T10:09:47.000Z | 2022-03-24T09:31:13.000Z | merak/commands/__init__.py | Yao1993/merak | 517b7a8eca82eebbf22bcd3688a79e1e76ed9d42 | [
"Apache-2.0"
] | 2 | 2021-07-14T05:39:17.000Z | 2021-07-28T16:27:40.000Z | # Copyright 2021 (David) Siu-Kei Muk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from merak.commands.cythonize import Cythonize
| 42.15 | 80 | 0.709371 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from merak.commands.cythonize import Cythonize
| true | true |
f72620bb64981cc53d557d6184ba036e5477d39e | 2,690 | py | Python | dsp/core/spider.py | gods-view/AdclickIO | ccb73867e568aac5f40bd5890149626ce0be1897 | [
"BSD-2-Clause"
] | null | null | null | dsp/core/spider.py | gods-view/AdclickIO | ccb73867e568aac5f40bd5890149626ce0be1897 | [
"BSD-2-Clause"
] | null | null | null | dsp/core/spider.py | gods-view/AdclickIO | ccb73867e568aac5f40bd5890149626ce0be1897 | [
"BSD-2-Clause"
] | null | null | null | # encoding=utf-8
import sys
from imp import reload
reload(sys)
# sys.setdefaultencoding('utf8')
import os, time
# from configure import config
from datetime import datetime as dt
import requests
# requests.adapters.DEFAULT_RETRIES = 5
# import chardet
class HttpSpider:
headers = {
# 'User-Agent':'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
# 'Connection':'keep-alive',
# 'Content-Type: application/json'
# "Accept-Encoding":"gzip,deflate"
# "Authorization": "Bearer 5f6f8a89a85b6bde10c69198ca9a2e8ea9f13bf8"
}
def __init__(self):
pass
def show_error(self, error_log, msg):
if error_log is None:
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), msg)
else:
error_log.error(msg)
def internal_get(self, url, timeout=5, headers=''):
# print (headers)
# print (type(headers["Authorization"]))
# self.headers = timeouts
# print (type(self.headers["Authorization"]))
if headers == '':
response = requests.get(url, headers=self.headers, timeout=timeout)
else:
response = requests.get(url, headers=headers, timeout=timeout)
# print (response.content)
if response:
return response.status_code, response.text
else:
return response.content, None
def internal_post(self, url, data, timeout=5, headers=''):
# print (data, type(data))
if headers == '':
response = requests.post(url, data, headers=self.headers, timeout=timeout)
else:
response = requests.post(url, data, headers=headers, timeout=timeout)
print (response, response.content)
if response:
return response.status_code, response.text
else:
return response.content, None
def internal_patch(self, url, data, timeout=5):
# print (data)
response = requests.patch(url, data, headers=self.headers, timeout=timeout)
# print (response, response.content)
if response:
return response.content, response.text
else:
return response.content, None
def internal_put(self, url, data, timeout=5, headers=''):
# print (data)
if headers == '':
response = requests.put(url, data, headers=self.headers, timeout=timeout)
else:
response = requests.put(url, data, headers=headers, timeout=timeout)
print (response, response.content)
if response:
return response.status_code, response.text
else:
return response.content, None
| 32.409639 | 89 | 0.612268 |
import sys
from imp import reload
reload(sys)
import os, time
from datetime import datetime as dt
import requests
class HttpSpider:
headers = {
}
def __init__(self):
pass
def show_error(self, error_log, msg):
if error_log is None:
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), msg)
else:
error_log.error(msg)
def internal_get(self, url, timeout=5, headers=''):
if headers == '':
response = requests.get(url, headers=self.headers, timeout=timeout)
else:
response = requests.get(url, headers=headers, timeout=timeout)
if response:
return response.status_code, response.text
else:
return response.content, None
def internal_post(self, url, data, timeout=5, headers=''):
if headers == '':
response = requests.post(url, data, headers=self.headers, timeout=timeout)
else:
response = requests.post(url, data, headers=headers, timeout=timeout)
print (response, response.content)
if response:
return response.status_code, response.text
else:
return response.content, None
def internal_patch(self, url, data, timeout=5):
response = requests.patch(url, data, headers=self.headers, timeout=timeout)
if response:
return response.content, response.text
else:
return response.content, None
def internal_put(self, url, data, timeout=5, headers=''):
if headers == '':
response = requests.put(url, data, headers=self.headers, timeout=timeout)
else:
response = requests.put(url, data, headers=headers, timeout=timeout)
print (response, response.content)
if response:
return response.status_code, response.text
else:
return response.content, None
| true | true |
f72622da8b3fb8fe690f929b9dd25369d65cc062 | 17,230 | py | Python | turing_complete_interface/circuit_builder.py | scottanderson/turing-complete-interface | 37d52265a2b7b693c1dc67505bd47622aa0f1e9f | [
"MIT"
] | 4 | 2022-01-23T20:29:16.000Z | 2022-03-20T06:10:47.000Z | turing_complete_interface/circuit_builder.py | scottanderson/turing-complete-interface | 37d52265a2b7b693c1dc67505bd47622aa0f1e9f | [
"MIT"
] | null | null | null | turing_complete_interface/circuit_builder.py | scottanderson/turing-complete-interface | 37d52265a2b7b693c1dc67505bd47622aa0f1e9f | [
"MIT"
] | 1 | 2022-01-28T02:41:25.000Z | 2022-01-28T02:41:25.000Z | from __future__ import annotations
import json
from dataclasses import dataclass, field
from math import inf, sqrt
from queue import PriorityQueue
from typing import Any, Callable, Collection, TYPE_CHECKING, Iterable, Literal
from bitarray import bitarray
from turing_complete_interface.circuit_parser import Circuit, GateReference, GateShape, CircuitWire, CircuitPin, Pos
from turing_complete_interface.truth_table import Atom
from turing_complete_interface.logic_nodes import CombinedLogicNode, NodePin
from turing_complete_interface.tc_components import get_component, rev_components
if TYPE_CHECKING:
import pydot
@dataclass
class IOPosition:
    """Placement request for one IO component of the generated circuit.

    ``pin_mapping`` maps the logic-node pin name to the component's pin name;
    ``force_id`` / ``force_position`` pin down the gate id and location when
    reproducing an existing circuit, otherwise they are chosen automatically.
    """
    component_name: str
    pin_mapping: dict[str, str]
    force_id: str = None
    force_position: tuple[int, int] = None
    custom_data: str = ""

    @classmethod
    def from_circuit(cls, circuit: Circuit) -> list[IOPosition]:
        """Collect an IOPosition for every IO gate already in *circuit*."""
        ios = []
        for gate in circuit.gates:
            shape, _ = get_component(gate.name, gate.custom_data)
            if not shape.is_io:
                continue
            if len(shape.pins) == 1:
                # Single-pin component: the node pin is just the gate id.
                mapping = {gate.id: next(iter(shape.pins))}
            else:
                mapping = {f"{gate.id}.{pin_name}": pin_name for pin_name in shape.pins}
            ios.append(cls(gate.name, mapping, gate.id, gate.pos, gate.custom_data))
        return ios

    @classmethod
    def from_node(cls, node: CombinedLogicNode) -> list[IOPosition]:
        """Derive IO placements from a logic node's inputs and outputs."""
        ios = [cls("Input1" if inp.bits == 1 else "Input1B", {name: "value"}, custom_data=name)
               for name, inp in node.inputs.items()]
        ios += [cls("Output1" if out.bits == 1 else "Output1B", {name: "value"}, custom_data=name)
                for name, out in node.outputs.items()]
        return ios
@dataclass
class Space:
    """Row-major occupancy grid used to place gate bounding boxes.

    ``(x, y)`` is the top-left corner of the usable area in circuit
    coordinates and ``(w, h)`` its size; one bit per cell tracks whether a
    cell is already occupied.
    """
    x: int
    y: int
    w: int
    h: int
    _observer: Any = None  # optional callback invoked after every placement
    _placed_boxes: list[tuple[int, int, int, int]] = field(default_factory=list)
    _protected: set[Any] = field(default_factory=set)  # hashes of reserved spots
    _taken_spaces: bitarray = None  # w*h bits, row-major; 1 = occupied

    def __post_init__(self):
        # Allocate one bit per cell and start fully empty.
        self._taken_spaces = bitarray(self.w * self.h)
        self._taken_spaces.setall(0)

    def place(self, w: int, h: int, force_pos: tuple[int, int] = None,
              hasher: Callable[[tuple[int, int]], Any] = None) -> tuple[int, int]:
        """Reserve a ``w`` x ``h`` box and return its top-left circuit coordinate.

        When *force_pos* is None, the first free spot found by a row-major
        scan is used; otherwise the box is stamped at *force_pos*
        unconditionally.  *hasher* maps a circuit coordinate to a key stored
        in ``_protected`` so protected spots are skipped by later searches.
        Raises ValueError when no free spot remains.
        """
        if force_pos is None:
            # Find a run of w free bits, then check the h-1 rows below it.
            for idx in self._taken_spaces.itersearch(bitarray([0] * w)):
                y, x = divmod(idx, self.w)
                if x + w > self.w:
                    continue  # run wraps around the right edge
                if hasher is not None and hasher((x + self.x, y + self.y)) in self._protected:
                    continue
                for k in range(h):
                    if idx + k * self.w + w >= self.h * self.w:
                        break  # box would stick out of the bottom
                    if self._taken_spaces[idx + k * self.w:idx + k * self.w + w].any():
                        break  # a lower row is partially occupied
                else:
                    break  # all h rows free -> use this spot
            else:
                raise ValueError(f"No space left {w}, {h}")
        else:
            x, y = force_pos
        self._placed_boxes.append((x, y, w, h))
        idx = y * self.w + x
        for k in range(h):
            self._taken_spaces[idx + k * self.w:idx + k * self.w + w] = 1
        if hasher is not None:
            self._protected.add(hasher((x + self.x, y + self.y)))
        if self._observer is not None:
            self._observer(self)
        return x + self.x, y + self.y

    def is_filled(self, x: int, y: int):
        """Return the occupancy bit for the circuit coordinate ``(x, y)``."""
        return self._taken_spaces[(y - self.y) * self.w + (x - self.x)]

    def clear(self):
        """Forget all placements, leaving the grid empty but full-sized.

        Bug fix: this previously called ``self._taken_spaces.clear()``,
        which shrinks the bitarray to length 0 and breaks every subsequent
        ``place``/``is_filled`` call; reset the bits in place instead.
        """
        self._taken_spaces.setall(0)
        self._placed_boxes.clear()
        self._protected.clear()
@dataclass
class PathFinder:
    """A* wire router over the circuit grid.

    ``area`` is ``(x, y, w, h)`` in circuit coordinates.  ``taken`` is the
    occupancy grid indexed ``taken[x][y]`` — outer length ``w == area[2]``,
    inner length ``h == area[3]`` — holding what blocks each cell, if anything.
    """
    circuit: Circuit
    area: tuple[int, int, int, int]
    taken: list[list[Literal['gate', 'pin', 'wire_end'] | None]]

    def add_gate(self, gate_ref: GateReference, shape: GateShape):
        """Mark every cell covered by *gate_ref*'s body, pins and big shape."""
        for bx, by in shape.blocks:
            p = gate_ref.translate((bx, by))
            self.taken[p[0] - self.area[0]][p[1] - self.area[1]] = 'gate'
        for pin in shape.pins.values():
            p = gate_ref.translate(pin.pos)
            self.taken[p[0] - self.area[0]][p[1] - self.area[1]] = 'pin'
        if shape.big_shape:
            for bx in range(shape.big_shape.tl[0], shape.big_shape.br[0]):
                for by in range(shape.big_shape.tl[1], shape.big_shape.br[1]):
                    p = gate_ref.translate((bx, by))
                    self.taken[p[0] - self.area[0]][p[1] - self.area[1]] = 'gate'

    def add_wire_end(self, pos: Pos):
        """Mark a wire endpoint so later paths treat it as an obstacle."""
        self.taken[pos[0] - self.area[0]][pos[1] - self.area[1]] = 'wire_end'

    def reload(self):
        """Rebuild the occupancy grid from the circuit's gates and wires."""
        # Bug fix: the grid is indexed taken[x][y] everywhere (add_gate,
        # add_wire_end, path_find) and create() builds area[2] columns of
        # area[3] cells each, but this method previously built the transpose
        # ([[None] * w] * h), which mis-indexes or IndexErrors whenever the
        # routed area is not square.
        self.taken = [[None] * self.area[3] for _ in range(self.area[2])]
        for gate in self.circuit.gates:
            shape = get_component(gate, no_node=True)[0]
            self.add_gate(gate, shape)
        for wire in self.circuit.wires:
            if wire.positions:
                self.add_wire_end(wire.positions[0])
                self.add_wire_end(wire.positions[-1])

    def path_find(self, start: Pos, end: Pos):
        """A* search from *start* to *end* with 8-way moves of unit cost.

        Returns the list of circuit coordinates ordered from *end* back to
        *start* (both inclusive), or None when *end* sits on a gate or is
        unreachable.  Only the *end* cell itself may be occupied (by the
        pin or wire end being connected to).
        """
        # Work in grid-local coordinates; translate back in reconstruct().
        start = start[0] - self.area[0], start[1] - self.area[1]
        end = end[0] - self.area[0], end[1] - self.area[1]
        if self.taken[end[0]][end[1]] == "gate":
            return None

        def h(p):
            # NOTE(review): Euclidean distance overestimates when diagonal
            # steps cost 1 (Chebyshev distance would be admissible), so the
            # returned path may be slightly suboptimal; kept as-is to avoid
            # changing existing layouts.
            return sqrt((p[0] - end[0]) ** 2 + (p[1] - end[1]) ** 2)

        def reconstruct():
            # Walk the came_from chain from end back to start.
            path = [(end[0] + self.area[0], end[1] + self.area[1])]
            current = end
            while current in came_from:
                current = came_from[current]
                path.append((current[0] + self.area[0], current[1] + self.area[1]))
            return path

        queue = PriorityQueue()
        queue.put((0, start))
        came_from = {}
        path_cost = {start: 0}
        heuristic_cost = {}
        while not queue.empty():
            _, (x, y) = queue.get()
            if (x, y) == end:
                return reconstruct()
            for dx in (-1, 0, 1):
                if not (0 <= x + dx < self.area[2]):
                    continue
                for dy in (-1, 0, 1):
                    if not (0 <= y + dy < self.area[3]):
                        continue
                    if dx == dy == 0:
                        continue
                    np = x + dx, y + dy
                    # Occupied cells block the path, except the target itself.
                    if self.taken[np[0]][np[1]] is not None and (np != end):
                        continue
                    new_cost = path_cost[x, y] + 1
                    if new_cost < path_cost.get(np, inf):
                        came_from[np] = x, y
                        path_cost[np] = new_cost
                        heuristic_cost[np] = new_cost + h(np)
                        queue.put((heuristic_cost[np], np))
        return None

    @classmethod
    def create(cls, circuit, area):
        """Build a PathFinder for *circuit* over *area* and populate its grid."""
        self = cls(circuit, area, [[None] * area[3] for _ in range(area[2])])
        self.reload()
        return self
def build_circuit(node: CombinedLogicNode, io_positions: list[IOPosition], space: Space,
                  level_version: int = 0, place_alone: Collection[str] = (), place_memory_alone=True) -> Circuit:
    """Place every component of *node* into *space* and wire them up.

    IO components are placed first (honoring forced ids/positions), then the
    internal nodes; byte<->bit wires get shared ByteSplitter/ByteMaker
    adapters inserted on demand.
    """
    taken_ids: set[int] = set()
    i = 1

    def get_id() -> int:
        # Smallest positive id not yet used (forced IO ids are pre-claimed).
        nonlocal i
        while i in taken_ids:
            i += 1
        taken_ids.add(i)
        return i

    def place(shape: GateShape, io: bool = False, forced_pos: tuple[int, int] = None):
        # Reserve the shape's bounding box in the space; returns the shape's
        # origin position (top-left minus the bounding-box offset).
        ox, oy, w, h = shape.bounding_box

        def translate(p):
            # Coarse bucket hash used by Space to keep IO placements apart.
            return (int((p[0] + 30 - ox) // 8 - 3), int((p[1] + 30 - oy) // 8 - 3))

        if forced_pos is not None:
            # Bug fix: the y component was converted with ``ox``; use ``oy``
            # (mirrors the ``t - ox, l - oy`` mapping on the way back).
            forced_pos = forced_pos[0] + ox - space.x, forced_pos[1] + oy - space.y
        t, l = space.place(w, h, forced_pos, (translate if io else None))
        return t - ox, l - oy

    gate_refs = []
    pin_locations: dict[NodePin, tuple[CircuitPin, int, int]] = {}
    # --- IO components first, so forced positions win the space race. ---
    for io in io_positions:
        shape, _ = get_component(io.component_name, io.custom_data)
        pos = place(shape, True, io.force_position)
        gi = get_id() if io.force_id is None else io.force_id
        gate_refs.append(GateReference(io.component_name, pos, 0, str(gi), io.custom_data))
        for node_pin_name, shape_pin_name in io.pin_mapping.items():
            dp = shape.pins[shape_pin_name].pos
            pin_locations[None, node_pin_name] = shape.pins[shape_pin_name], pos[0] + dp[0], pos[1] + dp[1]
    # --- Internal components. ---
    for name, n in node.nodes.items():
        component_name, custom_data = rev_components[n.name]
        shape, inner_node = get_component(component_name, custom_data, no_node=True)
        # Stateful components (and explicitly listed ones) are placed with
        # the IO-style separation hash so they do not pile up.
        pos = place(shape, (n.name in place_alone)
                    or (name in place_alone)
                    or (place_memory_alone and inner_node.state_size != 0))
        gi = get_id()
        gate_refs.append(GateReference(component_name, pos, 0, str(gi), custom_data))
        for node_pin_name, pin in shape.pins.items():
            dp = pin.pos
            pin_locations[name, node_pin_name] = pin, pos[0] + dp[0], pos[1] + dp[1]
    # --- Wires, inserting byte adapters where bit widths differ. ---
    wires = []
    splitters = {}
    makers = {}
    bs_shape = get_component("ByteSplitter", "")[0]
    bm_shape = get_component("ByteMaker", "")[0]
    for wire in node.wires:
        source_pin, *start = pin_locations[wire.source]
        target_pin, *end = pin_locations[wire.target]
        start = tuple(start)
        end = tuple(end)
        if not source_pin.is_byte and not target_pin.is_byte:
            assert wire.source_bits == wire.target_bits == (0, 1), wire
            wires.append(CircuitWire(len(wires) + 1, False, 0, "", [tuple(start), tuple(end)]))
        elif source_pin.is_byte and not target_pin.is_byte:
            # Byte source feeding a bit pin: route through a shared splitter.
            assert wire.target_bits == (0, 1)
            if start not in splitters:
                pos = place(bs_shape)
                splitter = splitters[start] = GateReference("ByteSplitter", pos, 0, str(get_id()), "")
                gate_refs.append(splitter)
                wires.append(CircuitWire(get_id(), True, 0, "", [start, bs_shape.pin_position(splitter, "in")]))
            else:
                splitter = splitters[start]
            wires.append(CircuitWire(get_id(), False, 0, "",
                                     [bs_shape.pin_position(splitter, f"r{wire.source_bits[0]}"), end]))
        elif not source_pin.is_byte and target_pin.is_byte:
            # Bit source feeding a byte pin: route through a shared maker.
            assert wire.source_bits == (0, 1)
            if end not in makers:
                pos = place(bm_shape)
                maker = makers[end] = GateReference("ByteMaker", pos, 0, str(get_id()), "")
                gate_refs.append(maker)
                wires.append(CircuitWire(get_id(), True, 0, "", [bm_shape.pin_position(maker, "out"), end]))
            else:
                maker = makers[end]
            wires.append(CircuitWire(get_id(), False, 0, "",
                                     [start, bm_shape.pin_position(maker, f"r{wire.target_bits[0]}")]))
        else:
            # byte -> byte is not handled by this layouter.
            assert False, wire
    return Circuit(gate_refs, wires, 99_999, 99_999, level_version)
def to_pydot(node: CombinedLogicNode, io_positions: list[IOPosition], space: Space) -> \
        tuple[dict[str, tuple[str, IOPosition]], "pydot.Dot"]:
    """Mirror *node* as a pydot digraph for graphviz-based placement.

    Returns ``(io_nodes, graph)`` where ``io_nodes`` maps each logic-node pin
    name to the graph-node name representing its IO component (plus the
    IOPosition itself).
    """
    import pydot

    graph = pydot.Dot(graph_type="digraph", rankdir="LR", nodesep=1, ranksep=5, splines="ortho")
    # One fixed-size box per internal component, sized from its shape.
    for node_name, logic_node in node.nodes.items():
        comp_name, custom = rev_components[logic_node.name]
        comp_shape, _ = get_component(comp_name, custom, no_node=True)
        graph.add_node(pydot.Node(node_name, fixedsize=True, shape="box",
                                  width=comp_shape.bounding_box[2],
                                  height=comp_shape.bounding_box[3],
                                  label=f"{comp_name}[{node_name}]"))
    # One box per IO component, named by the concatenation of its pin names.
    io_nodes = {}
    for io in io_positions:
        comp_shape, _ = get_component(io.component_name, io.custom_data)
        name = "".join(io.pin_mapping)
        for pin_name in io.pin_mapping:
            io_nodes[pin_name] = name, io
        graph.add_node(pydot.Node(name, fixedsize=True, shape="box",
                                  width=7, height=7,
                                  label=f"{io.component_name}[{name}]"))
    # Edges follow the logic wires; IO endpoints resolve through io_nodes.
    for wire in node.wires:
        src_node, src_pin = wire.source
        dst_node, dst_pin = wire.target
        tail = io_nodes[src_pin][0] if src_node is None else src_node
        head = io_nodes[dst_pin][0] if dst_node is None else dst_node
        graph.add_edge(pydot.Edge(tail, head, tailport="e", headport="w"))
    return io_nodes, graph
def layout_with_pydot(node: CombinedLogicNode, io_positions: list[IOPosition], space: Space) -> Circuit:
    """Place components at graphviz-computed positions, then wire them up.

    Runs the graph through graphviz (json0 output), converts point positions
    to grid cells, registers the footprints in *space*, and finally creates
    the wires with byte adapters as in ``build_circuit``.
    """
    i = 1
    taken_ids = set()

    def get_id() -> int:
        # Smallest positive id not yet used (forced IO ids are pre-claimed).
        nonlocal i
        while i in taken_ids:
            i += 1
        taken_ids.add(i)
        return i

    pin_to_io, graph = to_pydot(node, io_positions, space)
    graph.write_svg("test.svg")  # NOTE(review): debug artifact left by the author
    data = json.loads(graph.create(format='json0'))
    del graph
    ionames = {name: io for name, io in pin_to_io.values()}
    gate_refs = []
    pin_locations = {}
    for obj in data["objects"]:
        name, pos = obj['name'], obj['pos']
        pos = pos.split(',')
        # Graphviz positions are in points (72/inch); scale to grid cells.
        pos = int(pos[0]) // 72 + space.x, int(pos[1]) // 72 + space.y
        if name in ionames:
            io: IOPosition = ionames[name]
            if io.force_id is not None:
                gid = int(io.force_id)
                taken_ids.add(gid)
            else:
                gid = get_id()
            component_name, custom_data = io.component_name, io.custom_data
            gate_refs.append(GateReference(io.component_name, pos, 0, str(gid), io.custom_data))
            shape, _ = get_component(io.component_name, io.custom_data, True)
            for node_pin_name, shape_pin_name in io.pin_mapping.items():
                dp = shape.pins[shape_pin_name].pos
                pin_locations[None, node_pin_name] = shape.pins[shape_pin_name], pos[0] + dp[0], pos[1] + dp[1]
        else:
            n = node.nodes[name]
            component_name, custom_data = rev_components[n.name]
            shape, inner_node = get_component(component_name, custom_data, no_node=True)
            gid = get_id()
            for node_pin_name, pin in shape.pins.items():
                dp = pin.pos
                pin_locations[name, node_pin_name] = pin, pos[0] + dp[0], pos[1] + dp[1]
            # Reserve the footprint so adapter placement below avoids it.
            space.place(shape.bounding_box[2], shape.bounding_box[3], (pos[0] - space.x, pos[1] - space.y))
            gate_refs.append(GateReference(component_name, pos, 0, str(gid), custom_data))
    wires = []
    splitters = {}
    makers = {}
    bs_shape = get_component("ByteSplitter", "")[0]
    bm_shape = get_component("ByteMaker", "")[0]
    for wire in node.wires:
        source_pin, *start = pin_locations[wire.source]
        target_pin, *end = pin_locations[wire.target]
        start = tuple(start)
        end = tuple(end)
        if not source_pin.is_byte and not target_pin.is_byte:
            assert wire.source_bits == wire.target_bits == (0, 1), wire
            wires.append(CircuitWire(len(wires) + 1, False, 0, "", [tuple(start), tuple(end)]))
        elif source_pin.is_byte and not target_pin.is_byte:
            # Byte source feeding a bit pin: shared ByteSplitter per source.
            assert wire.target_bits == (0, 1)
            if start not in splitters:
                # Bug fix: the splitter was sized/offset with the ByteMaker's
                # bounding box (bm_shape); use the ByteSplitter's own shape.
                t, l = space.place(bs_shape.bounding_box[2], bs_shape.bounding_box[3])
                pos = t - bs_shape.bounding_box[0], l - bs_shape.bounding_box[1]
                splitter = splitters[start] = GateReference("ByteSplitter", pos, 0, str(get_id()), "")
                gate_refs.append(splitter)
                wires.append(CircuitWire(get_id(), True, 0, "", [start, bs_shape.pin_position(splitter, "in")]))
            else:
                splitter = splitters[start]
            wires.append(CircuitWire(get_id(), False, 0, "",
                                     [bs_shape.pin_position(splitter, f"r{wire.source_bits[0]}"), end]))
        elif not source_pin.is_byte and target_pin.is_byte:
            # Bit source feeding a byte pin: shared ByteMaker per target.
            assert wire.source_bits == (0, 1)
            if end not in makers:
                t, l = space.place(bm_shape.bounding_box[2], bm_shape.bounding_box[3])
                pos = t - bm_shape.bounding_box[0], l - bm_shape.bounding_box[1]
                maker = makers[end] = GateReference("ByteMaker", pos, 0, str(get_id()), "")
                gate_refs.append(maker)
                wires.append(CircuitWire(get_id(), True, 0, "", [bm_shape.pin_position(maker, "out"), end]))
            else:
                maker = makers[end]
            wires.append(CircuitWire(get_id(), False, 0, "",
                                     [start, bm_shape.pin_position(maker, f"r{wire.target_bits[0]}")]))
        else:
            # byte -> byte is not handled by this layouter.
            assert False, wire
    # NOTE(review): build_circuit passes sizes and level_version to Circuit
    # here — confirm Circuit's defaults make this two-argument form correct.
    return Circuit(gate_refs, wires)
# The four gate kinds a two-level layout can emit.
GateType = Literal["and", "or", "nor", "nand"]


def layout_two_levels(inputs: list[str], first: Iterable[tuple[str, tuple[Atom, ...], GateType]],
                      second: Iterable[tuple[str, str, bool]], use_buffer: bool = True):
    """Placeholder for a two-level (sum-of-products style) layout; not implemented."""
| 41.121718 | 116 | 0.556878 | from __future__ import annotations
import json
from dataclasses import dataclass, field
from math import inf, sqrt
from queue import PriorityQueue
from typing import Any, Callable, Collection, TYPE_CHECKING, Iterable, Literal
from bitarray import bitarray
from turing_complete_interface.circuit_parser import Circuit, GateReference, GateShape, CircuitWire, CircuitPin, Pos
from turing_complete_interface.truth_table import Atom
from turing_complete_interface.logic_nodes import CombinedLogicNode, NodePin
from turing_complete_interface.tc_components import get_component, rev_components
if TYPE_CHECKING:
import pydot
@dataclass
class IOPosition:
    """Placement request for one IO component and its pin wiring.

    ``pin_mapping`` maps logic-node pin names to pin names on the component's
    shape.  ``force_id``/``force_position`` pin the gate to values taken from
    an existing circuit instead of freshly allocated ones.
    """
    component_name: str
    pin_mapping: dict[str, str]
    force_id: str = None
    force_position: tuple[int, int] = None
    custom_data: str = ""

    @classmethod
    def from_circuit(cls, circuit: Circuit) -> list[IOPosition]:
        """Collect an IOPosition for every IO gate already present in *circuit*."""
        ios = []
        for gate in circuit.gates:
            shape, _ = get_component(gate.name, gate.custom_data)
            if not shape.is_io:
                continue
            # Single-pin shapes are addressed by the gate id alone; multi-pin
            # shapes use "<gate id>.<pin name>" keys.
            ios.append(IOPosition(gate.name,
                                  {gate.id: next(iter(shape.pins))}
                                  if len(shape.pins) == 1 else {
                                      f"{gate.id}.{pin_name}": pin_name for pin_name in shape.pins
                                  }, gate.id, gate.pos, gate.custom_data))
        return ios

    @classmethod
    def from_node(cls, node: CombinedLogicNode) -> list[IOPosition]:
        """Create default Input/Output components for every pin of *node*.

        1-bit pins get Input1/Output1; anything wider gets the byte variant.
        """
        ios = []
        for name, inp in node.inputs.items():
            ios.append(IOPosition("Input1" if inp.bits == 1 else "Input1B",
                                  {name: "value"}, custom_data=name))
        for name, out in node.outputs.items():
            ios.append(IOPosition("Output1" if out.bits == 1 else "Output1B",
                                  {name: "value"}, custom_data=name))
        return ios
@dataclass
class Space:
    """Greedy rectangle allocator over a ``w`` x ``h`` grid of cells.

    ``(x, y)`` is the absolute position of the grid's top-left corner;
    occupancy is a row-major bitarray.  ``hasher``/``_protected`` let callers
    reserve hash buckets so certain placements (IO components) stay apart.
    """
    x: int
    y: int
    w: int
    h: int
    _observer: Any = None
    _placed_boxes: list[tuple[int, int, int, int]] = field(default_factory=list)
    _protected: set[Any] = field(default_factory=set)
    _taken_spaces: bitarray = None

    def __post_init__(self):
        self._taken_spaces = bitarray([0] * (self.w * self.h))
        self._taken_spaces[:] = 0

    def place(self, w: int, h: int, force_pos: tuple[int, int] = None,
              hasher: Callable[[tuple[int, int]], Any] = None) -> tuple[int, int]:
        """Reserve a ``w`` x ``h`` box and return its absolute top-left cell.

        Without *force_pos*, scans for the first free spot; raises ValueError
        when none exists.  With *force_pos* (space-local), reserves that spot
        unconditionally.  *hasher* buckets accepted positions into
        ``_protected`` so later hash-colliding placements are skipped.
        """
        if force_pos is None:
            # Candidate spots are runs of w zero bits in the bitmap.
            for idx in self._taken_spaces.itersearch(bitarray([0] * w)):
                y, x = divmod(idx, self.w)
                if x + w > self.w:
                    continue  # the run wraps around a row boundary
                if hasher is not None and hasher((x + self.x, y + self.y)) in self._protected:
                    continue  # bucket already claimed by a protected placement
                for k in range(h):
                    # NOTE(review): `>=` also rejects a box whose last row
                    # ends exactly at the buffer end — possibly off-by-one.
                    if idx + k * self.w + w >= self.h * self.w:
                        break  # box would extend past the bottom edge
                    if self._taken_spaces[idx + k * self.w:idx + k * self.w + w].any():
                        break  # some row of the box is already occupied
                else:
                    break  # every row free: accept this spot
            else:
                raise ValueError(f"No space left {w}, {h}")
        else:
            # NOTE(review): forced placements are not bounds- or
            # overlap-checked; rows are marked occupied regardless.
            x, y = force_pos
        self._placed_boxes.append((x, y, w, h))
        idx = y * self.w + x
        for k in range(h):
            self._taken_spaces[idx + k * self.w:idx + k * self.w + w] = 1
        if hasher is not None:
            self._protected.add(hasher((x + self.x, y + self.y)))
        if self._observer is not None:
            self._observer(self)
        return x + self.x, y + self.y

    def is_filled(self, x: int, y: int):
        """Return a truthy bit if the absolute cell (x, y) is occupied."""
        return self._taken_spaces[(y - self.y) * self.w + (x - self.x)]

    def clear(self):
        """Reset the allocator without changing its size.

        Bug fix: ``bitarray.clear()`` truncates the bitmap to length 0,
        breaking later ``place``/``is_filled`` calls; zero it in place.
        """
        self._taken_spaces[:] = 0
        self._placed_boxes.clear()
        self._protected.clear()
@dataclass
class PathFinder:
circuit: Circuit
area: tuple[int, int, int, int]
taken: list[list[Literal['gate', 'pin', 'wire_end'] | None]]
def add_gate(self, gate_ref: GateReference, shape: GateShape):
for bx, by in shape.blocks:
p = gate_ref.translate((bx, by))
self.taken[p[0] - self.area[0]][p[1] - self.area[1]] = 'gate'
for pin in shape.pins.values():
p = gate_ref.translate(pin.pos)
self.taken[p[0] - self.area[0]][p[1] - self.area[1]] = 'pin'
if shape.big_shape:
for bx in range(shape.big_shape.tl[0], shape.big_shape.br[0]):
for by in range(shape.big_shape.tl[1], shape.big_shape.br[1]):
p = gate_ref.translate((bx, by))
self.taken[p[0] - self.area[0]][p[1] - self.area[1]] = 'gate'
def add_wire_end(self, pos: Pos):
self.taken[pos[0] - self.area[0]][pos[1] - self.area[1]] = 'wire_end'
def reload(self):
self.taken = [[None] * self.area[2] for _ in range(self.area[3])]
for gate in self.circuit.gates:
shape = get_component(gate, no_node=True)[0]
self.add_gate(gate, shape)
for wire in self.circuit.wires:
if wire.positions:
self.add_wire_end(wire.positions[0])
self.add_wire_end(wire.positions[-1])
def path_find(self, start: Pos, end: Pos):
start = start[0] - self.area[0], start[1] - self.area[1]
end = end[0] - self.area[0], end[1] - self.area[1]
if self.taken[end[0]][end[1]] == "gate":
return None
def h(p):
return sqrt((p[0] - end[0]) ** 2 + (p[1] - end[1]) ** 2)
def reconstruct():
path = [(end[0] + self.area[0], end[1] + self.area[1])]
current = end
while current in came_from:
current = came_from[current]
path.append((current[0] + self.area[0], current[1] + self.area[1]))
return path
queue = PriorityQueue()
queue.put((0, start))
came_from = {}
path_cost = {start: 0}
heuristic_cost = {}
while not queue.empty():
_, (x, y) = queue.get()
if (x, y) == end:
return reconstruct()
for dx in (-1, 0, 1):
if not (0 <= x + dx < self.area[2]):
continue
for dy in (-1, 0, 1):
if not (0 <= y + dy < self.area[3]):
continue
if dx == dy == 0:
continue
np = x + dx, y + dy
if self.taken[np[0]][np[1]] is not None and (np != end):
continue
new_cost = path_cost[x, y] + 1
if new_cost < path_cost.get(np, inf):
came_from[np] = x, y
path_cost[np] = new_cost
heuristic_cost[np] = new_cost + h(np)
queue.put((heuristic_cost[np], np))
return None
@classmethod
def create(cls, circuit, area):
self = cls(circuit, area, [[None] * area[3] for _ in range(area[2])])
self.reload()
return self
def build_circuit(node: CombinedLogicNode, io_positions: list[IOPosition], space: Space,
level_version: int = 0, place_alone: Collection[str] = (), place_memory_alone=True) -> Circuit:
taken_ids: set[int] = set()
i = 1
def get_id() -> int:
nonlocal i
while i in taken_ids:
i += 1
taken_ids.add(i)
return i
def place(shape: GateShape, io: bool = False, forced_pos: tuple[int, int] = None):
ox, oy, w, h = shape.bounding_box
def translate(p):
return (int((p[0] + 30 - ox) // 8 - 3), int((p[1] + 30 - oy) // 8 - 3))
if forced_pos is not None:
forced_pos = forced_pos[0] + ox - space.x, forced_pos[1] + ox - space.y
t, l = space.place(w, h, forced_pos, (translate if io else None))
return t - ox, l - oy
gate_refs = []
pin_locations: dict[NodePin, tuple[CircuitPin, int, int]] = {}
for io in io_positions:
shape, _ = get_component(io.component_name, io.custom_data)
pos = place(shape, True, io.force_position)
gi = get_id() if io.force_id is None else io.force_id
gate_refs.append(GateReference(io.component_name, pos, 0, str(gi), io.custom_data))
for node_pin_name, shape_pin_name in io.pin_mapping.items():
dp = shape.pins[shape_pin_name].pos
pin_locations[None, node_pin_name] = shape.pins[shape_pin_name], pos[0] + dp[0], pos[1] + dp[1]
for name, n in node.nodes.items():
component_name, custom_data = rev_components[n.name]
shape, inner_node = get_component(component_name, custom_data, no_node=True)
pos = place(shape, (n.name in place_alone)
or (name in place_alone)
or (place_memory_alone and inner_node.state_size != 0))
gi = get_id()
gate_refs.append(GateReference(component_name, pos, 0, str(gi), custom_data))
for node_pin_name, pin in shape.pins.items():
dp = pin.pos
pin_locations[name, node_pin_name] = pin, pos[0] + dp[0], pos[1] + dp[1]
wires = []
splitters = {}
makers = {}
bs_shape = get_component("ByteSplitter", "")[0]
bm_shape = get_component("ByteMaker", "")[0]
for wire in node.wires:
source_pin, *start = pin_locations[wire.source]
target_pin, *end = pin_locations[wire.target]
start = tuple(start)
end = tuple(end)
if not source_pin.is_byte and not target_pin.is_byte:
assert wire.source_bits == wire.target_bits == (0, 1), wire
wires.append(CircuitWire(len(wires) + 1, False, 0, "", [tuple(start), tuple(end)]))
elif source_pin.is_byte and not target_pin.is_byte:
assert wire.target_bits == (0, 1)
if start not in splitters:
pos = place(bs_shape)
splitter = splitters[start] = GateReference("ByteSplitter", pos, 0, str(get_id()), "")
gate_refs.append(splitter)
wires.append(CircuitWire(get_id(), True, 0, "", [start, bs_shape.pin_position(splitter, "in")]))
else:
splitter = splitters[start]
wires.append(CircuitWire(get_id(), False, 0, "",
[bs_shape.pin_position(splitter, f"r{wire.source_bits[0]}"), end]))
elif not source_pin.is_byte and target_pin.is_byte:
assert wire.source_bits == (0, 1)
if end not in makers:
pos = place(bm_shape)
maker = makers[end] = GateReference("ByteMaker", pos, 0, str(get_id()), "")
gate_refs.append(maker)
wires.append(CircuitWire(get_id(), True, 0, "", [bm_shape.pin_position(maker, "out"), end]))
else:
maker = makers[end]
wires.append(CircuitWire(get_id(), False, 0, "",
[start, bm_shape.pin_position(maker, f"r{wire.target_bits[0]}")]))
else:
assert False, wire
return Circuit(gate_refs, wires, 99_999, 99_999, level_version)
def to_pydot(node: CombinedLogicNode, io_positions: list[IOPosition], space: Space) -> \
tuple[dict[str, tuple[str, IOPosition]], "pydot.Dot"]:
import pydot
g = pydot.Dot(graph_type="digraph", rankdir="LR", nodesep=1, ranksep=5, splines="ortho")
for name, ln in node.nodes.items():
component_name, custom_data = rev_components[ln.name]
shape, inner_node = get_component(component_name, custom_data, no_node=True)
g.add_node(pydot.Node(name, fixedsize=True, shape="box",
width=shape.bounding_box[2], height=shape.bounding_box[3],
label=f"{component_name}[{name}]"))
io_nodes = {}
for io in io_positions:
shape, _ = get_component(io.component_name, io.custom_data)
name = "".join(io.pin_mapping)
for p in io.pin_mapping:
io_nodes[p] = name, io
g.add_node(pydot.Node(name, fixedsize=True, shape="box",
width=7, height=7,
label=f"{io.component_name}[{name}]"))
for wire in node.wires:
if wire.source[0] is None:
source = io_nodes[wire.source[1]][0]
else:
source = wire.source[0]
if wire.target[0] is None:
target = io_nodes[wire.target[1]][0]
else:
target = wire.target[0]
g.add_edge(pydot.Edge(source, target, tailport="e", headport="w", ))
return io_nodes, g
def layout_with_pydot(node: CombinedLogicNode, io_positions: list[IOPosition], space: Space) -> Circuit:
i = 1
taken_ids = set()
def get_id() -> int:
nonlocal i
while i in taken_ids:
i += 1
taken_ids.add(i)
return i
pin_to_io, graph = to_pydot(node, io_positions, space)
graph.write_svg("test.svg")
data = json.loads(graph.create(format='json0'))
del graph
ionames = {name: io for name, io in pin_to_io.values()}
gate_refs = []
pin_locations = {}
for obj in data["objects"]:
name, pos = obj['name'], obj['pos']
pos = pos.split(',')
pos = int(pos[0]) // 72 + space.x, int(pos[1]) // 72 + space.y
if name in ionames:
io: IOPosition = ionames[name]
if io.force_id is not None:
gid = int(io.force_id)
taken_ids.add(gid)
else:
gid = get_id()
component_name, custom_data = io.component_name, io.custom_data
gate_refs.append(GateReference(io.component_name, pos, 0, str(gid), io.custom_data))
shape, _ = get_component(io.component_name, io.custom_data, True)
for node_pin_name, shape_pin_name in io.pin_mapping.items():
dp = shape.pins[shape_pin_name].pos
pin_locations[None, node_pin_name] = shape.pins[shape_pin_name], pos[0] + dp[0], pos[1] + dp[1]
else:
n = node.nodes[name]
component_name, custom_data = rev_components[n.name]
shape, inner_node = get_component(component_name, custom_data, no_node=True)
gid = get_id()
for node_pin_name, pin in shape.pins.items():
dp = pin.pos
pin_locations[name, node_pin_name] = pin, pos[0] + dp[0], pos[1] + dp[1]
space.place(shape.bounding_box[2], shape.bounding_box[3], (pos[0] - space.x, pos[1] - space.y))
gate_refs.append(GateReference(component_name, pos, 0, str(gid), custom_data))
wires = []
splitters = {}
makers = {}
bs_shape = get_component("ByteSplitter", "")[0]
bm_shape = get_component("ByteMaker", "")[0]
for wire in node.wires:
source_pin, *start = pin_locations[wire.source]
target_pin, *end = pin_locations[wire.target]
start = tuple(start)
end = tuple(end)
if not source_pin.is_byte and not target_pin.is_byte:
assert wire.source_bits == wire.target_bits == (0, 1), wire
wires.append(CircuitWire(len(wires) + 1, False, 0, "", [tuple(start), tuple(end)]))
elif source_pin.is_byte and not target_pin.is_byte:
assert wire.target_bits == (0, 1)
if start not in splitters:
t, l = space.place(bm_shape.bounding_box[2], bm_shape.bounding_box[3])
pos = t - bm_shape.bounding_box[0], l - bm_shape.bounding_box[1]
splitter = splitters[start] = GateReference("ByteSplitter", pos, 0, str(get_id()), "")
gate_refs.append(splitter)
wires.append(CircuitWire(get_id(), True, 0, "", [start, bs_shape.pin_position(splitter, "in")]))
else:
splitter = splitters[start]
wires.append(CircuitWire(get_id(), False, 0, "",
[bs_shape.pin_position(splitter, f"r{wire.source_bits[0]}"), end]))
elif not source_pin.is_byte and target_pin.is_byte:
assert wire.source_bits == (0, 1)
if end not in makers:
t, l = space.place(bm_shape.bounding_box[2], bm_shape.bounding_box[3])
pos = t - bm_shape.bounding_box[0], l - bm_shape.bounding_box[1]
maker = makers[end] = GateReference("ByteMaker", pos, 0, str(get_id()), "")
gate_refs.append(maker)
wires.append(CircuitWire(get_id(), True, 0, "", [bm_shape.pin_position(maker, "out"), end]))
else:
maker = makers[end]
wires.append(CircuitWire(get_id(), False, 0, "",
[start, bm_shape.pin_position(maker, f"r{wire.target_bits[0]}")]))
else:
assert False, wire
return Circuit(gate_refs, wires)
GateType = Literal["and", "or", "nor", "nand"]
def layout_two_levels(inputs: list[str], first: Iterable[tuple[str, tuple[Atom, ...], GateType]],
second: Iterable[tuple[str, str, bool]], use_buffer: bool = True):
pass
| true | true |
f72623295ea209ba44d041366790eed08ce7ecff | 7,709 | py | Python | dvc/ignore.py | iksnagreb/dvc | a077135d76bd30205ca6db75bb76c55465be5255 | [
"Apache-2.0"
] | 1 | 2020-08-12T22:51:45.000Z | 2020-08-12T22:51:45.000Z | dvc/ignore.py | iksnagreb/dvc | a077135d76bd30205ca6db75bb76c55465be5255 | [
"Apache-2.0"
] | null | null | null | dvc/ignore.py | iksnagreb/dvc | a077135d76bd30205ca6db75bb76c55465be5255 | [
"Apache-2.0"
] | 1 | 2020-11-28T11:47:48.000Z | 2020-11-28T11:47:48.000Z | import logging
import os
import re
from itertools import groupby
from pathspec.patterns import GitWildMatchPattern
from pathspec.util import normalize_file
from pygtrie import StringTrie
from dvc.path_info import PathInfo
from dvc.pathspec_math import merge_patterns
from dvc.system import System
from dvc.utils import relpath
logger = logging.getLogger(__name__)
class DvcIgnore:
    """Base interface for ignore filters.

    Subclasses are callables taking ``(root, dirs, files)`` and returning the
    filtered ``(dirs, files)`` pair, mirroring ``os.walk`` pruning.
    """

    DVCIGNORE_FILE = ".dvcignore"

    def __call__(self, root, dirs, files):
        raise NotImplementedError
class DvcIgnorePatterns(DvcIgnore):
    """Matches paths against the gitignore-style patterns of one .dvcignore file."""

    def __init__(self, pattern_list, dirname):
        # Raw pattern lines and the directory they are anchored to;
        # ``prefix`` speeds up the relative-path check in matches().
        self.pattern_list = pattern_list
        self.dirname = dirname
        self.prefix = self.dirname + os.sep

        regex_pattern_list = map(
            GitWildMatchPattern.pattern_to_regex, pattern_list
        )

        # Compile consecutive patterns sharing the same include/exclude flag
        # into one alternation regex; ignore() scans the groups in order so
        # the last matching group wins, mirroring gitignore semantics.
        self.ignore_spec = [
            (ignore, re.compile("|".join(item[0] for item in group)))
            for ignore, group in groupby(regex_pattern_list, lambda x: x[1])
            if ignore is not None
        ]

    @classmethod
    def from_files(cls, ignore_file_path, tree):
        """Build patterns from the non-empty lines of *ignore_file_path* on *tree*."""
        assert os.path.isabs(ignore_file_path)
        dirname = os.path.normpath(os.path.dirname(ignore_file_path))
        with tree.open(ignore_file_path, encoding="utf-8") as fobj:
            path_spec_lines = [
                line for line in map(str.strip, fobj.readlines()) if line
            ]
        return cls(path_spec_lines, dirname)

    def __call__(self, root, dirs, files):
        """Drop the entries of *dirs*/*files* under *root* that match."""
        files = [f for f in files if not self.matches(root, f)]
        dirs = [d for d in dirs if not self.matches(root, d, True)]

        return dirs, files

    def matches(self, dirname, basename, is_dir=False):
        # NOTE: `relpath` is too slow, so we have to assume that both
        # `dirname` and `self.dirname` are relative or absolute together.
        if dirname == self.dirname:
            path = basename
        elif dirname.startswith(self.prefix):
            rel = dirname[len(self.prefix) :]
            # NOTE: `os.path.join` is ~x5.5 slower
            path = f"{rel}{os.sep}{basename}"
        else:
            # Path lies outside the directory these patterns apply to.
            return False

        if not System.is_unix():
            path = normalize_file(path)
        return self.ignore(path, is_dir)

    def ignore(self, path, is_dir):
        """Return the flag of the last matching pattern group (False if none)."""
        result = False
        if is_dir:
            # Try both forms so directory-only patterns ("dir/") also match.
            path_dir = f"{path}/"
            for ignore, pattern in self.ignore_spec:
                if pattern.match(path) or pattern.match(path_dir):
                    result = ignore
        else:
            for ignore, pattern in self.ignore_spec:
                if pattern.match(path):
                    result = ignore
        return result

    def __hash__(self):
        return hash(self.dirname + ":" + "\n".join(self.pattern_list))

    def __eq__(self, other):
        if not isinstance(other, DvcIgnorePatterns):
            return NotImplemented
        # `&` on the two bool operands behaves like `and` here.
        return (self.dirname == other.dirname) & (
            self.pattern_list == other.pattern_list
        )

    def __bool__(self):
        return bool(self.pattern_list)
class DvcIgnorePatternsTrie(DvcIgnore):
    """Maps directories to their merged ignore patterns via a path-prefix trie.

    Looking up a directory returns the patterns of its nearest registered
    ancestor; registering a directory merges its patterns with those already
    applying to it, so nested .dvcignore files accumulate.
    """

    def __init__(self):
        # Idiom fix: the original used a class-level ``trie = None`` sentinel
        # re-checked in __init__ before assigning an instance attribute over
        # it; a plain per-instance attribute behaves identically and avoids
        # the misleading shared-looking class attribute.
        self.trie = StringTrie(separator=os.sep)

    def __call__(self, root, dirs, files):
        ignore_pattern = self[root]
        if ignore_pattern:
            return ignore_pattern(root, dirs, files)
        return dirs, files

    def __setitem__(self, root, ignore_pattern):
        # Merge with whatever already applies to ``root`` instead of
        # replacing it.
        base_pattern = self[root]
        common_dirname, merged_pattern = merge_patterns(
            base_pattern.dirname,
            base_pattern.pattern_list,
            ignore_pattern.dirname,
            ignore_pattern.pattern_list,
        )
        self.trie[root] = DvcIgnorePatterns(merged_pattern, common_dirname)

    def __getitem__(self, root):
        """Return the patterns of the longest registered prefix of *root*."""
        ignore_pattern = self.trie.longest_prefix(root)
        if ignore_pattern:
            return ignore_pattern.value
        # Empty pattern set (falsy) when nothing applies.
        return DvcIgnorePatterns([], root)
class DvcIgnoreDirs(DvcIgnore):
    """Ignore filter that drops directories whose basename is in a fixed set."""

    def __init__(self, basenames):
        self.basenames = set(basenames)

    def __call__(self, root, dirs, files):
        dirs = [d for d in dirs if d not in self.basenames]

        return dirs, files

    def __hash__(self):
        # Bug fix: hashing ``tuple(self.basenames)`` depends on set iteration
        # order, so two __eq__-equal instances could hash differently — a
        # broken __eq__/__hash__ contract for objects stored in the
        # DvcIgnoreFilter ``ignores`` set.  frozenset hashing is
        # order-independent.
        return hash(frozenset(self.basenames))

    def __eq__(self, other):
        if not isinstance(other, DvcIgnoreDirs):
            return NotImplemented

        return self.basenames == other.basenames
class DvcIgnoreRepo(DvcIgnore):
    """Prunes subdirectories that are themselves DVC repositories."""

    def __call__(self, root, dirs, files):
        def is_subrepo(name):
            # Imported lazily per check, exactly as before — presumably to
            # dodge an import cycle with dvc.repo.
            from dvc.repo import Repo

            return os.path.isdir(os.path.join(root, name, Repo.DVC_DIR))

        remaining = [name for name in dirs if not is_subrepo(name)]
        return remaining, files
class DvcIgnoreFilterNoop:
    """Drop-in stand-in for DvcIgnoreFilter that never ignores anything."""

    def __init__(self, tree, root_dir):
        # Same constructor signature as DvcIgnoreFilter; arguments unused.
        pass

    def __call__(self, root, dirs, files):
        # Pass everything through untouched.
        return dirs, files

    def is_ignored_dir(self, _):
        return False

    def is_ignored_file(self, _):
        return False
class DvcIgnoreFilter:
    """Aggregates all ignore rules for a repo rooted at *root_dir*.

    Combines the built-in directory ignores (.git/.hg/.dvc), sub-repo
    pruning, and every .dvcignore file found while walking the tree.
    """

    def __init__(self, tree, root_dir):
        self.tree = tree
        self.root_dir = root_dir
        self.ignores = {
            DvcIgnoreDirs([".git", ".hg", ".dvc"]),
            DvcIgnoreRepo(),
        }
        ignore_pattern_trie = DvcIgnorePatternsTrie()
        # Walk top-down, collecting .dvcignore patterns and pruning already
        # ignored dirs in place so the walk never descends into them.
        for root, dirs, _ in self.tree.walk(self.root_dir):
            ignore_pattern = self._get_ignore_pattern(root)
            if ignore_pattern:
                ignore_pattern_trie[root] = ignore_pattern
                self.ignores.add(ignore_pattern_trie)
            dirs[:], _ = self(root, dirs, [])

    def _get_ignore_pattern(self, dirname):
        """Load *dirname*'s .dvcignore patterns, or None if the file is absent."""
        ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)
        if self.tree.exists(ignore_file_path):
            return DvcIgnorePatterns.from_files(ignore_file_path, self.tree)
        return None

    def __call__(self, root, dirs, files):
        """Apply every registered ignore filter to (dirs, files) under *root*."""
        for ignore in self.ignores:
            dirs, files = ignore(root, dirs, files)

        return dirs, files

    def is_ignored_dir(self, path):
        """True if *path* (a directory) is ignored or has an ignored ancestor."""
        if not self._parents_exist(path):
            return True
        path = os.path.abspath(path)
        if path == self.root_dir:
            return False

        # The directory is ignored iff the filters drop its basename.
        dirname, basename = os.path.split(path)
        dirs, _ = self(dirname, [basename], [])
        return not dirs

    def is_ignored_file(self, path):
        """True if *path* (a file) is ignored or has an ignored ancestor."""
        if not self._parents_exist(path):
            return True
        dirname, basename = os.path.split(os.path.normpath(path))
        _, files = self(os.path.abspath(dirname), [], [basename])
        return not files

    def _parents_exist(self, path):
        """False when *path* is outside the repo or under an ignored parent."""
        from dvc.repo import Repo

        path = PathInfo(path)

        # if parent is root_dir or inside a .dvc dir we can skip this check
        if path.parent == self.root_dir or Repo.DVC_DIR in path.parts:
            return True

        # paths outside of the repo should be ignored
        path = relpath(path, self.root_dir)
        if path.startswith("..") or (
            os.name == "nt"
            and not os.path.commonprefix(
                [os.path.abspath(path), self.root_dir]
            )
        ):
            return False

        # check if parent directories are in our ignores, starting from
        # root_dir
        for parent_dir in reversed(PathInfo(path).parents):
            dirname, basename = os.path.split(parent_dir)
            if basename == ".":
                # parent_dir == root_dir
                continue
            dirs, _ = self(os.path.abspath(dirname), [basename], [])
            if not dirs:
                return False

        return True
| 30.350394 | 77 | 0.609936 | import logging
import os
import re
from itertools import groupby
from pathspec.patterns import GitWildMatchPattern
from pathspec.util import normalize_file
from pygtrie import StringTrie
from dvc.path_info import PathInfo
from dvc.pathspec_math import merge_patterns
from dvc.system import System
from dvc.utils import relpath
logger = logging.getLogger(__name__)
class DvcIgnore:
DVCIGNORE_FILE = ".dvcignore"
def __call__(self, root, dirs, files):
raise NotImplementedError
class DvcIgnorePatterns(DvcIgnore):
def __init__(self, pattern_list, dirname):
self.pattern_list = pattern_list
self.dirname = dirname
self.prefix = self.dirname + os.sep
regex_pattern_list = map(
GitWildMatchPattern.pattern_to_regex, pattern_list
)
self.ignore_spec = [
(ignore, re.compile("|".join(item[0] for item in group)))
for ignore, group in groupby(regex_pattern_list, lambda x: x[1])
if ignore is not None
]
@classmethod
def from_files(cls, ignore_file_path, tree):
assert os.path.isabs(ignore_file_path)
dirname = os.path.normpath(os.path.dirname(ignore_file_path))
with tree.open(ignore_file_path, encoding="utf-8") as fobj:
path_spec_lines = [
line for line in map(str.strip, fobj.readlines()) if line
]
return cls(path_spec_lines, dirname)
def __call__(self, root, dirs, files):
files = [f for f in files if not self.matches(root, f)]
dirs = [d for d in dirs if not self.matches(root, d, True)]
return dirs, files
def matches(self, dirname, basename, is_dir=False):
if dirname == self.dirname:
path = basename
elif dirname.startswith(self.prefix):
rel = dirname[len(self.prefix) :]
path = f"{rel}{os.sep}{basename}"
else:
return False
if not System.is_unix():
path = normalize_file(path)
return self.ignore(path, is_dir)
def ignore(self, path, is_dir):
result = False
if is_dir:
path_dir = f"{path}/"
for ignore, pattern in self.ignore_spec:
if pattern.match(path) or pattern.match(path_dir):
result = ignore
else:
for ignore, pattern in self.ignore_spec:
if pattern.match(path):
result = ignore
return result
def __hash__(self):
return hash(self.dirname + ":" + "\n".join(self.pattern_list))
def __eq__(self, other):
if not isinstance(other, DvcIgnorePatterns):
return NotImplemented
return (self.dirname == other.dirname) & (
self.pattern_list == other.pattern_list
)
def __bool__(self):
return bool(self.pattern_list)
class DvcIgnorePatternsTrie(DvcIgnore):
    # Aggregates DvcIgnorePatterns per directory in a path-prefix trie so
    # that the deepest (longest-prefix) pattern set applies to a given root.
    # Class-level default; `__init__` lazily gives each instance its own trie.
    trie = None
    def __init__(self):
        # On a fresh instance `self.trie` resolves to the class attribute
        # (None), so the assignment below creates a per-instance StringTrie.
        if self.trie is None:
            self.trie = StringTrie(separator=os.sep)
    def __call__(self, root, dirs, files):
        # Apply the pattern set governing *root*; empty sets pass through.
        ignore_pattern = self[root]
        if ignore_pattern:
            return ignore_pattern(root, dirs, files)
        return dirs, files
    def __setitem__(self, root, ignore_pattern):
        # Merge the new patterns with whatever already governs *root*
        # (inherited from an ancestor directory) before storing.
        base_pattern = self[root]
        common_dirname, merged_pattern = merge_patterns(
            base_pattern.dirname,
            base_pattern.pattern_list,
            ignore_pattern.dirname,
            ignore_pattern.pattern_list,
        )
        self.trie[root] = DvcIgnorePatterns(merged_pattern, common_dirname)
    def __getitem__(self, root):
        # Longest-prefix lookup; fall back to an empty pattern set.
        ignore_pattern = self.trie.longest_prefix(root)
        if ignore_pattern:
            return ignore_pattern.value
        return DvcIgnorePatterns([], root)
class DvcIgnoreDirs(DvcIgnore):
    """Ignore a fixed set of directory basenames wherever they appear."""
    def __init__(self, basenames):
        # Set for O(1) membership tests during filtering.
        self.basenames = set(basenames)
    def __call__(self, root, dirs, files):
        remaining = [name for name in dirs if name not in self.basenames]
        return remaining, files
    def __hash__(self):
        return hash(tuple(self.basenames))
    def __eq__(self, other):
        if not isinstance(other, DvcIgnoreDirs):
            return NotImplemented
        return self.basenames == other.basenames
class DvcIgnoreRepo(DvcIgnore):
    """Skip sub-directories that are themselves DVC repositories."""
    def __call__(self, root, dirs, files):
        def contains_dvc_dir(entry):
            # Local import mirrors the original: avoids a circular import
            # with dvc.repo at module load time.
            from dvc.repo import Repo
            return os.path.isdir(os.path.join(root, entry, Repo.DVC_DIR))
        remaining = [entry for entry in dirs if not contains_dvc_dir(entry)]
        return remaining, files
class DvcIgnoreFilterNoop:
    """Filter that ignores nothing; drop-in stand-in for DvcIgnoreFilter."""
    def __init__(self, tree, root_dir):
        # Signature kept for interchangeability; both arguments are unused.
        pass
    def __call__(self, root, dirs, files):
        return dirs, files
    def is_ignored_dir(self, _):
        return False
    def is_ignored_file(self, _):
        return False
class DvcIgnoreFilter:
    # Walks the workspace once, collecting every .dvcignore file into a
    # pattern trie, then filters os.walk-style (dirs, files) listings.
    def __init__(self, tree, root_dir):
        # tree: filesystem abstraction providing walk()/open()/exists();
        # root_dir: absolute path of the workspace root.
        self.tree = tree
        self.root_dir = root_dir
        # Built-in ignores: SCM/DVC internals plus nested DVC repositories.
        self.ignores = {
            DvcIgnoreDirs([".git", ".hg", ".dvc"]),
            DvcIgnoreRepo(),
        }
        ignore_pattern_trie = DvcIgnorePatternsTrie()
        for root, dirs, _ in self.tree.walk(self.root_dir):
            ignore_pattern = self._get_ignore_pattern(root)
            if ignore_pattern:
                ignore_pattern_trie[root] = ignore_pattern
                self.ignores.add(ignore_pattern_trie)
            # Prune ignored sub-directories IN PLACE so walk() skips them.
            dirs[:], _ = self(root, dirs, [])
    def _get_ignore_pattern(self, dirname):
        # Load the .dvcignore file in *dirname*, if one exists.
        ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)
        if self.tree.exists(ignore_file_path):
            return DvcIgnorePatterns.from_files(ignore_file_path, self.tree)
        return None
    def __call__(self, root, dirs, files):
        # Run the listing through every registered ignore, in turn.
        for ignore in self.ignores:
            dirs, files = ignore(root, dirs, files)
        return dirs, files
    def is_ignored_dir(self, path):
        # A path with a missing/ignored ancestor is itself ignored.
        if not self._parents_exist(path):
            return True
        path = os.path.abspath(path)
        # The workspace root itself is never ignored.
        if path == self.root_dir:
            return False
        dirname, basename = os.path.split(path)
        dirs, _ = self(dirname, [basename], [])
        return not dirs
    def is_ignored_file(self, path):
        if not self._parents_exist(path):
            return True
        dirname, basename = os.path.split(os.path.normpath(path))
        _, files = self(os.path.abspath(dirname), [], [basename])
        return not files
    def _parents_exist(self, path):
        # Local import avoids a circular import with dvc.repo.
        from dvc.repo import Repo
        path = PathInfo(path)
        # Direct children of the root (or anything under .dvc) always pass.
        if path.parent == self.root_dir or Repo.DVC_DIR in path.parts:
            return True
        path = relpath(path, self.root_dir)
        # Outside the workspace, or on another Windows drive -> does not exist.
        if path.startswith("..") or (
            os.name == "nt"
            and not os.path.commonprefix(
                [os.path.abspath(path), self.root_dir]
            )
        ):
            return False
        # Every ancestor directory must itself survive the ignore filter.
        for parent_dir in reversed(PathInfo(path).parents):
            dirname, basename = os.path.split(parent_dir)
            if basename == ".":
                continue
            dirs, _ = self(os.path.abspath(dirname), [basename], [])
            if not dirs:
                return False
        return True
| true | true |
f726248f250b43e625a1784113284065ec6f8efa | 6,073 | py | Python | app/auth/routes.py | Alexsik76/flask_blog | e780469afe246a56c4e5c5744d16cf8cb7da9374 | [
"Apache-2.0"
] | null | null | null | app/auth/routes.py | Alexsik76/flask_blog | e780469afe246a56c4e5c5744d16cf8cb7da9374 | [
"Apache-2.0"
] | null | null | null | app/auth/routes.py | Alexsik76/flask_blog | e780469afe246a56c4e5c5744d16cf8cb7da9374 | [
"Apache-2.0"
] | null | null | null | import os
from functools import wraps
from flask import flash, redirect, render_template, url_for, current_app, Markup, request
from flask_login import login_user, login_required, logout_user, current_user
from app.auth import bp
from app.auth.forms import SignUpForm, RegistrationForm, LoginForm, ResetPasswordForm, NewPasswordForm, UserForm
from app.auth.email import send_email
from itsdangerous import URLSafeTimedSerializer
from app.models import User
from app import db
def offer_to_log_in(email: str):
    """Flash a danger message saying *email* is taken, with a log-in link."""
    login_link = f"""<a href="{url_for('auth.login', email=email)}" class="danger-link">Log In</a>"""
    flash(Markup(f"The email: {email} is used. Please {login_link}."), 'danger')
def get_email_from_token(token):
    """Decode and return the e-mail address signed into *token*."""
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    return serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
def redirect_authenticated(f):
    """View decorator: redirect home when the logged-in user already owns
    the e-mail baked into the view's ``token`` keyword argument."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Short-circuit keeps the token undecoded for anonymous visitors.
        if current_user.is_authenticated and current_user.email == get_email_from_token(kwargs.get('token')):
            return redirect(url_for('main.index'))
        return f(*args, **kwargs)
    return wrapper
@bp.route('/signup', methods=['GET', 'POST'])
async def signup():
    """Start registration: e-mail a confirmation link unless the address
    is already taken (in which case offer to log in instead).

    Fixes: removed a leftover debug ``print(res)`` and the needless
    f-string with no placeholders.
    """
    form = SignUpForm()
    is_busy = bool(User.query.filter_by(email=form.email.data).first())
    if form.validate_on_submit() and not is_busy:
        await send_email(form.email.data, goal='registration')
        flash('To continue registration, follow the link in the letter.', 'info')
        return redirect(url_for('main.index'))
    elif is_busy:
        offer_to_log_in(form.email.data)
    return render_template('auth/signup.html', form=form)
@bp.route('/register/<token>', methods=['GET', 'POST'])
@redirect_authenticated
def register(token):
    """Finish registration for the e-mail signed into *token*.

    Creates the user, its password hash and its upload directory.
    Fixes: removed a debug ``print`` whose ``{...:=}`` format spec was
    broken anyway; replaced the racy isdir+mkdir pair with
    ``os.makedirs(..., exist_ok=True)``; simplified the redundant
    ``True if ... else False`` ternary.
    """
    form = RegistrationForm()
    email = get_email_from_token(token)
    if User.query.filter_by(email=email).first():
        offer_to_log_in(email)
        return redirect(url_for('main.index'))
    form.email.data = email
    if form.validate_on_submit():
        new_user = User(
            email=email,  # noqa
            first_name=form.first_name.data,  # noqa
            last_name=form.last_name.data,  # noqa
            # Only the configured admin address gets admin rights.
            is_admin=(email == current_app.config['ADMIN_EMAIL'])  # noqa
        )
        new_user.set_password(form.password.data)
        db.session.add(new_user)
        db.session.commit()
        # Per-user upload directory; exist_ok avoids the check-then-create race.
        os.makedirs(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id)), exist_ok=True)
        flash('You can log in', 'success')
        return redirect(url_for('main.index'))
    return render_template('auth/register.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate an existing user; the e-mail may be pre-filled from
    the ``email`` query-string parameter."""
    form = LoginForm()
    if email := request.args.get('email'):
        form.email.data = email
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # Guard clauses instead of an if/elif/else ladder.
        if not user:
            flash(f'User with email {form.email.data} not registered', 'danger')
            return redirect(url_for('auth.signup'))
        if not user.check_password(form.password.data):
            flash('Wrong password', 'danger')
            return redirect(url_for('main.index'))
        login_user(user, remember=form.remember_me.data)
        flash(f'Hi, {user.first_name}!', 'success')
        return redirect(url_for('main.index'))
    return render_template('auth/login.html', form=form)
@bp.route('/log_out', methods=['GET', 'POST'])
@login_required
def log_out():
    """Terminate the current session and return to the landing page."""
    logout_user()
    flash('You are logged out', 'info')
    return redirect(url_for('main.index'))
@bp.route('/reset_password', methods=['GET', 'POST'])
async def reset_password():
    """E-mail a password-reset link to a registered address.

    Bug fix: ``send_email`` is a coroutine (it is awaited in ``signup``),
    so it must be awaited here too -- previously the coroutine was created
    and discarded, and no mail was ever sent.
    """
    form = ResetPasswordForm()
    if current_user.is_authenticated:
        # A logged-in user can only reset their own password.
        form.email.data = current_user.email
        form.email.render_kw = {'disabled': True}
    is_present = bool(User.query.filter_by(email=form.email.data).first())
    if form.validate_on_submit():
        if is_present:
            await send_email(form.email.data, goal='reset')
            flash('To continue reset password, follow the link in the letter.', 'info')
            return redirect(url_for('main.index'))
        else:
            href = f"""<a href="{url_for('auth.signup', email=form.email.data)}" class="danger-link">Sign up</a>"""
            message = f"The email: {form.email.data} not founded. Please {href} or use correct email."
            flash(Markup(message), 'danger')
    # NOTE(review): renders the signup template -- confirm that a dedicated
    # reset template was not intended here.
    return render_template('auth/signup.html', form=form)
@bp.route('/new_password/<token>', methods=['GET', 'POST'])
def new_password(token):
    """Set a new password for the account whose e-mail is signed into *token*.

    Fixes: reuses the shared ``get_email_from_token`` helper instead of
    duplicating the serializer setup, and guards against the account having
    been deleted after the token was issued (previously an AttributeError /
    HTTP 500 on ``None.set_password``).
    """
    form = NewPasswordForm()
    email = get_email_from_token(token)
    form.email.data = email
    user = User.query.filter_by(email=email).first()
    if user is None:
        # Token is valid but the account no longer exists.
        flash(f'User with email {email} not registered', 'danger')
        return redirect(url_for('main.index'))
    if form.validate_on_submit():
        user.set_password(form.password.data)
        db.session.commit()
        flash('Password was changed. You can log in', 'success')
        return redirect(url_for('main.index'))
    elif form.is_submitted():
        # Submitted but invalid: re-render with a 422 so clients see the error.
        return render_template('auth/new_password.html', form=form), 422
    return render_template('auth/new_password.html', form=form)
@bp.route('/user_page', methods=['GET', 'POST'])
@login_required
def user_page():
    """Show and update the profile of the logged-in user.

    Bug fix: field values are compared with ``!=`` instead of ``is not`` --
    identity comparison of equal strings is unreliable (two equal strings
    need not be the same object), which forced spurious writes and commits.
    """
    form = UserForm(obj=current_user)
    if form.validate_on_submit():
        is_changed = False
        for field in ('email', 'first_name', 'last_name'):
            if getattr(form, field).data != getattr(current_user, field):
                setattr(current_user, field, getattr(form, field).data)
                is_changed = True
        if is_changed:
            # Only hit the database when something actually changed.
            db.session.commit()
    return render_template('auth/user_page.html', form=form)
| 39.953947 | 115 | 0.668698 | import os
from functools import wraps
from flask import flash, redirect, render_template, url_for, current_app, Markup, request
from flask_login import login_user, login_required, logout_user, current_user
from app.auth import bp
from app.auth.forms import SignUpForm, RegistrationForm, LoginForm, ResetPasswordForm, NewPasswordForm, UserForm
from app.auth.email import send_email
from itsdangerous import URLSafeTimedSerializer
from app.models import User
from app import db
def offer_to_log_in(email: str):
    """Flash a danger message saying *email* is taken, with a log-in link."""
    href = f"""<a href="{url_for('auth.login', email=email)}" class="danger-link">Log In</a>"""
    message = f"The email: {email} is used. Please {href}."
    flash(Markup(message), 'danger')
def get_email_from_token(token):
    """Decode and return the e-mail address signed into *token*."""
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
    return email
def redirect_authenticated(f):
    """View decorator: redirect home when the logged-in user already owns
    the e-mail baked into the view's ``token`` keyword argument."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Short-circuit keeps the token undecoded for anonymous visitors.
        if current_user.is_authenticated and current_user.email == get_email_from_token(kwargs.get('token')):
            return redirect(url_for('main.index'))
        return f(*args, **kwargs)
    return decorated_function
@bp.route('/signup', methods=['GET', 'POST'])
async def signup():
    """Start registration: e-mail a confirmation link unless taken."""
    form = SignUpForm()
    is_busy = bool(User.query.filter_by(email=form.email.data).first())
    if form.validate_on_submit() and not is_busy:
        res = await send_email(form.email.data, goal='registration')
        # NOTE(review): leftover debug print -- consider removing.
        print(res)
        flash(f'To continue registration, follow the link in the letter.', 'info')
        return redirect(url_for('main.index'))
    elif is_busy:
        offer_to_log_in(form.email.data)
    return render_template('auth/signup.html', form=form)
@bp.route('/register/<token>', methods=['GET', 'POST'])
@redirect_authenticated
def register(token):
    """Finish registration for the e-mail signed into *token*."""
    form = RegistrationForm()
    email = get_email_from_token(token)
    if bool(User.query.filter_by(email=email).first()):
        offer_to_log_in(email)
        return redirect(url_for('main.index'))
    form.email.data = email
    if form.validate_on_submit():
        new_user = User(
            email=email,
            first_name=form.first_name.data,
            last_name=form.last_name.data,
            # Only the configured admin address gets admin rights.
            is_admin=True if email == current_app.config['ADMIN_EMAIL'] else False
        )
        new_user.set_password(form.password.data)
        # NOTE(review): leftover debug print; the `:=` format spec also looks
        # like a typo for the `=` debug specifier (`{new_user.is_admin=}`).
        print(f'{new_user.is_admin:=}')
        db.session.add(new_user)
        db.session.commit()
        # Create the per-user upload directory if it does not exist yet.
        if not os.path.isdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id))):
            os.mkdir(os.path.join(current_app.config['UPLOAD_PATH'], str(new_user.id)))
        flash('You can log in', 'success')
        return redirect(url_for('main.index'))
    return render_template('auth/register.html', form=form)
@bp.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate an existing user; e-mail may come from the query string."""
    form = LoginForm()
    if email := request.args.get('email'):
        form.email.data = email
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if not user:
            flash(f'User with email {form.email.data} not registered', 'danger')
            return redirect(url_for('auth.signup'))
        elif not user.check_password(form.password.data):
            flash('Wrong password', 'danger')
            return redirect(url_for('main.index'))
        else:
            login_user(user, remember=form.remember_me.data)
            flash(f'Hi, {user.first_name}!', 'success')
            return redirect(url_for('main.index'))
    return render_template('auth/login.html', form=form)
@bp.route('/log_out', methods=['GET', 'POST'])
@login_required
def log_out():
    """Terminate the current session and return to the landing page."""
    logout_user()
    flash('You are logged out', 'info')
    return redirect(url_for('main.index'))
@bp.route('/reset_password', methods=['GET', 'POST'])
def reset_password():
    """E-mail a password-reset link to a registered address."""
    form = ResetPasswordForm()
    if current_user.is_authenticated:
        # A logged-in user can only reset their own password.
        form.email.data = current_user.email
        form.email.render_kw = {'disabled': True}
    is_present = bool(User.query.filter_by(email=form.email.data).first())
    if form.validate_on_submit():
        if is_present:
            # NOTE(review): send_email is awaited in signup(), so it is
            # presumably a coroutine -- this call is likely missing an
            # `await` (and this view would need to be `async`). Confirm.
            send_email(form.email.data, goal='reset')
            flash('To continue reset password, follow the link in the letter.', 'info')
            return redirect(url_for('main.index'))
        else:
            href = f"""<a href="{url_for('auth.signup', email=form.email.data)}" class="danger-link">Sign up</a>"""
            message = f"The email: {form.email.data} not founded. Please {href} or use correct email."
            flash(Markup(message), 'danger')
    return render_template('auth/signup.html', form=form)
@bp.route('/new_password/<token>', methods=['GET', 'POST'])
def new_password(token):
    """Set a new password for the account whose e-mail is in *token*."""
    form = NewPasswordForm()
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    email = serializer.loads(token, salt=current_app.config['SECURITY_PASSWORD_SALT'])
    form.email.data = email
    # NOTE(review): `user` may be None if the account was removed after the
    # token was issued -- set_password would then raise AttributeError.
    user = User.query.filter_by(email=email).first()
    if form.validate_on_submit():
        user.set_password(form.password.data)
        db.session.commit()
        flash('Password was changed. You can log in', 'success')
        return redirect(url_for('main.index'))
    elif form.is_submitted():
        # Submitted but invalid: re-render with HTTP 422.
        return render_template('auth/new_password.html', form=form), 422
    return render_template('auth/new_password.html', form=form)
@bp.route('/user_page', methods=['GET', 'POST'])
@login_required
def user_page():
    """Show and update the profile of the logged-in user."""
    form = UserForm(obj=current_user)
    if form.validate_on_submit():
        is_changed = False
        for field in 'email', 'first_name', 'last_name':
            # NOTE(review): `is not` compares identity; equal strings can be
            # distinct objects, so this likely should be `!=`.
            if getattr(form, field).data is not getattr(current_user, field):
                setattr(current_user, field, getattr(form, field).data)
                is_changed = True
        if is_changed:
            db.session.commit()
    return render_template('auth/user_page.html', form=form)
| true | true |
f7262560c667d65265155dbba12d860d64d7e43a | 1,450 | py | Python | utils/prediction.py | catskillsresearch/xview2-catskills | 5671cff323c8121c0ae251e360e454a1e8568f58 | [
"BSD-3-Clause"
] | null | null | null | utils/prediction.py | catskillsresearch/xview2-catskills | 5671cff323c8121c0ae251e360e454a1e8568f58 | [
"BSD-3-Clause"
] | null | null | null | utils/prediction.py | catskillsresearch/xview2-catskills | 5671cff323c8121c0ae251e360e454a1e8568f58 | [
"BSD-3-Clause"
] | null | null | null | #!/home/catskills/anaconda3/envs/xview2/bin/python
import glob, os
from shutil import copyfile
from tqdm import tqdm
from subprocess import call
from IPython.utils.path import ensure_dir_exists
# os.environ["CUDA_VISIBLE_DEVICES"]="1" # second gpu
# Batch driver: run localization + damage inference over every pre-disaster
# test image and stage the predictions for submission.
VERSION = os.getenv('VERSION')
if not VERSION:
    # Fail fast: every path below embeds the release version.
    raise SystemExit('The VERSION environment variable must be set')
PROJECT = 'xview2-catskills'
USERDIR = '/home/catskills/Desktop'
CODEDIR = f'{USERDIR}/{PROJECT}'
DATADIR = f'{USERDIR}/dataxv2'
TESTDIR = f'{DATADIR}/test/images/'
SUBMIT_DIR = f'{DATADIR}/{VERSION}_submit_001'
MODEL_DIR = f'{DATADIR}/release/{VERSION}'  # was a hard-coded copy of DATADIR
LOCALIZATION_MODEL = f'{MODEL_DIR}/localization.hdf5'
DAMAGE_MODEL = f'{MODEL_DIR}/classification.hdf5'
ensure_dir_exists(SUBMIT_DIR)
files = glob.glob(f'{TESTDIR}test_pre_*.png')
for pre_png in tqdm(files):
    post_png = pre_png.replace('_pre_', '_post_')
    image_id = pre_png.split('.')[0].split('/')[-1].split('_')[-1]
    out_damage_png = f'{SUBMIT_DIR}/test_damage_{image_id}_prediction.png'
    if os.path.isfile(out_damage_png):
        # Resume support: skip images that already have a prediction.
        continue
    out_local_png = f'{SUBMIT_DIR}/test_localization_{image_id}_prediction.png'
    C = (f'./inference.sh -x {CODEDIR} -i {pre_png} -p {post_png} '
         f'-l {LOCALIZATION_MODEL} -c {DAMAGE_MODEL} -o {out_damage_png} -y')
    # shell=True is acceptable here: every argument is a local constant,
    # not untrusted input.
    call(C, shell=True)
    if os.path.isfile(out_damage_png):
        # The damage mask doubles as the localization prediction.
        copyfile(out_damage_png, out_local_png)
    else:
        print("PROCESS FAILED", image_id)
import glob, os
from shutil import copyfile
from tqdm import tqdm
from subprocess import call
from IPython.utils.path import ensure_dir_exists
# Batch driver: run localization + damage inference over every pre-disaster
# test image and stage the predictions for submission.
# Fix: the VERSION assignment was mangled to a bare `getenv('VERSION')`
# call; VERSION is used in the paths below, so the assignment is restored.
VERSION = os.getenv('VERSION')
PROJECT = 'xview2-catskills'
USERDIR = '/home/catskills/Desktop'
CODEDIR = f'{USERDIR}/{PROJECT}'
DATADIR = f'{USERDIR}/dataxv2'
TESTDIR = f'{DATADIR}/test/images/'
SUBMIT_DIR = f'{DATADIR}/{VERSION}_submit_001'
MODEL_DIR = f'/home/catskills/Desktop/dataxv2/release/{VERSION}'
LOCALIZATION_MODEL = f'{MODEL_DIR}/localization.hdf5'
DAMAGE_MODEL = f'{MODEL_DIR}/classification.hdf5'
ensure_dir_exists(SUBMIT_DIR)
files = glob.glob(f'{TESTDIR}test_pre_*.png')
for pre_png in tqdm(files):
    post_png = pre_png.replace('_pre_', '_post_')
    image_id = pre_png.split('.')[0].split('/')[-1].split('_')[-1]
    out_damage_png = f'{SUBMIT_DIR}/test_damage_{image_id}_prediction.png'
    if os.path.isfile(out_damage_png):
        # Resume support: skip images that already have a prediction.
        continue
    out_local_json = f'{SUBMIT_DIR}/test_localization_{image_id}_prediction.json'
    out_local_png = f'{SUBMIT_DIR}/test_localization_{image_id}_prediction.png'
    C = f'./inference.sh -x {CODEDIR} -i {pre_png} -p {post_png} -l {LOCALIZATION_MODEL} -c {DAMAGE_MODEL} -o {out_damage_png} -y'
    call(C, shell=True)
    if os.path.isfile(out_damage_png):
        # The damage mask doubles as the localization prediction.
        copyfile(out_damage_png, out_local_png)
    else:
        print("PROCESS FAILED", image_id)
| true | true |
f726258904982674e78e973db19b6f500c57c842 | 7,783 | py | Python | integration/python/integration_api/models/baas_sub_account_vo.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | integration/python/integration_api/models/baas_sub_account_vo.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | integration/python/integration_api/models/baas_sub_account_vo.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Integration API
The Hydrogen Integration API # noqa: E501
OpenAPI spec version: 1.2.1
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BaasSubAccountVO(object):
    """Swagger-generated value object for a BaaS sub-account.

    The public interface (constructor keywords, properties, ``swagger_types``,
    ``attribute_map`` and the helper methods) is preserved; the only code
    change is dropping the ``six`` compatibility shim in ``to_dict`` in favour
    of native dict iteration (identical behaviour on Python 3).
    """
    # Attribute name -> attribute type (consumed by to_dict()).
    swagger_types = {
        'account_number': 'str',
        'account_status': 'str',
        'message': 'str',
        'nucleus_portfolio_id': 'str',
        'status': 'str',
        'vendor_name': 'object',
        'vendor_response': 'object'
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'account_number': 'account_number',
        'account_status': 'account_status',
        'message': 'message',
        'nucleus_portfolio_id': 'nucleus_portfolio_id',
        'status': 'status',
        'vendor_name': 'vendor_name',
        'vendor_response': 'vendor_response'
    }
    def __init__(self, account_number=None, account_status=None, message=None, nucleus_portfolio_id=None, status=None, vendor_name=None, vendor_response=None):
        """BaasSubAccountVO - a model defined in Swagger.

        All fields are optional; only keywords that are not ``None`` are
        assigned (through their property setters).
        """
        self._account_number = None
        self._account_status = None
        self._message = None
        self._nucleus_portfolio_id = None
        self._status = None
        self._vendor_name = None
        self._vendor_response = None
        self.discriminator = None
        if account_number is not None:
            self.account_number = account_number
        if account_status is not None:
            self.account_status = account_status
        if message is not None:
            self.message = message
        if nucleus_portfolio_id is not None:
            self.nucleus_portfolio_id = nucleus_portfolio_id
        if status is not None:
            self.status = status
        if vendor_name is not None:
            self.vendor_name = vendor_name
        if vendor_response is not None:
            self.vendor_response = vendor_response
    @property
    def account_number(self):
        """str: the sub-account number."""
        return self._account_number
    @account_number.setter
    def account_number(self, account_number):
        self._account_number = account_number
    @property
    def account_status(self):
        """str: the sub-account status."""
        return self._account_status
    @account_status.setter
    def account_status(self, account_status):
        self._account_status = account_status
    @property
    def message(self):
        """str: human-readable message accompanying the response."""
        return self._message
    @message.setter
    def message(self, message):
        self._message = message
    @property
    def nucleus_portfolio_id(self):
        """str: id of the associated Nucleus portfolio."""
        return self._nucleus_portfolio_id
    @nucleus_portfolio_id.setter
    def nucleus_portfolio_id(self, nucleus_portfolio_id):
        self._nucleus_portfolio_id = nucleus_portfolio_id
    @property
    def status(self):
        """str: overall status of the operation."""
        return self._status
    @status.setter
    def status(self, status):
        self._status = status
    @property
    def vendor_name(self):
        """object: the vendor that produced the response."""
        return self._vendor_name
    @vendor_name.setter
    def vendor_name(self, vendor_name):
        self._vendor_name = vendor_name
    @property
    def vendor_response(self):
        """object: raw vendor response payload."""
        return self._vendor_response
    @vendor_response.setter
    def vendor_response(self, vendor_response):
        self._vendor_response = vendor_response
    def to_dict(self):
        """Returns the model properties as a dict, recursing into models."""
        result = {}
        # Native iteration replaces six.iteritems (same behaviour on Py3).
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(BaasSubAccountVO, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, BaasSubAccountVO):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 28.613971 | 173 | 0.608634 |
import pprint
import re
import six
class BaasSubAccountVO(object):
    """Swagger-style value object describing a BaaS sub-account."""
    # Attribute name -> attribute type (consumed by to_dict()).
    swagger_types = {
        'account_number': 'str',
        'account_status': 'str',
        'message': 'str',
        'nucleus_portfolio_id': 'str',
        'status': 'str',
        'vendor_name': 'object',
        'vendor_response': 'object'
    }
    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'account_number': 'account_number',
        'account_status': 'account_status',
        'message': 'message',
        'nucleus_portfolio_id': 'nucleus_portfolio_id',
        'status': 'status',
        'vendor_name': 'vendor_name',
        'vendor_response': 'vendor_response'
    }
    def __init__(self, account_number=None, account_status=None, message=None, nucleus_portfolio_id=None, status=None, vendor_name=None, vendor_response=None):
        """All fields optional; non-None keywords go through their setters."""
        self._account_number = None
        self._account_status = None
        self._message = None
        self._nucleus_portfolio_id = None
        self._status = None
        self._vendor_name = None
        self._vendor_response = None
        self.discriminator = None
        if account_number is not None:
            self.account_number = account_number
        if account_status is not None:
            self.account_status = account_status
        if message is not None:
            self.message = message
        if nucleus_portfolio_id is not None:
            self.nucleus_portfolio_id = nucleus_portfolio_id
        if status is not None:
            self.status = status
        if vendor_name is not None:
            self.vendor_name = vendor_name
        if vendor_response is not None:
            self.vendor_response = vendor_response
    @property
    def account_number(self):
        """str: the sub-account number."""
        return self._account_number
    @account_number.setter
    def account_number(self, account_number):
        self._account_number = account_number
    @property
    def account_status(self):
        """str: the sub-account status."""
        return self._account_status
    @account_status.setter
    def account_status(self, account_status):
        self._account_status = account_status
    @property
    def message(self):
        """str: human-readable message accompanying the response."""
        return self._message
    @message.setter
    def message(self, message):
        self._message = message
    @property
    def nucleus_portfolio_id(self):
        """str: id of the associated Nucleus portfolio."""
        return self._nucleus_portfolio_id
    @nucleus_portfolio_id.setter
    def nucleus_portfolio_id(self, nucleus_portfolio_id):
        self._nucleus_portfolio_id = nucleus_portfolio_id
    @property
    def status(self):
        """str: overall status of the operation."""
        return self._status
    @status.setter
    def status(self, status):
        self._status = status
    @property
    def vendor_name(self):
        """object: the vendor that produced the response."""
        return self._vendor_name
    @vendor_name.setter
    def vendor_name(self, vendor_name):
        self._vendor_name = vendor_name
    @property
    def vendor_response(self):
        """object: raw vendor response payload."""
        return self._vendor_response
    @vendor_response.setter
    def vendor_response(self, vendor_response):
        self._vendor_response = vendor_response
    def to_dict(self):
        """Return the model properties as a plain dict."""
        def _convert(value):
            # Recurse into nested models, lists and dicts of models.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            result[attr] = _convert(getattr(self, attr))
        if issubclass(BaasSubAccountVO, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Instances compare equal when all their attributes match."""
        if not isinstance(other, BaasSubAccountVO):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Negation of __eq__."""
        return not self == other
| true | true |
f72627ba2a2e2ffe351a5d1e289253920619cb97 | 3,942 | py | Python | utils.py | ChristophReich1996/Mode_Collapse | 937ee8bf96510fbf4070fc7e14b78276ab036b8c | [
"MIT"
] | 14 | 2020-06-22T12:56:10.000Z | 2022-03-31T10:23:00.000Z | utils.py | ChristophReich1996/Mode_Collapse | 937ee8bf96510fbf4070fc7e14b78276ab036b8c | [
"MIT"
] | null | null | null | utils.py | ChristophReich1996/Mode_Collapse | 937ee8bf96510fbf4070fc7e14b78276ab036b8c | [
"MIT"
] | 2 | 2022-01-21T01:22:23.000Z | 2022-02-13T18:08:08.000Z | from typing import Optional
import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm
import numpy as np
def get_generator(latent_size: int, use_spectral_norm: bool) -> nn.Module:
    """
    Returns the generator network.
    :param latent_size: (int) Size of the latent input vector
    :param use_spectral_norm: (bool) If true spectral norm is utilized
    :return: (nn.Module) Feed forward network mapping the latent vector to 2D samples
    """
    # DRY: a single layer stack with an optional spectral-norm wrapper
    # replaces the two near-identical branches of the original.
    maybe_norm = spectral_norm if use_spectral_norm else (lambda module: module)
    return nn.Sequential(
        maybe_norm(nn.Linear(latent_size, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 256, bias=True)),
        nn.Tanh(),
        maybe_norm(nn.Linear(256, 2, bias=True)),
    )
def get_discriminator(use_spectral_norm: bool) -> nn.Module:
    """
    Returns the discriminator network.
    :param use_spectral_norm: (bool) If true spectral norm is utilized
    :return: (nn.Module) Feed forward network mapping 2D samples to a single logit
    """
    # DRY: a single layer stack with an optional spectral-norm wrapper
    # replaces the two near-identical branches of the original.
    maybe_norm = spectral_norm if use_spectral_norm else (lambda module: module)
    return nn.Sequential(
        maybe_norm(nn.Linear(2, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 256, bias=True)),
        nn.LeakyReLU(),
        maybe_norm(nn.Linear(256, 1, bias=True)),
    )
def get_data(samples: Optional[int] = 400, variance: Optional[float] = 0.05) -> torch.Tensor:
    """
    Function generates a 2d ring of 8 Gaussians.
    :param samples: (Optional[int]) Number of samples in the dataset. Must be a multiple of 8.
    :param variance: (Optional[float]) Spread of each Gaussian. Note: this value is
        passed to torch.normal as the *standard deviation* argument.
    :return: (torch.Tensor) Shuffled data of shape (samples, 2), dtype float32
    """
    assert samples % 8 == 0 and samples > 0, "Number of samples must be a multiple of 8 and bigger than 0"
    # Means of the 8 Gaussians, evenly spaced on the unit circle.
    angles = torch.cumsum((2 * np.pi / 8) * torch.ones(8), dim=0)
    means = torch.stack([torch.cos(angles), torch.sin(angles)], dim=0)  # (2, 8)
    # Vectorized sampling replaces the original per-sample fill loop:
    # repeat each mean samples//8 times, then draw all samples at once.
    expanded_means = means.repeat_interleave(samples // 8, dim=1)  # (2, samples)
    data = torch.normal(expanded_means, variance).T  # (samples, 2)
    # Shuffle so the classes are not grouped.
    data = data[torch.randperm(data.shape[0])]
    return data.float()
| 44.292135 | 114 | 0.540081 | from typing import Optional
import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm
import numpy as np
def get_generator(latent_size: int, use_spectral_norm: bool) -> nn.Module:
    """Return the generator MLP (latent vector -> 2D sample); spectral norm
    is applied to every Linear layer when *use_spectral_norm* is True."""
    if use_spectral_norm:
        return nn.Sequential(spectral_norm(nn.Linear(latent_size, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 256, bias=True)),
                             nn.Tanh(),
                             spectral_norm(nn.Linear(256, 2, bias=True)))
    return nn.Sequential(nn.Linear(latent_size, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 256, bias=True),
                         nn.Tanh(),
                         nn.Linear(256, 2, bias=True))
def get_discriminator(use_spectral_norm: bool) -> nn.Module:
    """Return the discriminator MLP (2D sample -> 1 logit); spectral norm
    is applied to every Linear layer when *use_spectral_norm* is True."""
    if use_spectral_norm:
        return nn.Sequential(spectral_norm(nn.Linear(2, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 256, bias=True)),
                             nn.LeakyReLU(),
                             spectral_norm(nn.Linear(256, 1, bias=True)))
    return nn.Sequential(nn.Linear(2, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 256, bias=True),
                         nn.LeakyReLU(),
                         nn.Linear(256, 1, bias=True))
def get_data(samples: Optional[int] = 400, variance: Optional[float] = 0.05) -> torch.Tensor:
    """Sample a 2D mixture of 8 Gaussians whose means sit on the unit circle.

    :param samples: Total number of points; must be a positive multiple of 8
        so each mode receives the same count.
    :param variance: Standard deviation passed to ``torch.normal`` for every
        mode.
    :return: Float tensor of shape ``(samples, 2)``, rows shuffled.
    """
    assert samples % 8 == 0 and samples > 0, "Number of samples must be a multiple of 8 and bigger than 0"
    # Mode centers at angles 2*pi*k/8, k = 1..8, mapped onto the unit circle.
    angles = torch.cumsum((2 * np.pi / 8) * torch.ones((8)), dim=0)
    means = torch.stack([torch.cos(angles), torch.sin(angles)], dim=0)
    # Draw an equal share of points around each mode, mode-major order.
    per_mode = samples // 8
    points = []
    for mode in range(means.shape[1]):
        for _ in range(per_mode):
            points.append(torch.normal(means[:, mode], variance))
    data = torch.stack(points, dim=0)
    # Shuffle rows so modes are interleaved.
    data = data[torch.randperm(data.shape[0])]
    return data.float()
| true | true |
f72627c561e2ac89a391788cfaf08f88db3539eb | 585 | py | Python | rgr/__init__.py | Faxn/rgr | 656f4795bfbb5ca6bf7f9dd6c8e043a3242cd331 | [
"BSD-2-Clause"
] | null | null | null | rgr/__init__.py | Faxn/rgr | 656f4795bfbb5ca6bf7f9dd6c8e043a3242cd331 | [
"BSD-2-Clause"
] | null | null | null | rgr/__init__.py | Faxn/rgr | 656f4795bfbb5ca6bf7f9dd6c8e043a3242cd331 | [
"BSD-2-Clause"
] | null | null | null |
from . import lexer
from .parseRoll import parser
def roll(expression : str):
    """Parse and evaluate a dice expression.

    Returns a ``(result, hist, tree)`` tuple on success: the rolled value,
    the roll history, and the parsed expression tree.  On any parse/roll
    failure the exception text is returned as a plain string instead, so
    callers must check the return type.
    """
    try:
        tree = parser.parse(expression)
        result, hist = tree.roll()
    except Exception as E:  # NOTE(review): broad catch converts every error into a string result
        return str(E)
    return result, hist, tree
def compile(expression : str):
    """Parse a dice expression into its tree form without rolling it.

    NOTE(review): this shadows the builtin ``compile`` inside this module.
    """
    tree = parser.parse(expression)
    return tree
# Optional discord.py cog integration: when the cog module (and its
# discord.py dependency) is importable, expose a setup() entry point that
# registers the RGR cog; otherwise install a setup() that surfaces the
# original import error when the extension is loaded.
try:
    from .rgrcog import RGR
    def setup(bot):
        """Register the RGR cog on a discord.py bot."""
        bot.add_cog(RGR(bot))
except ModuleNotFoundError as e:
    def setup(bot):
        """Fallback that re-raises the captured import failure on load."""
        raise Exception(str(e), e)
    pass  # kept from original; has no effect
| 19.5 | 68 | 0.642735 |
from . import lexer
from .parseRoll import parser
def roll(expression : str):
    """Parse and evaluate a dice expression.

    Returns ``(result, hist, tree)`` on success, or the exception text as
    a plain string on failure (callers must check the return type).
    """
    try:
        tree = parser.parse(expression)
        result, hist = tree.roll()
    except Exception as E:  # NOTE(review): broad catch converts every error into a string
        return str(E)
    return result, hist, tree
def compile(expression : str):
    """Parse a dice expression into a tree without rolling it.

    NOTE(review): shadows the builtin ``compile`` inside this module.
    """
    tree = parser.parse(expression)
    return tree
# Optional discord.py cog integration; fall back to an error-raising
# setup() when the cog (or discord.py) cannot be imported.
try:
    from .rgrcog import RGR
    def setup(bot):
        """Register the RGR cog on a discord.py bot."""
        bot.add_cog(RGR(bot))
except ModuleNotFoundError as e:
    def setup(bot):
        """Re-raise the captured import failure when loaded."""
        raise Exception(str(e), e)
    pass  # kept from original; has no effect
| true | true |
f726282d7144fcaa8467eabd6470b58a1158a9e7 | 374 | py | Python | projects/intruder_detection/main.py | henriwoodcock/developer | 7ddd7f0b56564c0c13d5505c16b6f89c0c29886a | [
"CC-BY-4.0"
] | null | null | null | projects/intruder_detection/main.py | henriwoodcock/developer | 7ddd7f0b56564c0c13d5505c16b6f89c0c29886a | [
"CC-BY-4.0"
] | null | null | null | projects/intruder_detection/main.py | henriwoodcock/developer | 7ddd7f0b56564c0c13d5505c16b6f89c0c29886a | [
"CC-BY-4.0"
] | null | null | null | from machine import Pin, Timer
def check_sensor(timer):
    """Timer callback: mirror the sensor's digital reading onto the LED pin."""
    global sensor
    led_state = 1 if sensor.value() == 1 else 0
    gp1.value(led_state)
# GP4 drives the sensor's supply line high (used as the sensor's power output).
gp4 = Pin(4,Pin.OUT)
gp4.value(1)
# GP1 drives the indicator LED.
gp1= Pin(1,Pin.OUT)
# GP5 reads the motion sensor's digital output.
sensor = Pin(5,Pin.IN)
tim = Timer()
# Poll the sensor at 1.5 Hz and mirror its state onto the LED.
tim.init(freq=1.5, mode=Timer.PERIODIC, callback=check_sensor)
| 19.684211 | 62 | 0.65508 | from machine import Pin, Timer
def check_sensor(timer):
    """Timer callback: copy the sensor's digital reading to the LED pin."""
    global sensor
    if sensor.value() == 1:
        gp1.value(1)
    else:
        gp1.value(0)
# Pin setup: GP4 powers the sensor, GP1 drives the LED, GP5 reads the sensor.
gp4 = Pin(4,Pin.OUT)
gp4.value(1)
gp1= Pin(1,Pin.OUT)
sensor = Pin(5,Pin.IN)
tim = Timer()
# Poll the sensor at 1.5 Hz and mirror its state onto the LED.
tim.init(freq=1.5, mode=Timer.PERIODIC, callback=check_sensor)
| true | true |
f72629c9f8acea66ab3447479454246371b49923 | 1,440 | py | Python | src/view/SqliteKeywords.py | struts2spring/sql-editor | 082868dd92cbd8f0f6715f734f9ebe64032cbe4a | [
"MIT"
] | 9 | 2018-10-15T04:57:37.000Z | 2021-12-07T07:39:35.000Z | src/view/SqliteKeywords.py | struts2spring/sql-editor | 082868dd92cbd8f0f6715f734f9ebe64032cbe4a | [
"MIT"
] | 13 | 2018-10-19T11:52:44.000Z | 2021-09-08T00:39:30.000Z | src/view/SqliteKeywords.py | struts2spring/sql-editor | 082868dd92cbd8f0f6715f734f9ebe64032cbe4a | [
"MIT"
] | 3 | 2018-10-25T11:08:04.000Z | 2021-02-23T08:28:31.000Z | '''
Created on 04-Feb-2017
@author: vijay
'''
# SQLite reserved keywords, in alphabetical order (used by the SQL editor
# for syntax highlighting / completion).
keyword = """
    ABORT ACTION ADD AFTER ALL ALTER ANALYZE AND AS ASC
    ATTACH AUTOINCREMENT BEFORE BEGIN BETWEEN BY CASCADE CASE CAST CHECK
    COLLATE COLUMN COMMIT CONFLICT CONSTRAINT CREATE CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP
    DATABASE DEFAULT DEFERRABLE DEFERRED DELETE DESC DETACH DISTINCT DROP EACH
    ELSE END ESCAPE EXCEPT EXCLUSIVE EXISTS EXPLAIN FAIL FOR FOREIGN
    FROM FULL GLOB GROUP HAVING IF IGNORE IMMEDIATE IN INDEX
    INDEXED INITIALLY INNER INSERT INSTEAD INTERSECT INTO IS ISNULL JOIN
    KEY LEFT LIKE LIMIT MATCH NATURAL NO NOT NOTNULL NULL
    OF OFFSET OR ORDER OUTER PLAN PRAGMA PRIMARY QUERY RAISE
    RECURSIVE REFERENCES REGEXP REINDEX RELEASE RENAME REPLACE RESTRICT RIGHT ROLLBACK
    ROW SAVEPOINT SELECT SET TABLE TEMP TEMPORARY THEN TO TRANSACTION
    TRIGGER UNION UNIQUE UPDATE USING VACUUM VALUES VIEW VIRTUAL WHEN
    WHERE WITH WITHOUT
""".split()

if __name__ == '__main__':
    pass
| 10.510949 | 26 | 0.531944 |
keyword = [
'ABORT'
, 'ACTION'
, 'ADD'
, 'AFTER'
, 'ALL'
, 'ALTER'
, 'ANALYZE'
, 'AND'
, 'AS'
, 'ASC'
, 'ATTACH'
, 'AUTOINCREMENT'
, 'BEFORE'
, 'BEGIN'
, 'BETWEEN'
, 'BY'
, 'CASCADE'
, 'CASE'
, 'CAST'
, 'CHECK'
, 'COLLATE'
, 'COLUMN'
, 'COMMIT'
, 'CONFLICT'
, 'CONSTRAINT'
, 'CREATE'
, 'CROSS'
, 'CURRENT_DATE'
, 'CURRENT_TIME'
, 'CURRENT_TIMESTAMP'
, 'DATABASE'
, 'DEFAULT'
, 'DEFERRABLE'
, 'DEFERRED'
, 'DELETE'
, 'DESC'
, 'DETACH'
, 'DISTINCT'
, 'DROP'
, 'EACH'
, 'ELSE'
, 'END'
, 'ESCAPE'
, 'EXCEPT'
, 'EXCLUSIVE'
, 'EXISTS'
, 'EXPLAIN'
, 'FAIL'
, 'FOR'
, 'FOREIGN'
, 'FROM'
, 'FULL'
, 'GLOB'
, 'GROUP'
, 'HAVING'
, 'IF'
, 'IGNORE'
, 'IMMEDIATE'
, 'IN'
, 'INDEX'
, 'INDEXED'
, 'INITIALLY'
, 'INNER'
, 'INSERT'
, 'INSTEAD'
, 'INTERSECT'
, 'INTO'
, 'IS'
, 'ISNULL'
, 'JOIN'
, 'KEY'
, 'LEFT'
, 'LIKE'
, 'LIMIT'
, 'MATCH'
, 'NATURAL'
, 'NO'
, 'NOT'
, 'NOTNULL'
, 'NULL'
, 'OF'
, 'OFFSET'
, 'OR'
, 'ORDER'
, 'OUTER'
, 'PLAN'
, 'PRAGMA'
, 'PRIMARY'
, 'QUERY'
, 'RAISE'
, 'RECURSIVE'
, 'REFERENCES'
, 'REGEXP'
, 'REINDEX'
, 'RELEASE'
, 'RENAME'
, 'REPLACE'
, 'RESTRICT'
, 'RIGHT'
, 'ROLLBACK'
, 'ROW'
, 'SAVEPOINT'
, 'SELECT'
, 'SET'
, 'TABLE'
, 'TEMP'
, 'TEMPORARY'
, 'THEN'
, 'TO'
, 'TRANSACTION'
, 'TRIGGER'
, 'UNION'
, 'UNIQUE'
, 'UPDATE'
, 'USING'
, 'VACUUM'
, 'VALUES'
, 'VIEW'
, 'VIRTUAL'
, 'WHEN'
, 'WHERE'
, 'WITH'
, 'WITHOUT'
]
if __name__ == '__main__':
pass
| true | true |
f72629d7ccf97ec969ccabc9f97fbd9dea75c8a0 | 2,228 | py | Python | symbols/symbol_ssdh.py | galad-loth/LearnDescriptor | 30552a699597415a13793eb85d21b5e33a296a99 | [
"Apache-2.0"
] | 100 | 2018-02-06T10:47:43.000Z | 2022-02-16T01:11:30.000Z | symbols/symbol_ssdh.py | JiaxueLi/DeepMatch | 30552a699597415a13793eb85d21b5e33a296a99 | [
"Apache-2.0"
] | 2 | 2019-07-24T17:22:37.000Z | 2020-03-19T04:11:47.000Z | symbols/symbol_ssdh.py | JiaxueLi/DeepMatch | 30552a699597415a13793eb85d21b5e33a296a99 | [
"Apache-2.0"
] | 21 | 2018-11-11T06:35:43.000Z | 2020-11-25T07:52:20.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 07 21:00:11 2017
@author: galad-loth
"""
import numpy as npy
import mxnet as mx
class HashLossLayer(mx.operator.NumpyOp):
    """Custom MXNet NumPy operator implementing the SSDH hash penalties.

    Combines two terms on the sigmoid latent activations: a binarization
    penalty weighted by ``w_bin`` (push each activation towards 0 or 1)
    and a balance penalty weighted by ``w_balance`` (push each sample's
    mean activation towards 0.5).
    """
    def __init__(self, w_bin,w_balance):
        # False is passed positionally to NumpyOp.__init__ — presumably
        # need_top_grad, since this op acts as a loss; verify against the
        # mxnet version in use.
        super(HashLossLayer, self).__init__(False)
        self.w_bin=w_bin          # weight of the binarization penalty
        self.w_balance=w_balance  # weight of the balance penalty
    def list_arguments(self):
        # Single input blob: the latent activations.
        return ['data']
    def list_outputs(self):
        return ['output']
    def infer_shape(self, in_shape):
        # Output mirrors the input shape exactly.
        data_shape=in_shape[0]
        return [data_shape],[data_shape]
    def forward(self, in_data, out_data):
        # Forward pass emits the binarized codes: 1 where x >= 0.5, else 0.
        x=in_data[0]
#        l=in_data[1]
        y=out_data[0]
        xs=x-0.5
        y[:]=1
        y[xs<0]=0
#        y[:]=npy.ones((x.shape[0],1))-l.reshape((x.shape[0],1))*x
    def backward(self, out_grad, in_data, out_data, in_grad):
        # Gradient of the two penalties w.r.t. the input activations.
        x=in_data[0]
        dx=in_grad[0]
        # Binarization term: derivative of -(x-0.5)^2 scaled by code length.
        grad1=-2*(x-0.5)/x.shape[1]
        # Balance term: derivative of (mean(x)-0.5)^2 per sample.
        mu=npy.mean(x,axis=1)
        grad2=2*(mu-0.5)/x.shape[1]
        # NOTE(review): grad1 has shape (N, D) while grad2 has shape (N,);
        # the addition relies on numpy broadcasting and looks
        # shape-inconsistent unless D == N — verify with real data.
        grad=self.w_bin*grad1+self.w_balance*grad2
        dx[:]=grad
def get_finetune_symbol(net_pre,arg_params,
                        num_latent, num_class,layer_name='flatten'):
    """
    Build the SSDH fine-tuning symbol on top of a pre-trained network.

    net_pre: the pre-trained network symbol
    arg_params: the argument parameters of the pre-trained model
    num_latent: the number of latent layer units for the fine-tune datasets
    num_class: number of classes of the fine-tuning dataset
    layer_name: the layer name before the last fully-connected layer

    Returns (net, new_args): the grouped softmax + hash symbol, and the
    pre-trained weights with every 'fc' entry dropped so the new heads
    are freshly initialized.
    """
    all_layers = net_pre.get_internals()
    load_net = all_layers[layer_name+'_output']
    # Latent hash layer; sigmoid keeps activations in (0, 1) so they can
    # later be thresholded into binary codes.
    latent = mx.symbol.FullyConnected(data=load_net, num_hidden=num_latent, name='latent_ssdh')
    latent = mx.sym.Activation(data=latent, act_type="sigmoid", name="sigmoid_ssdh")
    # Classification head on top of the latent codes.
    class_net = mx.symbol.FullyConnected(data=latent, num_hidden=num_class, name='fc_ssdh')
    class_net = mx.symbol.SoftmaxOutput(data=class_net, name='softmax')
    # Hash penalties (binarization + balance), both weighted 0.1 here.
    hash_loss=HashLossLayer(0.1,0.1)
    hash_net=hash_loss(data=latent, name="hash")
    net = mx.sym.Group([class_net,hash_net])
    # Keep only pre-trained weights whose key does not mention 'fc'.
    new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
    return (net, new_args)
| 33.757576 | 96 | 0.612208 |
import numpy as npy
import mxnet as mx
class HashLossLayer(mx.operator.NumpyOp):
def __init__(self, w_bin,w_balance):
super(HashLossLayer, self).__init__(False)
self.w_bin=w_bin
self.w_balance=w_balance
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape=in_shape[0]
return [data_shape],[data_shape]
def forward(self, in_data, out_data):
x=in_data[0]
y=out_data[0]
xs=x-0.5
y[:]=1
y[xs<0]=0
def backward(self, out_grad, in_data, out_data, in_grad):
x=in_data[0]
dx=in_grad[0]
grad1=-2*(x-0.5)/x.shape[1]
mu=npy.mean(x,axis=1)
grad2=2*(mu-0.5)/x.shape[1]
grad=self.w_bin*grad1+self.w_balance*grad2
dx[:]=grad
def get_finetune_symbol(net_pre,arg_params,
num_latent, num_class,layer_name='flatten'):
all_layers = net_pre.get_internals()
load_net = all_layers[layer_name+'_output']
latent = mx.symbol.FullyConnected(data=load_net, num_hidden=num_latent, name='latent_ssdh')
latent = mx.sym.Activation(data=latent, act_type="sigmoid", name="sigmoid_ssdh")
class_net = mx.symbol.FullyConnected(data=latent, num_hidden=num_class, name='fc_ssdh')
class_net = mx.symbol.SoftmaxOutput(data=class_net, name='softmax')
hash_loss=HashLossLayer(0.1,0.1)
hash_net=hash_loss(data=latent, name="hash")
net = mx.sym.Group([class_net,hash_net])
new_args = dict({k:arg_params[k] for k in arg_params if 'fc' not in k})
return (net, new_args)
| true | true |
f72629ddc71a4e57e29e2e43fc86db58df8c4de3 | 23,900 | py | Python | plotly_study/graph_objs/layout/ternary/aaxis/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/graph_objs/layout/ternary/aaxis/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | plotly_study/graph_objs/layout/ternary/aaxis/__init__.py | lucasiscovici/plotly_py | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | [
"MIT"
] | null | null | null | from plotly_study.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Title(_BaseLayoutHierarchyType):
    """Title of the ternary a-axis: the text shown and the font it uses."""

    @property
    def font(self):
        """This axis' title font (replaces the deprecated `titlefont`).

        Accepts a plotly_study.graph_objs.layout.ternary.aaxis.title.Font
        instance or a dict of Font constructor properties
        (color/family/size).
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    @property
    def text(self):
        """The title text of this axis (replaces the deprecated `title`
        string attribute). A string, or a number coerced to a string.
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure hierarchy.
        return "layout.ternary.aaxis"

    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this axis' title font. Note that the title's font
            used to be customized by the now deprecated `titlefont`
            attribute.
        text
            Sets the title of this axis. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Title object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor, or an
            existing plotly_study.graph_objs.layout.ternary.aaxis.Title
            instance to copy.
        font
            The axis title font (see the `font` property).
        text
            The axis title text (see the `text` property).

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Normalize `arg` into a plain, mutable dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly_study.graph_objs.layout.ternary.aaxis.Title 
constructor must be a dict or 
an instance of plotly_study.graph_objs.layout.ternary.aaxis.Title"""
            )

        # Invalid properties are ignored (not raised) while this flag is set.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Install the per-property validators.
        from plotly_study.validators.layout.ternary.aaxis import title as v_title

        self._validators["font"] = v_title.FontValidator()
        self._validators["text"] = v_title.TextValidator()

        # Apply properties; explicit keyword arguments win over `arg` entries.
        for prop, explicit in (("font", font), ("text", text)):
            fallback = arg.pop(prop, None)
            self[prop] = explicit if explicit is not None else fallback

        # Whatever remains in `arg`, plus extra kwargs, goes through the
        # generic kwarg processing (unknown keys handled there).
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly_study.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickformatstop(_BaseLayoutHierarchyType):
    """One entry of `tickformatstops`: a tick format tied to a dtick zoom range."""

    @property
    def dtickrange(self):
        """[*min*, *max*] dtick range describing a zoom level; either bound
        may be omitted by passing "null". Returns a 2-element list.
        """
        return self["dtickrange"]

    @dtickrange.setter
    def dtickrange(self, val):
        self["dtickrange"] = val

    @property
    def enabled(self):
        """Whether this stop is used; when False it is ignored even inside
        its `dtickrange`. Returns a bool.
        """
        return self["enabled"]

    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val

    @property
    def name(self):
        """Template matching name: pairs this item with a template item of
        the same `name` so it can be modified in the output figure.
        Returns a str.
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    @property
    def templateitemname(self):
        """Name of the template item this array entry refers to; items with
        no match are hidden unless `visible: true`. Returns a str.
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    @property
    def value(self):
        """The dtickformat string used at this zoom level (same syntax as
        "tickformat"). Returns a str.
        """
        return self["value"]

    @value.setter
    def value(self, val):
        self["value"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure hierarchy.
        return "layout.ternary.aaxis"

    @property
    def _prop_descriptions(self):
        return """\
        dtickrange
            range [*min*, *max*], where "min", "max" - dtick values
            which describe some zoom level, it is possible to omit
            "min" or "max" value by passing "null"
        enabled
            Determines whether or not this stop is used. If
            `false`, this stop is ignored even within its
            `dtickrange`.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        value
            string - dtickformat for described zoom level, the same
            as "tickformat"
        """

    def __init__(
        self,
        arg=None,
        dtickrange=None,
        enabled=None,
        name=None,
        templateitemname=None,
        value=None,
        **kwargs
    ):
        """
        Construct a new Tickformatstop object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor, or an
            existing Tickformatstop instance to copy.
        dtickrange, enabled, name, templateitemname, value
            See the matching properties on this class.

        Returns
        -------
        Tickformatstop
        """
        super(Tickformatstop, self).__init__("tickformatstops")

        # Normalize `arg` into a plain, mutable dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly_study.graph_objs.layout.ternary.aaxis.Tickformatstop 
constructor must be a dict or 
an instance of plotly_study.graph_objs.layout.ternary.aaxis.Tickformatstop"""
            )

        # Invalid properties are ignored (not raised) while this flag is set.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Install the per-property validators.
        from plotly_study.validators.layout.ternary.aaxis import (
            tickformatstop as v_tickformatstop,
        )

        self._validators["dtickrange"] = v_tickformatstop.DtickrangeValidator()
        self._validators["enabled"] = v_tickformatstop.EnabledValidator()
        self._validators["name"] = v_tickformatstop.NameValidator()
        self._validators[
            "templateitemname"
        ] = v_tickformatstop.TemplateitemnameValidator()
        self._validators["value"] = v_tickformatstop.ValueValidator()

        # Apply properties; explicit keyword arguments win over `arg` entries.
        supplied = (
            ("dtickrange", dtickrange),
            ("enabled", enabled),
            ("name", name),
            ("templateitemname", templateitemname),
            ("value", value),
        )
        for prop, explicit in supplied:
            fallback = arg.pop(prop, None)
            self[prop] = explicit if explicit is not None else fallback

        # Whatever remains in `arg`, plus extra kwargs, goes through the
        # generic kwarg processing (unknown keys handled there).
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly_study.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
    """Font used for the tick labels of the ternary a-axis."""

    @property
    def color(self):
        """Tick label color: a hex, rgb/rgba, hsl/hsla or hsv/hsva string,
        or a named CSS color. Returns a str.
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """HTML font family applied by the web browser; multiple families
        may be supplied comma-separated as a preference list. Must be a
        non-empty string.
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def size(self):
        """Font size; an int or float in the interval [1, inf]."""
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of the parent node in the figure hierarchy.
        return "layout.ternary.aaxis"

    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The
            plotly service (at https://plot.ly or on-premise)
            generates images on a server, where only a select
            number of fonts are installed and supported. These
            include "Arial", "Balto", "Courier New", "Droid Sans",,
            "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
            Standard TT", "Open Sans", "Overpass", "PT Sans
            Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object.

        Sets the tick font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor, or an
            existing plotly_study.graph_objs.layout.ternary.aaxis.Tickfont
            instance to copy.
        color, family, size
            See the matching properties on this class.

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")

        # Normalize `arg` into a plain, mutable dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly_study.graph_objs.layout.ternary.aaxis.Tickfont 
constructor must be a dict or 
an instance of plotly_study.graph_objs.layout.ternary.aaxis.Tickfont"""
            )

        # Invalid properties are ignored (not raised) while this flag is set.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Install the per-property validators.
        from plotly_study.validators.layout.ternary.aaxis import tickfont as v_tickfont

        self._validators["color"] = v_tickfont.ColorValidator()
        self._validators["family"] = v_tickfont.FamilyValidator()
        self._validators["size"] = v_tickfont.SizeValidator()

        # Apply properties; explicit keyword arguments win over `arg` entries.
        for prop, explicit in (("color", color), ("family", family), ("size", size)):
            fallback = arg.pop(prop, None)
            self[prop] = explicit if explicit is not None else fallback

        # Whatever remains in `arg`, plus extra kwargs, goes through the
        # generic kwarg processing (unknown keys handled there).
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
# Public API of the `layout.ternary.aaxis` subpackage.
# NOTE: the generated list contained "Tickformatstop" twice; the duplicate
# entry has been removed (harmless for `import *`, but misleading).
__all__ = ["Tickfont", "Tickformatstop", "Title", "title"]

from plotly_study.graph_objs.layout.ternary.aaxis import title
| 34.738372 | 90 | 0.567155 | from plotly_study.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Title(_BaseLayoutHierarchyType):
@property
def font(self):
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def text(self):
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _parent_path_str(self):
return "layout.ternary.aaxis"
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font. Note that the title's font
used to be customized by the now deprecated `titlefont`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
super(Title, self).__init__("title")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.layout.ternary.aaxis.Title
constructor must be a dict or
an instance of plotly_study.graph_objs.layout.ternary.aaxis.Title"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.layout.ternary.aaxis import title as v_title
# Initialize validators
# ---------------------
self._validators["font"] = v_title.FontValidator()
self._validators["text"] = v_title.TextValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("text", None)
self["text"] = text if text is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickformatstop(_BaseLayoutHierarchyType):
# dtickrange
# ----------
@property
def dtickrange(self):
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.ternary.aaxis"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
super(Tickformatstop, self).__init__("tickformatstops")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.layout.ternary.aaxis.Tickformatstop
constructor must be a dict or
an instance of plotly_study.graph_objs.layout.ternary.aaxis.Tickformatstop"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.layout.ternary.aaxis import (
tickformatstop as v_tickformatstop,
)
# Initialize validators
# ---------------------
self._validators["dtickrange"] = v_tickformatstop.DtickrangeValidator()
self._validators["enabled"] = v_tickformatstop.EnabledValidator()
self._validators["name"] = v_tickformatstop.NameValidator()
self._validators[
"templateitemname"
] = v_tickformatstop.TemplateitemnameValidator()
self._validators["value"] = v_tickformatstop.ValueValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
self["dtickrange"] = dtickrange if dtickrange is not None else _v
_v = arg.pop("enabled", None)
self["enabled"] = enabled if enabled is not None else _v
_v = arg.pop("name", None)
self["name"] = name if name is not None else _v
_v = arg.pop("templateitemname", None)
self["templateitemname"] = (
templateitemname if templateitemname is not None else _v
)
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Tickfont(_BaseLayoutHierarchyType):
# color
# -----
@property
def color(self):
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "layout.ternary.aaxis"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
super(Tickfont, self).__init__("tickfont")
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.layout.ternary.aaxis.Tickfont
constructor must be a dict or
an instance of plotly_study.graph_objs.layout.ternary.aaxis.Tickfont"""
)
self._skip_invalid = kwargs.pop("skip_invalid", False)
from plotly_study.validators.layout.ternary.aaxis import tickfont as v_tickfont
self._validators["color"] = v_tickfont.ColorValidator()
self._validators["family"] = v_tickfont.FamilyValidator()
self._validators["size"] = v_tickfont.SizeValidator()
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
__all__ = ["Tickfont", "Tickformatstop", "Tickformatstop", "Title", "title"]
from plotly_study.graph_objs.layout.ternary.aaxis import title
| true | true |
f7262a2f0da63f591723f9cdf91c2bae40d81f7d | 19,587 | py | Python | pandas/tests/reshape/test_tile.py | stevenvandenberghe/pandas | 8cbee356da1161c56c64f6f89cb5548bcadc3e44 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/reshape/test_tile.py | stevenvandenberghe/pandas | 8cbee356da1161c56c64f6f89cb5548bcadc3e44 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/reshape/test_tile.py | stevenvandenberghe/pandas | 8cbee356da1161c56c64f6f89cb5548bcadc3e44 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2019-03-08T19:59:05.000Z | 2020-09-27T03:18:37.000Z | import os
import pytest
import numpy as np
from pandas.compat import zip
from pandas import (Series, isna, to_datetime, DatetimeIndex,
Timestamp, Interval, IntervalIndex, Categorical,
cut, qcut, date_range)
import pandas.util.testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
import pandas.core.reshape.tile as tmod
class TestCut(object):
def test_simple(self):
data = np.ones(5, dtype='int64')
result = cut(data, 4, labels=False)
expected = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, expected,
check_dtype=False)
def test_bins(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_right(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
expected = Categorical(intervals, ordered=True)
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95,
7.325, 9.7]))
def test_noright(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3), closed='left')
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95,
7.325, 9.7095]))
def test_arraylike(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_bins_from_intervalindex(self):
c = cut(range(5), 3)
expected = c
result = cut(range(5), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
expected = Categorical.from_codes(np.append(c.codes, -1),
categories=c.categories,
ordered=True)
result = cut(range(6), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
# doc example
# make sure we preserve the bins
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
c = cut(ages, bins=[0, 18, 35, 70])
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
tm.assert_index_equal(c.categories, expected)
result = cut([25, 20, 50], bins=c.categories)
tm.assert_index_equal(result.categories, expected)
tm.assert_numpy_array_equal(result.codes,
np.array([1, 1, 2], dtype='int8'))
def test_bins_not_monotonic(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
pytest.raises(ValueError, cut, data, [0.1, 1.5, 1, 10])
def test_wrong_num_labels(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
pytest.raises(ValueError, cut, data, [0, 1, 10],
labels=['foo', 'bar', 'baz'])
def test_cut_corner(self):
# h3h
pytest.raises(ValueError, cut, [], 2)
pytest.raises(ValueError, cut, [1, 2, 3], 0.5)
def test_cut_out_of_range_more(self):
# #1511
s = Series([0, -1, 0, 1, -3], name='x')
ind = cut(s, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name='x')
tm.assert_series_equal(ind, exp)
def test_labels(self):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True)
ex_levels = IntervalIndex.from_breaks([-1e-3, 0.25, 0.5, 0.75, 1])
tm.assert_index_equal(result.categories, ex_levels)
result, bins = cut(arr, 4, retbins=True, right=False)
ex_levels = IntervalIndex.from_breaks([0, 0.25, 0.5, 0.75, 1 + 1e-3],
closed='left')
tm.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor(self):
s = Series(np.random.randn(100), name='foo')
factor = cut(s, 4)
assert factor.name == 'foo'
def test_label_precision(self):
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36,
0.54, 0.72])
tm.assert_index_equal(result.categories, ex_levels)
def test_na_handling(self):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4)
result_arr = np.asarray(result)
ex_arr = np.where(isna(arr), np.nan, result_arr)
tm.assert_almost_equal(result_arr, ex_arr)
result = cut(arr, 4, labels=False)
ex_result = np.where(isna(arr), np.nan, result)
tm.assert_almost_equal(result, ex_result)
def test_inf_handling(self):
data = np.arange(6)
data_ser = Series(data, dtype='int64')
bins = [-np.inf, 2, 4, np.inf]
result = cut(data, bins)
result_ser = cut(data_ser, bins)
ex_uniques = IntervalIndex.from_breaks(bins)
tm.assert_index_equal(result.categories, ex_uniques)
assert result[5] == Interval(4, np.inf)
assert result[0] == Interval(-np.inf, 2)
assert result_ser[5] == Interval(4, np.inf)
assert result_ser[0] == Interval(-np.inf, 2)
def test_qcut(self):
arr = np.random.randn(1000)
# We store the bins as Index that have been rounded
# to comparisons are a bit tricky.
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds(self):
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles(self):
arr = np.random.randn(100)
factor = qcut(arr, [0, .25, .5, .75, 1.])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same(self):
tm.assert_raises_regex(ValueError, "edges.*unique", qcut,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_cut_out_of_bounds(self):
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = isna(result)
ex_mask = (arr < -1) | (arr > 1)
tm.assert_numpy_array_equal(mask, ex_mask)
def test_cut_pass_labels(self):
arr = [50, 5, 10, 15, 20, 30, 70]
bins = [0, 25, 50, 100]
labels = ['Small', 'Medium', 'Large']
result = cut(arr, bins, labels=labels)
exp = Categorical(['Medium'] + 4 * ['Small'] + ['Medium', 'Large'],
categories=labels,
ordered=True)
tm.assert_categorical_equal(result, exp)
result = cut(arr, bins, labels=Categorical.from_codes([0, 1, 2],
labels))
exp = Categorical.from_codes([1] + 4 * [0] + [1, 2], labels)
tm.assert_categorical_equal(result, exp)
# issue 16459
labels = ['Good', 'Medium', 'Bad']
result = cut(arr, 3, labels=labels)
exp = cut(arr, 3, labels=Categorical(labels, categories=labels,
ordered=True))
tm.assert_categorical_equal(result, exp)
def test_qcut_include_lowest(self):
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9)])
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas(self):
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index(self):
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_round_frac(self):
# it works
result = cut(np.arange(11.), 2)
result = cut(np.arange(11.) / 1e10, 2)
# #1979, negative numbers
result = tmod._round_frac(-117.9998, precision=3)
assert result == -118
result = tmod._round_frac(117.9998, precision=3)
assert result == 118
result = tmod._round_frac(117.9998, precision=2)
assert result == 118
result = tmod._round_frac(0.000123456, precision=2)
assert result == 0.00012
def test_qcut_binning_issues(self):
# #1978, 1979
path = os.path.join(tm.get_data_path(), 'cut_data.csv')
arr = np.loadtxt(path)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
zip(ends[:-1], ends[1:])):
assert sp < sn
assert ep < en
assert ep <= sn
def test_cut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = cut(s, 3)
exp_bins = np.linspace(0, 8, num=4).round(3)
exp_bins[0] -= 0.008
exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take(
[0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
def test_qcut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(s, [0, 0.333, 0.666, 1])
exp_levels = np.array([Interval(-0.001, 2.664),
Interval(2.664, 5.328), Interval(5.328, 8)])
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(
CDT(ordered=True))
tm.assert_series_equal(res, exp)
def test_series_retbins(self):
# GH 8589
s = Series(np.arange(4))
result, bins = cut(s, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.003, 1.5, 3], closed='right').repeat(2)).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
result, bins = qcut(s, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.001, 1.5, 3], closed='right').repeat(2)).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_qcut_duplicates_bin(self):
# GH 7751
values = [0, 0, 0, 0, 1, 2, 3]
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
result = qcut(values, 3, duplicates='drop')
tm.assert_index_equal(result.categories, expected)
pytest.raises(ValueError, qcut, values, 3)
pytest.raises(ValueError, qcut, values, 3, duplicates='raise')
# invalid
pytest.raises(ValueError, qcut, values, 3, duplicates='foo')
def test_single_quantile(self):
# issue 15431
expected = Series([0, 0])
s = Series([9., 9.])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(8.999, 9.0),
Interval(8.999, 9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
expected = Series([0, 0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-9.001, -9.0),
Interval(-9.001, -9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([0., 0.])
expected = Series([0, 0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-0.001, 0.0),
Interval(-0.001, 0.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([9])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([-9])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([0])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_single_bin(self):
# issue 14652
expected = Series([0, 0])
s = Series([9., 9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0])
s = Series([9])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
# issue 15428
expected = Series([0, 0])
s = Series([0., 0.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0])
s = Series([0])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
def test_datetime_cut(self):
# GH 14714
# testing for time data to be present as series
data = to_datetime(Series(['2013-01-01', '2013-01-02', '2013-01-03']))
result, bins = cut(data, 3, retbins=True)
expected = (
Series(IntervalIndex([
Interval(Timestamp('2012-12-31 23:57:07.200000'),
Timestamp('2013-01-01 16:00:00')),
Interval(Timestamp('2013-01-01 16:00:00'),
Timestamp('2013-01-02 08:00:00')),
Interval(Timestamp('2013-01-02 08:00:00'),
Timestamp('2013-01-03 00:00:00'))]))
.astype(CDT(ordered=True)))
tm.assert_series_equal(result, expected)
# testing for time data to be present as list
data = [np.datetime64('2013-01-01'), np.datetime64('2013-01-02'),
np.datetime64('2013-01-03')]
result, bins = cut(data, 3, retbins=True)
tm.assert_series_equal(Series(result), expected)
# testing for time data to be present as ndarray
data = np.array([np.datetime64('2013-01-01'),
np.datetime64('2013-01-02'),
np.datetime64('2013-01-03')])
result, bins = cut(data, 3, retbins=True)
tm.assert_series_equal(Series(result), expected)
# testing for time data to be present as datetime index
data = DatetimeIndex(['2013-01-01', '2013-01-02', '2013-01-03'])
result, bins = cut(data, 3, retbins=True)
tm.assert_series_equal(Series(result), expected)
def test_datetime_bin(self):
data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')]
bin_data = ['2012-12-12', '2012-12-14', '2012-12-16']
expected = (
Series(IntervalIndex([
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))]))
.astype(CDT(ordered=True)))
for conv in [Timestamp, Timestamp, np.datetime64]:
bins = [conv(v) for v in bin_data]
result = cut(data, bins=bins)
tm.assert_series_equal(Series(result), expected)
bin_pydatetime = [Timestamp(v).to_pydatetime() for v in bin_data]
result = cut(data, bins=bin_pydatetime)
tm.assert_series_equal(Series(result), expected)
bins = to_datetime(bin_data)
result = cut(data, bins=bin_pydatetime)
tm.assert_series_equal(Series(result), expected)
def test_datetime_nan(self):
def f():
cut(date_range('20130101', periods=3), bins=[0, 2, 4])
pytest.raises(ValueError, f)
result = cut(date_range('20130102', periods=5),
bins=date_range('20130101', periods=2))
mask = result.categories.isna()
tm.assert_numpy_array_equal(mask, np.array([False]))
mask = result.isna()
tm.assert_numpy_array_equal(
mask, np.array([False, True, True, True, True]))
@pytest.mark.parametrize(
"array_1_writeable, array_2_writeable",
[(True, True), (True, False), (False, False)])
def test_cut_read_only(self, array_1_writeable, array_2_writeable):
# issue 18773
array_1 = np.arange(0, 100, 10)
array_1.flags.writeable = array_1_writeable
array_2 = np.arange(0, 100, 10)
array_2.flags.writeable = array_2_writeable
hundred_elements = np.arange(100)
tm.assert_categorical_equal(cut(hundred_elements, array_1),
cut(hundred_elements, array_2))
| 37.026465 | 78 | 0.56512 | import os
import pytest
import numpy as np
from pandas.compat import zip
from pandas import (Series, isna, to_datetime, DatetimeIndex,
Timestamp, Interval, IntervalIndex, Categorical,
cut, qcut, date_range)
import pandas.util.testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.algorithms import quantile
import pandas.core.reshape.tile as tmod
class TestCut(object):
def test_simple(self):
data = np.ones(5, dtype='int64')
result = cut(data, 4, labels=False)
expected = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, expected,
check_dtype=False)
def test_bins(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_right(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
expected = Categorical(intervals, ordered=True)
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95,
7.325, 9.7]))
def test_noright(self):
data = np.array([.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3), closed='left')
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95,
7.325, 9.7095]))
def test_arraylike(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667,
6.53333333, 9.7]))
def test_bins_from_intervalindex(self):
c = cut(range(5), 3)
expected = c
result = cut(range(5), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
expected = Categorical.from_codes(np.append(c.codes, -1),
categories=c.categories,
ordered=True)
result = cut(range(6), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
c = cut(ages, bins=[0, 18, 35, 70])
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
tm.assert_index_equal(c.categories, expected)
result = cut([25, 20, 50], bins=c.categories)
tm.assert_index_equal(result.categories, expected)
tm.assert_numpy_array_equal(result.codes,
np.array([1, 1, 2], dtype='int8'))
def test_bins_not_monotonic(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
pytest.raises(ValueError, cut, data, [0.1, 1.5, 1, 10])
def test_wrong_num_labels(self):
data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1]
pytest.raises(ValueError, cut, data, [0, 1, 10],
labels=['foo', 'bar', 'baz'])
def test_cut_corner(self):
pytest.raises(ValueError, cut, [], 2)
pytest.raises(ValueError, cut, [1, 2, 3], 0.5)
def test_cut_out_of_range_more(self):
s = Series([0, -1, 0, 1, -3], name='x')
ind = cut(s, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name='x')
tm.assert_series_equal(ind, exp)
def test_labels(self):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True)
ex_levels = IntervalIndex.from_breaks([-1e-3, 0.25, 0.5, 0.75, 1])
tm.assert_index_equal(result.categories, ex_levels)
result, bins = cut(arr, 4, retbins=True, right=False)
ex_levels = IntervalIndex.from_breaks([0, 0.25, 0.5, 0.75, 1 + 1e-3],
closed='left')
tm.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor(self):
s = Series(np.random.randn(100), name='foo')
factor = cut(s, 4)
assert factor.name == 'foo'
def test_label_precision(self):
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36,
0.54, 0.72])
tm.assert_index_equal(result.categories, ex_levels)
def test_na_handling(self):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4)
result_arr = np.asarray(result)
ex_arr = np.where(isna(arr), np.nan, result_arr)
tm.assert_almost_equal(result_arr, ex_arr)
result = cut(arr, 4, labels=False)
ex_result = np.where(isna(arr), np.nan, result)
tm.assert_almost_equal(result, ex_result)
def test_inf_handling(self):
data = np.arange(6)
data_ser = Series(data, dtype='int64')
bins = [-np.inf, 2, 4, np.inf]
result = cut(data, bins)
result_ser = cut(data_ser, bins)
ex_uniques = IntervalIndex.from_breaks(bins)
tm.assert_index_equal(result.categories, ex_uniques)
assert result[5] == Interval(4, np.inf)
assert result[0] == Interval(-np.inf, 2)
assert result_ser[5] == Interval(4, np.inf)
assert result_ser[0] == Interval(-np.inf, 2)
def test_qcut(self):
arr = np.random.randn(1000)
labels, bins = qcut(arr, 4, retbins=True)
ex_bins = quantile(arr, [0, .25, .5, .75, 1.])
result = labels.categories.left.values
assert np.allclose(result, ex_bins[:-1], atol=1e-2)
result = labels.categories.right.values
assert np.allclose(result, ex_bins[1:], atol=1e-2)
ex_levels = cut(arr, ex_bins, include_lowest=True)
tm.assert_categorical_equal(labels, ex_levels)
def test_qcut_bounds(self):
arr = np.random.randn(1000)
factor = qcut(arr, 10, labels=False)
assert len(np.unique(factor)) == 10
def test_qcut_specify_quantiles(self):
arr = np.random.randn(100)
factor = qcut(arr, [0, .25, .5, .75, 1.])
expected = qcut(arr, 4)
tm.assert_categorical_equal(factor, expected)
def test_qcut_all_bins_same(self):
tm.assert_raises_regex(ValueError, "edges.*unique", qcut,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3)
def test_cut_out_of_bounds(self):
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = isna(result)
ex_mask = (arr < -1) | (arr > 1)
tm.assert_numpy_array_equal(mask, ex_mask)
def test_cut_pass_labels(self):
arr = [50, 5, 10, 15, 20, 30, 70]
bins = [0, 25, 50, 100]
labels = ['Small', 'Medium', 'Large']
result = cut(arr, bins, labels=labels)
exp = Categorical(['Medium'] + 4 * ['Small'] + ['Medium', 'Large'],
categories=labels,
ordered=True)
tm.assert_categorical_equal(result, exp)
result = cut(arr, bins, labels=Categorical.from_codes([0, 1, 2],
labels))
exp = Categorical.from_codes([1] + 4 * [0] + [1, 2], labels)
tm.assert_categorical_equal(result, exp)
labels = ['Good', 'Medium', 'Bad']
result = cut(arr, 3, labels=labels)
exp = cut(arr, 3, labels=Categorical(labels, categories=labels,
ordered=True))
tm.assert_categorical_equal(result, exp)
def test_qcut_include_lowest(self):
values = np.arange(10)
ii = qcut(values, 4)
ex_levels = IntervalIndex(
[Interval(-0.001, 2.25),
Interval(2.25, 4.5),
Interval(4.5, 6.75),
Interval(6.75, 9)])
tm.assert_index_equal(ii.categories, ex_levels)
def test_qcut_nas(self):
arr = np.random.randn(100)
arr[:20] = np.nan
result = qcut(arr, 4)
assert isna(result[:20]).all()
def test_qcut_index(self):
result = qcut([0, 2], 2)
intervals = [Interval(-0.001, 1), Interval(1, 2)]
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_round_frac(self):
result = cut(np.arange(11.), 2)
result = cut(np.arange(11.) / 1e10, 2)
._round_frac(-117.9998, precision=3)
assert result == -118
result = tmod._round_frac(117.9998, precision=3)
assert result == 118
result = tmod._round_frac(117.9998, precision=2)
assert result == 118
result = tmod._round_frac(0.000123456, precision=2)
assert result == 0.00012
def test_qcut_binning_issues(self):
th = os.path.join(tm.get_data_path(), 'cut_data.csv')
arr = np.loadtxt(path)
result = qcut(arr, 20)
starts = []
ends = []
for lev in np.unique(result):
s = lev.left
e = lev.right
assert s != e
starts.append(float(s))
ends.append(float(e))
for (sp, sn), (ep, en) in zip(zip(starts[:-1], starts[1:]),
zip(ends[:-1], ends[1:])):
assert sp < sn
assert ep < en
assert ep <= sn
def test_cut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = cut(s, 3)
exp_bins = np.linspace(0, 8, num=4).round(3)
exp_bins[0] -= 0.008
exp = Series(IntervalIndex.from_breaks(exp_bins, closed='right').take(
[0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(CDT(ordered=True))
tm.assert_series_equal(res, exp)
def test_qcut_return_intervals(self):
s = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
res = qcut(s, [0, 0.333, 0.666, 1])
exp_levels = np.array([Interval(-0.001, 2.664),
Interval(2.664, 5.328), Interval(5.328, 8)])
exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype(
CDT(ordered=True))
tm.assert_series_equal(res, exp)
def test_series_retbins(self):
s = Series(np.arange(4))
result, bins = cut(s, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.003, 1.5, 3], closed='right').repeat(2)).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
result, bins = qcut(s, 2, retbins=True)
expected = Series(IntervalIndex.from_breaks(
[-0.001, 1.5, 3], closed='right').repeat(2)).astype(
CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_qcut_duplicates_bin(self):
values = [0, 0, 0, 0, 1, 2, 3]
expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)])
result = qcut(values, 3, duplicates='drop')
tm.assert_index_equal(result.categories, expected)
pytest.raises(ValueError, qcut, values, 3)
pytest.raises(ValueError, qcut, values, 3, duplicates='raise')
pytest.raises(ValueError, qcut, values, 3, duplicates='foo')
def test_single_quantile(self):
expected = Series([0, 0])
s = Series([9., 9.])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(8.999, 9.0),
Interval(8.999, 9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
expected = Series([0, 0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-9.001, -9.0),
Interval(-9.001, -9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([0., 0.])
expected = Series([0, 0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-0.001, 0.0),
Interval(-0.001, 0.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([9])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(8.999, 9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([-9])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-9.001, -9.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
s = Series([0])
expected = Series([0])
result = qcut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
result = qcut(s, 1)
intervals = IntervalIndex([Interval(-0.001, 0.0)], closed='right')
expected = Series(intervals).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_single_bin(self):
expected = Series([0, 0])
s = Series([9., 9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9., -9.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0])
s = Series([9])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
s = Series([-9])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0, 0])
s = Series([0., 0.])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
expected = Series([0])
s = Series([0])
result = cut(s, 1, labels=False)
tm.assert_series_equal(result, expected)
    def test_datetime_cut(self):
        # cut on datetime data must work identically whether the input is
        # a Series, a list, an ndarray, or a DatetimeIndex.
        data = to_datetime(Series(['2013-01-01', '2013-01-02', '2013-01-03']))
        result, bins = cut(data, 3, retbins=True)
        # three equal-width 16-hour bins; the lowest edge is adjusted
        # below the first timestamp so it is included
        expected = (
            Series(IntervalIndex([
                Interval(Timestamp('2012-12-31 23:57:07.200000'),
                         Timestamp('2013-01-01 16:00:00')),
                Interval(Timestamp('2013-01-01 16:00:00'),
                         Timestamp('2013-01-02 08:00:00')),
                Interval(Timestamp('2013-01-02 08:00:00'),
                         Timestamp('2013-01-03 00:00:00'))]))
            .astype(CDT(ordered=True)))
        tm.assert_series_equal(result, expected)
        # same data as a plain list of np.datetime64
        data = [np.datetime64('2013-01-01'), np.datetime64('2013-01-02'),
                np.datetime64('2013-01-03')]
        result, bins = cut(data, 3, retbins=True)
        tm.assert_series_equal(Series(result), expected)
        # same data as an ndarray
        data = np.array([np.datetime64('2013-01-01'),
                         np.datetime64('2013-01-02'),
                         np.datetime64('2013-01-03')])
        result, bins = cut(data, 3, retbins=True)
        tm.assert_series_equal(Series(result), expected)
        # same data as a DatetimeIndex
        data = DatetimeIndex(['2013-01-01', '2013-01-02', '2013-01-03'])
        result, bins = cut(data, 3, retbins=True)
        tm.assert_series_equal(Series(result), expected)
def test_datetime_bin(self):
data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')]
bin_data = ['2012-12-12', '2012-12-14', '2012-12-16']
expected = (
Series(IntervalIndex([
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))]))
.astype(CDT(ordered=True)))
for conv in [Timestamp, Timestamp, np.datetime64]:
bins = [conv(v) for v in bin_data]
result = cut(data, bins=bins)
tm.assert_series_equal(Series(result), expected)
bin_pydatetime = [Timestamp(v).to_pydatetime() for v in bin_data]
result = cut(data, bins=bin_pydatetime)
tm.assert_series_equal(Series(result), expected)
bins = to_datetime(bin_data)
result = cut(data, bins=bin_pydatetime)
tm.assert_series_equal(Series(result), expected)
    def test_datetime_nan(self):
        # cutting datetime data against numeric bins is invalid
        def f():
            cut(date_range('20130101', periods=3), bins=[0, 2, 4])
        pytest.raises(ValueError, f)
        # with a 2-edge datetime bin, only the first value falls inside;
        # the rest become NaN, while the single category itself is valid
        result = cut(date_range('20130102', periods=5),
                     bins=date_range('20130101', periods=2))
        mask = result.categories.isna()
        tm.assert_numpy_array_equal(mask, np.array([False]))
        mask = result.isna()
        tm.assert_numpy_array_equal(
            mask, np.array([False, True, True, True, True]))
    @pytest.mark.parametrize(
        "array_1_writeable, array_2_writeable",
        [(True, True), (True, False), (False, False)])
    def test_cut_read_only(self, array_1_writeable, array_2_writeable):
        # cut must accept read-only bin arrays and produce the same result
        # as with writeable ones
        array_1 = np.arange(0, 100, 10)
        array_1.flags.writeable = array_1_writeable
        array_2 = np.arange(0, 100, 10)
        array_2.flags.writeable = array_2_writeable
        hundred_elements = np.arange(100)
        tm.assert_categorical_equal(cut(hundred_elements, array_1),
                                    cut(hundred_elements, array_2))
| true | true |
f7262bc097e3f3237af10e09e9f2a090111ba335 | 639 | py | Python | app1/migrations/0029_auto_20200630_0454.py | vashuteotia123/zbcvit | da29b3281ccc87481a264b63c5b6c3a549945f33 | [
"MIT"
] | 6 | 2021-09-16T16:46:56.000Z | 2022-02-06T13:00:08.000Z | app1/migrations/0029_auto_20200630_0454.py | vashuteotia123/zbcvit | da29b3281ccc87481a264b63c5b6c3a549945f33 | [
"MIT"
] | null | null | null | app1/migrations/0029_auto_20200630_0454.py | vashuteotia123/zbcvit | da29b3281ccc87481a264b63c5b6c3a549945f33 | [
"MIT"
] | 1 | 2021-09-14T09:26:58.000Z | 2021-09-14T09:26:58.000Z | # Generated by Django 3.0.6 on 2020-06-30 04:54
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: trims resource_content to 100 chars
    # and gives resource_date_time a default.
    dependencies = [
        ('app1', '0028_resources_resource_date_time'),
    ]
    operations = [
        migrations.AlterField(
            model_name='resources',
            name='resource_content',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='resources',
            name='resource_date_time',
            # NOTE: this default is the wall-clock time captured when
            # makemigrations ran (a fixed past datetime), not "now" --
            # typical of auto-generated migrations; left as generated.
            field=models.DateTimeField(default=datetime.datetime(2020, 6, 30, 4, 54, 42, 601836)),
        ),
    ]
| 25.56 | 98 | 0.613459 |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app1', '0028_resources_resource_date_time'),
]
operations = [
migrations.AlterField(
model_name='resources',
name='resource_content',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='resources',
name='resource_date_time',
field=models.DateTimeField(default=datetime.datetime(2020, 6, 30, 4, 54, 42, 601836)),
),
]
| true | true |
f7262bf49b112b6af91f95cf730a2d48d81c073f | 35 | py | Python | tests/components/halohome/__init__.py | nayaverdier/core | 7352602e47c2de0e54a20f427559c00bad2cd7b8 | [
"Apache-2.0"
] | null | null | null | tests/components/halohome/__init__.py | nayaverdier/core | 7352602e47c2de0e54a20f427559c00bad2cd7b8 | [
"Apache-2.0"
] | null | null | null | tests/components/halohome/__init__.py | nayaverdier/core | 7352602e47c2de0e54a20f427559c00bad2cd7b8 | [
"Apache-2.0"
] | null | null | null | """HALO Home integration tests."""
| 17.5 | 34 | 0.685714 | true | true | |
f7262d295996bb4520eaf83ad3c5682b56392b0c | 4,741 | py | Python | lapidary/Utils.py | efeslab/dolma | 039c5ed768bc879a83424075ccccf3bbd31794ae | [
"BSD-3-Clause"
] | 6 | 2021-01-02T18:29:22.000Z | 2021-10-03T18:55:01.000Z | lapidary/Utils.py | efeslab/dolma | 039c5ed768bc879a83424075ccccf3bbd31794ae | [
"BSD-3-Clause"
] | 2 | 2021-02-15T15:34:34.000Z | 2022-01-30T17:46:00.000Z | lapidary/Utils.py | efeslab/dolma | 039c5ed768bc879a83424075ccccf3bbd31794ae | [
"BSD-3-Clause"
] | 3 | 2020-10-23T14:10:39.000Z | 2021-11-16T10:00:40.000Z | import json
from pathlib import Path
from pprint import pprint
import re, os
from time import sleep
class StatsFile:
    """Incremental reader for a gem5 stats dump file.
    Tracks a byte offset into the stats file so each call to
    get_current_stats() parses only the text appended since the previous
    call, then truncates the file to keep it small. On destruction the raw
    text of the last dump is preserved in a backup file.
    """
    def __init__(self, file_path):
        # byte offset of the first not-yet-parsed byte in the stats file
        self.current_offset = 0
        # file size observed at the last parse; used to skip reparsing
        self.file_size = 0
        self.file_path = file_path
        # most recently parsed {stat name: value string} mapping
        self.cached_stats = {}
        self.backup_file_path = file_path.parent / "stats.backup.txt"
        # raw text of the most recent dump, written out by __del__
        self.backup_stats = ''
    def __del__(self):
        # Remove the working stats file and replace the backup with the
        # raw text of the last dump only.
        print('Cleaning stats...')
        if self.file_path.exists():
            self.file_path.unlink()
        if self.backup_file_path.exists():
            self.backup_file_path.unlink()
        with self.backup_file_path.open('w') as bk:
            bk.write(self.backup_stats)
    def get_current_stats(self):
        """Trigger a gem5 stats dump and parse any newly appended lines.
        Returns a dict mapping stat names to their (string) values; if the
        file size is unchanged since the last call, returns the cached
        result without reparsing.
        """
        import m5  # gem5's embedded module; only importable inside the simulator
        stats = {}
        m5.stats.dump()
        if self.file_size == self.file_path.stat().st_size:
            return self.cached_stats
        # keep the raw text of this dump for the backup file
        with self.file_path.open('r') as fd:
            tmp = fd.read()
            self.backup_stats = tmp[self.current_offset:]
        with self.file_path.open() as fd:
            fd.seek(self.current_offset)
            for line in fd:
                # skip separator rules ("-------") and blank lines
                if '--------' in line or len(line.strip()) == 0:
                    continue
                # first token is the stat name, second is its value
                pieces = [x for x in line.split(' ') if len(x.strip()) > 0]
                if len(pieces) > 1:
                    key = pieces[0].strip()
                    val = pieces[1].strip()
                    stats[key] = val
            self.current_offset = fd.tell()
        self.cached_stats = stats
        # empty the file after consuming it
        with self.file_path.open('w') as f:
            f.truncate()
        # NOTE(review): current_offset is NOT reset to 0 after truncation;
        # this appears to rely on gem5 continuing to write through its own
        # file handle at its previous offset -- confirm before changing.
        size = self.file_path.stat().st_size
        self.file_size = size
        return stats
def parse_perf_output_insts(stderr_str):
    """Extract the retired-instruction count from `perf stat` output.
    Scans each line of the captured stderr text for the first one that
    starts with a (comma-grouped) number followed by "instructions" and
    returns it as an int; returns None when no such line exists.
    """
    pattern = re.compile(r'([0-9,]+)\s*instructions')
    for raw_line in stderr_str.split('\n'):
        match = pattern.match(raw_line.strip())
        if match:
            digits = match.group(1).replace(',', '')
            return int(digits)
    return None
def get_num_insts_perf(cmd):
    """Run *cmd* under `perf stat` and return its instruction count.
    cmd may be a single program string or an argv-style list; returns
    None when perf's output contains no instruction-counter line.
    """
    from subprocess import run, PIPE
    argv = [cmd] if type(cmd) == str else cmd
    perf_prefix = ['perf', 'stat', '-e', 'instructions']
    completed = run(perf_prefix + argv, stdout=PIPE, stderr=PIPE)
    return parse_perf_output_insts(completed.stderr.decode('ascii'))
def get_num_insts_perf_from_pid(pid, timeout=1.0):
    """Sample the retired-instruction count of a running process.
    Attaches `perf stat` to *pid* for *timeout* seconds, detaches, and
    parses the counter from perf's stderr.
    Args:
        pid (int): Process id to attach to.
        timeout (float): Sampling window in seconds. Previously this read
            an undefined global ``timeout`` (NameError at runtime); it is
            now an explicit parameter with a default.
    Returns:
        int or None: Instruction count, or None if perf printed no counter.
    """
    from subprocess import Popen, PIPE
    perf = ['perf', 'stat', '-e', 'instructions', '-p', str(pid)]
    proc = Popen(perf, stdout=PIPE, stderr=PIPE)
    sleep(timeout)
    proc.terminate()
    # communicate() waits for exit and drains the pipes; the original
    # called .decode() on proc.stderr, which is a file object, not bytes.
    _, stderr_bytes = proc.communicate()
    return parse_perf_output_insts(stderr_bytes.decode('ascii'))
def parse_mem_size_string(mem_size_str):
    """Parse a memory-size string such as '512', '64KB', '2mb', or '1GB'.
    An optional case-insensitive KB/MB/GB suffix scales the leading
    integer by the corresponding power of 1024; a bare integer is bytes.
    Args:
        mem_size_str (str): Size specification to parse.
    Returns:
        int: Size in bytes.
    Raises:
        Exception: If the string is not a valid memory size.
    """
    # fullmatch (rather than prefix match) rejects trailing garbage such
    # as '12KBx', which the original silently accepted as 12 KB.
    matches = re.fullmatch(r'([0-9]+)([kmgKMG][bB])?', mem_size_str)
    if not matches:
        raise Exception('{} is not a valid memory size!'.format(mem_size_str))
    mem_size = int(matches.group(1))
    suffix = matches.group(2)
    # Explicit None check replaces the original's bare try/except-pass
    # around an AttributeError on None.lower().
    if suffix is not None:
        multipliers = {'kb': 1024, 'mb': 1024 ** 2, 'gb': 1024 ** 3}
        mem_size *= multipliers[suffix.lower()]
    return mem_size
def select_at_random(list_of_things, num_to_select):
    """Return a uniform random sample, without replacement, of
    num_to_select items from list_of_things."""
    import random
    chosen = random.sample(list_of_things, num_to_select)
    return chosen
def select_evenly_spaced(list_or_dict, num_to_select):
    """Pick num_to_select entries spread evenly across the natural-sort
    order of list_or_dict. Returns a list for list input and a sub-dict
    (of the selected keys) for dict input; returns a shallow copy of the
    whole input when it has fewer items than requested.
    """
    from natsort import natsorted, ns  # third-party natural-order sort
    from copy import copy
    if num_to_select > len(list_or_dict):
        return copy(list_or_dict)
    sorted_keys = natsorted(list_or_dict, alg=ns.IGNORECASE)
    # evenly spaced index selection, per
    # https://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm
    f = lambda m, n: [i*n//m + n//(2*m) for i in range(m)]
    indices = f(num_to_select, len(sorted_keys))
    sublist = [ sorted_keys[i] for i in indices ]
    if isinstance(list_or_dict, list):
        return sublist
    return { k: list_or_dict[k] for k in sublist }
def get_mem_size_from_mappings_file(mappings_file):
    """Read the 'mem_size' entry from a JSON mappings file.
    mappings_file must be a pathlib.Path pointing at a JSON object
    containing a 'mem_size' key.
    """
    assert isinstance(mappings_file, Path)
    with mappings_file.open() as handle:
        parsed = json.load(handle)
    return parsed['mem_size']
def get_directory_entries_by_time(directory_path):
    """Return the entries of *directory_path* in natural-sort order of
    their names.
    NOTE: despite the function name, ordering is by (natural) file name,
    not by modification time.
    """
    from natsort import natsorted
    assert isinstance(directory_path, Path)
    return natsorted(directory_path.iterdir(),
                     key=lambda entry: str(entry.name))
def _get_msr(identifier):
    """Query a per-thread x86-64 register value via the arch_prctl syscall.
    identifier is an ARCH_GET_* code (see <asm/prctl.h>); the kernel writes
    the value into a u64 out-parameter. Linux/x86-64 only.
    NOTE(review): despite the name, this goes through arch_prctl rather
    than reading an MSR directly.
    """
    import ctypes
    libc = ctypes.CDLL(None)  # handle to the already-loaded C library
    syscall = libc.syscall
    SYS_arch_prctl = 158  # x86-64 syscall number for arch_prctl
    ret_val = ctypes.c_uint64()
    syscall(SYS_arch_prctl, identifier, ctypes.pointer(ret_val))
    return ret_val.value
def get_fs_base():
    """Return the calling thread's FS segment base address
    (arch_prctl ARCH_GET_FS)."""
    return _get_msr(0x1003)  # ARCH_GET_FS, from <asm/prctl.h>
def get_gs_base():
    """Return the calling thread's GS segment base address
    (arch_prctl ARCH_GET_GS)."""
    return _get_msr(0x1004)  # ARCH_GET_GS, from <asm/prctl.h>
| 30.986928 | 78 | 0.62645 | import json
from pathlib import Path
from pprint import pprint
import re, os
from time import sleep
class StatsFile:
def __init__(self, file_path):
self.current_offset = 0
self.file_size = 0
self.file_path = file_path
self.cached_stats = {}
self.backup_file_path = file_path.parent / "stats.backup.txt"
self.backup_stats = ''
def __del__(self):
print('Cleaning stats...')
if self.file_path.exists():
self.file_path.unlink()
if self.backup_file_path.exists():
self.backup_file_path.unlink()
with self.backup_file_path.open('w') as bk:
bk.write(self.backup_stats)
def get_current_stats(self):
import m5
stats = {}
m5.stats.dump()
if self.file_size == self.file_path.stat().st_size:
return self.cached_stats
with self.file_path.open('r') as fd:
tmp = fd.read()
self.backup_stats = tmp[self.current_offset:]
with self.file_path.open() as fd:
fd.seek(self.current_offset)
for line in fd:
if '--------' in line or len(line.strip()) == 0:
continue
pieces = [x for x in line.split(' ') if len(x.strip()) > 0]
if len(pieces) > 1:
key = pieces[0].strip()
val = pieces[1].strip()
stats[key] = val
self.current_offset = fd.tell()
self.cached_stats = stats
with self.file_path.open('w') as f:
f.truncate()
size = self.file_path.stat().st_size
self.file_size = size
return stats
def parse_perf_output_insts(stderr_str):
inst_pattern = re.compile('([0-9\,]+)\s*instructions')
for line in stderr_str.split('\n'):
line = line.strip()
matches = inst_pattern.match(line)
if matches is not None:
return int(matches.group(1).replace(',',''))
def get_num_insts_perf(cmd):
from subprocess import run, PIPE
if type(cmd) == str:
cmd = [cmd]
perf = ['perf', 'stat', '-e', 'instructions']
proc = run(perf + cmd, stdout=PIPE, stderr=PIPE)
return parse_perf_output_insts(proc.stderr.decode('ascii'))
def get_num_insts_perf_from_pid(pid):
from subprocess import Popen, PIPE
perf = ['perf', 'stat', '-e', 'instructions', '-p', str(pid)]
proc = Popen(perf, stdout=PIPE, stderr=PIPE)
sleep(timeout)
proc.terminate()
proc.wait()
return parse_perf_output_insts(proc.stderr.decode('ascii'))
def parse_mem_size_string(mem_size_str):
mem_pattern = re.compile(r'([0-9]+)([kmgKMG][bB])?')
matches = mem_pattern.match(mem_size_str)
if not matches:
raise Exception('{} is not a valid memory size!'.format(mem_size_str))
mem_size = int(matches.group(1))
try:
size_modifier = matches.group(2).lower()
if 'kb' in size_modifier:
mem_size *= 1024
if 'mb' in size_modifier:
mem_size *= (1024 ** 2)
if 'gb' in size_modifier:
mem_size *= (1024 ** 3)
except:
pass
return mem_size
def select_at_random(list_of_things, num_to_select):
import random
return random.sample(list_of_things, num_to_select)
def select_evenly_spaced(list_or_dict, num_to_select):
from natsort import natsorted, ns
from copy import copy
if num_to_select > len(list_or_dict):
return copy(list_or_dict)
sorted_keys = natsorted(list_or_dict, alg=ns.IGNORECASE)
f = lambda m, n: [i*n//m + n//(2*m) for i in range(m)]
indices = f(num_to_select, len(sorted_keys))
sublist = [ sorted_keys[i] for i in indices ]
if isinstance(list_or_dict, list):
return sublist
return { k: list_or_dict[k] for k in sublist }
def get_mem_size_from_mappings_file(mappings_file):
assert isinstance(mappings_file, Path)
with mappings_file.open() as f:
mappings = json.load(f)
return mappings['mem_size']
def get_directory_entries_by_time(directory_path):
from natsort import natsorted
assert isinstance(directory_path, Path)
get_name = lambda d: str(d.name)
return natsorted(directory_path.iterdir(), key=get_name)
def _get_msr(identifier):
import ctypes
libc = ctypes.CDLL(None)
syscall = libc.syscall
SYS_arch_prctl = 158
ret_val = ctypes.c_uint64()
syscall(SYS_arch_prctl, identifier, ctypes.pointer(ret_val))
return ret_val.value
def get_fs_base():
ARCH_GET_FS = 0x1003
return _get_msr(ARCH_GET_FS)
def get_gs_base():
ARCH_GET_GS = 0x1004
return _get_msr(ARCH_GET_GS)
| true | true |
f7262d782adc22a7c34947e4dd8321f0a9a524dc | 767 | py | Python | src/shop/migrations/0038_auto_20170323_2021.py | flokli/bornhack-website | 9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d | [
"BSD-3-Clause"
] | 7 | 2017-04-14T15:28:29.000Z | 2021-09-10T09:45:38.000Z | src/shop/migrations/0038_auto_20170323_2021.py | flokli/bornhack-website | 9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d | [
"BSD-3-Clause"
] | 799 | 2016-04-28T09:31:50.000Z | 2022-03-29T09:05:02.000Z | src/shop/migrations/0038_auto_20170323_2021.py | flokli/bornhack-website | 9dd6b0b23c2e6b1fb2c5f03a8766d4aa96d4443d | [
"BSD-3-Clause"
] | 35 | 2016-04-28T09:23:53.000Z | 2021-05-02T12:36:01.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-23 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: constrains Order.payment_method to
    # a fixed set of choices with an empty-string default.
    dependencies = [("shop", "0037_auto_20170319_2204")]
    operations = [
        migrations.AlterField(
            model_name="order",
            name="payment_method",
            field=models.CharField(
                blank=True,
                choices=[
                    ("credit_card", "Credit card"),
                    ("blockchain", "Blockchain"),
                    ("bank_transfer", "Bank transfer"),
                    ("cash", "Cash"),
                ],
                default="",
                max_length=50,
            ),
        )
    ]
| 26.448276 | 56 | 0.494133 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("shop", "0037_auto_20170319_2204")]
operations = [
migrations.AlterField(
model_name="order",
name="payment_method",
field=models.CharField(
blank=True,
choices=[
("credit_card", "Credit card"),
("blockchain", "Blockchain"),
("bank_transfer", "Bank transfer"),
("cash", "Cash"),
],
default="",
max_length=50,
),
)
]
| true | true |
f7262d80fffdbffc0cf859384e8240918a32ea46 | 31,714 | py | Python | python/rz_linear/impl/RzLinearBackward.py | Jokeren/RzLinear | d318d95254cd5c3dcf814774d22dc71179450aa0 | [
"MIT"
] | null | null | null | python/rz_linear/impl/RzLinearBackward.py | Jokeren/RzLinear | d318d95254cd5c3dcf814774d22dc71179450aa0 | [
"MIT"
] | null | null | null | python/rz_linear/impl/RzLinearBackward.py | Jokeren/RzLinear | d318d95254cd5c3dcf814774d22dc71179450aa0 | [
"MIT"
] | null | null | null | from typing import Tuple
import torch
import triton
import triton.language as tl
def rz_linear_backward_tl(input: torch.tensor, hashed_weight: torch.tensor, output_grad: torch.tensor,
                          M: int, K: int, N: int, H: int,
                          R3: int, R2: int, R1: int, R0: int,
                          allow_tf32: bool = True, allow_autotune: bool = False,
                          BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
                          GROUP_SIZE: int = 4) -> Tuple[torch.tensor, torch.tensor]:
    """Backward pass of a hashed (RZ) linear layer.
    Dispatches the two Triton kernels: output_grad x W^T for the input
    gradient and input^T x output_grad for the hashed weight gradient.
    Args:
        input: MxK activation tensor from the forward pass.
        hashed_weight: 1xH compressed weight buffer (logical KxN).
        output_grad: MxN gradient w.r.t. the layer output.
        M, K, N, H: matrix dimensions.
        R3, R2, R1, R0: random numbers driving the weight hashing.
        allow_tf32: permit TF32 tensor-core math.
        allow_autotune: use the autotuned kernels instead of the fixed
            block-size variants.
        BLOCK_SIZE_*, GROUP_SIZE: tiling parameters for the non-autotuned path.
    Returns:
        (input_grad, weight_grad): MxK and 1xH gradient tensors.
    """
    input_grad = rz_linear_backward_input_grad_tl(output_grad, hashed_weight, M, K, N, H, R3, R2, R1, R0, allow_tf32=allow_tf32, allow_autotune=allow_autotune,
                                                  BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                                  GROUP_SIZE=GROUP_SIZE)
    weight_grad = rz_linear_backward_weight_grad_tl(input, output_grad, M, K, N, H, R3, R2, R1, R0, allow_tf32=allow_tf32, allow_autotune=allow_autotune,
                                                    BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                                    GROUP_SIZE=GROUP_SIZE)
    return input_grad, weight_grad
def rz_linear_backward_weight_grad_tl(input: torch.tensor, output_grad: torch.tensor,
                                      M: int, K: int, N: int, H: int,
                                      R3: int, R2: int, R1: int, R0: int,
                                      allow_tf32: bool = True, allow_autotune: bool = True,
                                      BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
                                      GROUP_SIZE: int = 8) -> torch.tensor:
    '''
      Compute input^T x output_grad and return a weight_grad tensor
      Args:
        input (Tensor): A MxK tensor
        output_grad (Tensor): A MxN tensor
        M, K, N, H (int): Matrix dimensions
        R3, R2, R1, R0 (int): Random numbers driving the hashed layout
        allow_tf32 (bool): If tensor core is allowed
        allow_autotune (bool): Use the autotuned kernels instead of the
            fixed block-size variant
        BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: Matrix tiling parameters for performance tunning
      Returns:
        hashed_weight_grad (Tensor): A 1xH tensor
    '''
    assert (K % 4 == 0)
    assert (N % 4 == 0)
    # allocates output; zeros (not empty) is required because the kernel
    # accumulates tiles into the hashed buffer with tl.atomic_add
    hashed_weight_grad = torch.zeros(
        (H), device=output_grad.device, dtype=output_grad.dtype)
    # 1D launch kernel where each block gets its own program; the grid
    # covers all (K-tile, N-tile) pairs of the logical KxN gradient
    def grid(META): return (
        triton.cdiv(K, META['BLOCK_SIZE_K']) *
        triton.cdiv(N, META['BLOCK_SIZE_N']),
    )
    # M is the reduction dimension here; alignment requirements differ
    # between the tf32 (tensor-core) and fp32 code paths
    if allow_tf32:
        assert (M % 32 == 0)
    else:
        assert (M % 8 == 0)
    if allow_autotune:
        # input.stride(1)/input.stride(0) are passed as (stride_am, stride_ak)
        # so the kernel reads `input` as its transpose (a KxM view)
        if allow_tf32:
            rz_linear_backward_weight_grad_kernel_tf32[grid](
                input, output_grad, hashed_weight_grad,
                M, N, K, H,
                input.stride(1), input.stride(0),
                output_grad.stride(0), output_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
        else:
            rz_linear_backward_weight_grad_kernel_fp32[grid](
                input, output_grad, hashed_weight_grad,
                M, N, K, H,
                input.stride(1), input.stride(0),
                output_grad.stride(0), output_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
    else:
        # fixed block sizes supplied by the caller
        rz_linear_backward_weight_grad_kernel_notune[grid](
            input, output_grad, hashed_weight_grad,
            M, N, K, H,
            input.stride(1), input.stride(0),
            output_grad.stride(0), output_grad.stride(1),
            R3=R3, R2=R2, R1=R1, R0=R0,
            allow_tf32=allow_tf32,
            GROUP_SIZE=GROUP_SIZE,
            BLOCK_SIZE_K=BLOCK_SIZE_K,
            BLOCK_SIZE_M=BLOCK_SIZE_M,
            BLOCK_SIZE_N=BLOCK_SIZE_N
        )
    return hashed_weight_grad
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        # (BLOCK_SIZE_M is the reduction-dimension tile here; configs are
        # keyed on the problem shape so each (M, N, K) is tuned once)
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
    ],
    key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_weight_grad_kernel_fp32(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_ak,
    stride_bm, stride_bn,
    # Random numbers
    R3, R2, R1, R0,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    # Autotuned FP32 (no tensor cores) wrapper: delegates to the shared
    # weight-gradient core with allow_tf32 pinned to False.
    rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
                                        stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,
                                        R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=False,
                                        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                        GROUP_SIZE=GROUP_SIZE)
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        # (four exact duplicate Config entries from the original list have
        # been removed: triton.autotune would benchmark each duplicate a
        # second time, lengthening autotuning with no effect on the
        # selected kernel)
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
    ], key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_weight_grad_kernel_tf32(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_ak,
    stride_bm, stride_bn,
    # Random numbers
    R3, R2, R1, R0,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    # Autotuned TF32 (tensor-core) wrapper: delegates to the shared
    # weight-gradient core with allow_tf32 pinned to True.
    rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
                                        stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,
                                        R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=True,
                                        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                        GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_backward_weight_grad_kernel_notune(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_ak,
    stride_bm, stride_bn,
    # Random numbers
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    # Non-autotuned wrapper: block sizes and allow_tf32 are supplied by the
    # caller instead of being chosen by triton.autotune.
    rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
                                        stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,
                                        R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=allow_tf32,
                                        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                        GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_backward_weight_grad_core(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_ak,
    stride_bm, stride_bn,
    # Random numbers
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Kernel for computing the matmul C = A^T x B.
    A has shape (M, K), B has shape (M, N) and C has shape (K, N).
    Each program computes one BLOCK_SIZE_K x BLOCK_SIZE_N tile of C and
    atomically accumulates it into the 1-D hashed buffer at c_ptr.
    """
    pid = tl.program_id(axis=0)
    num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Re-order program ids into GROUP_SIZE-wide groups along K before
    # sweeping N (the grouped-ordering scheme from the Triton matmul
    # tutorial, intended to improve cache reuse of the B tiles).
    num_pid_in_group = GROUP_SIZE * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_k = group_id * GROUP_SIZE
    group_size_k = min(num_pid_k - first_pid_k, GROUP_SIZE)
    pid_k = first_pid_k + (pid % group_size_k)
    pid_n = (pid % num_pid_in_group) // group_size_k
    # [BLOCK_SIZE_K, BLOCK_SIZE_M]
    # A is addressed transposed: rows advance by stride_am (the caller
    # passes input.stride(1) here), so this tile reads A^T directly.
    offs_ak = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
    offs_am = tl.arange(0, BLOCK_SIZE_M)
    a_ptrs = a_ptr + offs_ak[:, None] * \
        stride_am + offs_am[None, :] * stride_ak
    # [BLOCK_SIZE_M, BLOCK_SIZE_N]
    offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offs_bm = tl.arange(0, BLOCK_SIZE_M)
    b_ptrs = b_ptr + offs_bm[:, None] * \
        stride_bm + offs_bn[None, :] * stride_bn
    # [BLOCK_SIZE_K, BLOCK_SIZE_N] accumulator, always fp32
    c = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)
    for _ in range(0, M//BLOCK_SIZE_M):
        # Note that for simplicity, we don't apply a mask here.
        # This means that if M is not a multiple of BLOCK_SIZE_M,
        # this will access out-of-bounds memory and produce an
        # error or (worse!) incorrect results.
        # TODO(Keren): Add M checks
        a = tl.load(a_ptrs)
        b = tl.load(b_ptrs)
        # We accumulate along the M dimension
        c += tl.dot(a, b, allow_tf32=allow_tf32)
        # Advance the ptrs to the next M block
        a_ptrs += BLOCK_SIZE_M * stride_ak
        b_ptrs += BLOCK_SIZE_M * stride_bm
    # -----------------------------------------------------------
    # Write back the block of the output matrix C.
    # The (pid_k, pid_n) tile is hashed to a pseudo-random offset inside
    # the 1-D buffer of size H; atomic_add is required because different
    # tiles may hash to overlapping regions.
    c_offset = c_ptr + tl.arange(0, BLOCK_SIZE_K)[:, None] * \
        BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
    c_ptrs = c_offset + (pid_k * R3 + pid_n * R2 +
                         R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
    tl.atomic_add(c_ptrs, c)
def rz_linear_backward_input_grad_tl(output_grad: torch.tensor, hashed_weight: torch.tensor,
                                     M: int, K: int, N: int, H: int,
                                     R3: int, R2: int, R1: int, R0: int,
                                     allow_tf32: bool = True, allow_autotune: bool = True,
                                     BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
                                     GROUP_SIZE: int = 4) -> torch.tensor:
    '''
      Compute output_grad x hashed_weight^T and return an input_grad tensor
      Args:
        output_grad (Tensor): A MxN tensor
        hashed_weight (Tensor): A 1xH (KxN) tensor
        M, K, N, H (int): matrix dimensions
        R3, R2, R1, R0 (int): random numbers driving the hashed layout
        allow_tf32 (bool): If tensor core is allowed
        allow_autotune (bool): Use the autotuned kernels instead of the
            fixed block-size variant
        BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: Matrix tiling parameters for performance tunning
      Returns:
        input_grad (Tensor): A MxK tensor
    '''
    assert (M % 4 == 0)
    assert (K % 4 == 0)
    # allocates output; torch.empty is used here (unlike the weight-grad
    # path) -- presumably every element is written by the kernel, so no
    # zero-initialization is needed. TODO(review): confirm full coverage.
    input_grad = torch.empty(
        (M, K), device=output_grad.device, dtype=output_grad.dtype)
    # N is the reduction dimension here; alignment requirements differ
    # between the tf32 (tensor-core) and fp32 code paths
    if allow_tf32:
        assert (N % 32 == 0)
    else:
        assert (N % 8 == 0)
    # 1D launch kernel where each block gets its own program; the grid
    # covers all (M-tile, K-tile) pairs of the MxK input gradient
    def grid(META): return (
        triton.cdiv(M, META['BLOCK_SIZE_M']) *
        triton.cdiv(K, META['BLOCK_SIZE_K']),
    )
    if allow_autotune:
        if allow_tf32:
            rz_linear_backward_input_grad_kernel_tf32[grid](
                output_grad, hashed_weight, input_grad,
                M, N, K, H,
                output_grad.stride(0), output_grad.stride(1),
                input_grad.stride(0), input_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
        else:
            rz_linear_backward_input_grad_kernel_fp32[grid](
                output_grad, hashed_weight, input_grad,
                M, N, K, H,
                output_grad.stride(0), output_grad.stride(1),
                input_grad.stride(0), input_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
    else:
        # fixed block sizes and launch configuration supplied explicitly
        rz_linear_backward_input_grad_kernel_notune[grid](
            output_grad, hashed_weight, input_grad,
            M, N, K, H,
            output_grad.stride(0), output_grad.stride(1),
            input_grad.stride(0), input_grad.stride(1),
            R3=R3, R2=R2, R1=R1, R0=R0,
            allow_tf32=allow_tf32,
            num_warps=4,
            num_stages=3,
            BLOCK_SIZE_M=BLOCK_SIZE_M,
            BLOCK_SIZE_N=BLOCK_SIZE_N,
            BLOCK_SIZE_K=BLOCK_SIZE_K,
            GROUP_SIZE=GROUP_SIZE
        )
    return input_grad
# Autotuned fp32 entry point for the input-grad matmul.  triton.autotune
# benchmarks each config the first time a new (M, N, K) key is seen and
# caches the winner.
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
    ],
    key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_input_grad_kernel_fp32(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core with allow_tf32=False."""
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
                                       M=M, N=N, K=K, H=H,
                                       stride_am=stride_am, stride_an=stride_an,
                                       stride_cm=stride_cm, stride_ck=stride_ck,
                                       R3=R3, R2=R2, R1=R1, R0=R0,
                                       allow_tf32=False,
                                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                       GROUP_SIZE=GROUP_SIZE)
# Autotuned tf32 (tensor-core) entry point for the input-grad matmul.
# NOTE: the original config list contained four entries repeated verbatim
# ((128,256,32) s3/w8, (256,128,32) s3/w8, (128,256,32) s3/w4,
# (256,128,32) s3/w4); autotune compiles and benchmarks every entry, so
# exact duplicates only added warm-up cost.  They are removed here; the
# set of distinct configurations (and thus the tuning outcome) is
# unchanged.
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls, num_stages=3/4
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        # the same tile shapes with num_stages=3
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        # shallower pipelines, num_stages=2
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
    ], key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_input_grad_kernel_tf32(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core with allow_tf32=True."""
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
                                       M=M, N=N, K=K, H=H,
                                       stride_am=stride_am, stride_an=stride_an,
                                       stride_cm=stride_cm, stride_ck=stride_ck,
                                       R3=R3, R2=R2, R1=R1, R0=R0,
                                       allow_tf32=True,
                                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                       GROUP_SIZE=GROUP_SIZE)
# Non-autotuned entry point: the caller supplies BLOCK_SIZE_*/GROUP_SIZE
# (and num_warps/num_stages at launch) explicitly instead of letting
# triton.autotune search for them.
@triton.jit
def rz_linear_backward_input_grad_kernel_notune(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core, forwarding allow_tf32."""
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
                                       M=M, N=N, K=K, H=H,
                                       stride_am=stride_am, stride_an=stride_an,
                                       stride_cm=stride_cm, stride_ck=stride_ck,
                                       R3=R3, R2=R2, R1=R1, R0=R0,
                                       allow_tf32=allow_tf32,
                                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                       GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_backward_input_grad_core(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Kernel for computing the matmul C = (A x B^T)

    A has shape (M, N), B has shape H->(K, N) and C has shape (M, K).
    B is never materialized as a dense (K, N) matrix: each
    [BLOCK_SIZE_N, BLOCK_SIZE_K] tile is read from the 1-D hashed buffer
    b_ptr at an offset computed from (pid_k, n) and R3..R0.
    """
    pid = tl.program_id(axis=0)
    num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
    # Row-major mapping of the 1-D program id onto (pid_m, pid_k) output
    # tiles.  The original also computed num_pid_m = cdiv(M, BLOCK_SIZE_M),
    # which was never used and has been removed.  GROUP_SIZE is likewise
    # unused here but kept so all three wrapper kernels share one signature.
    pid_m = pid // num_pid_k
    pid_k = pid % num_pid_k
    # A tile: [BLOCK_SIZE_M, BLOCK_SIZE_N]
    offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_an = tl.arange(0, BLOCK_SIZE_N)
    a_ptrs = a_ptr + offs_am[:, None] * \
        stride_am + offs_an[None, :] * stride_an
    # B tile: [BLOCK_SIZE_N, BLOCK_SIZE_K]
    # Compute hash: b_offset addresses a row-major tile inside the hashed
    # buffer; the (pid_k, n)-dependent term below picks where it starts.
    b_offset = b_ptr + \
        tl.arange(0, BLOCK_SIZE_N)[
            :, None] + tl.arange(0, BLOCK_SIZE_K)[None, :] * BLOCK_SIZE_N
    b_ptrs = b_offset + (pid_k * R3 + 0 * R2 +
                         R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
    # Accumulator: [BLOCK_SIZE_M, BLOCK_SIZE_K]
    c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
    for n in range(0, N//BLOCK_SIZE_N):
        # Note that for simplicity, we don't apply a mask here.
        # This means that if N is not a multiple of BLOCK_SIZE_N,
        # this will access out-of-bounds memory and produce an
        # error or (worse!) incorrect results.
        # TODO(Keren): Add N checks
        a = tl.load(a_ptrs)
        b = tl.load(b_ptrs)
        # We accumulate along the N dimension
        c += tl.dot(a, b, allow_tf32=allow_tf32)
        # Advance the ptrs to the next N block; the B tile is re-hashed
        # rather than advanced linearly.
        a_ptrs += BLOCK_SIZE_N * stride_an
        b_ptrs = b_offset + (pid_k * R3 + (n + 1) * R2 +
                             R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
    # -----------------------------------------------------------
    # Write back the block of the output matrix C
    # [BLOCK_SIZE_M, BLOCK_SIZE_K], masked so partial edge tiles are safe.
    offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    c_ptrs = c_ptr + stride_cm * \
        offs_cm[:, None] + stride_ck * offs_ck[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_ck[None, :] < K)
    tl.store(c_ptrs, c, mask=c_mask)
| 48.124431 | 159 | 0.574037 | from typing import Tuple
import torch
import triton
import triton.language as tl
def rz_linear_backward_tl(input: torch.tensor, hashed_weight: torch.tensor, output_grad: torch.tensor,
                          M: int, K: int, N: int, H: int,
                          R3: int, R2: int, R1: int, R0: int,
                          allow_tf32: bool = True, allow_autotune: bool = False,
                          BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
                          GROUP_SIZE: int = 4) -> Tuple[torch.tensor, torch.tensor]:
    """Run both halves of the rz_linear backward pass.

    Computes the gradient w.r.t. the input (MxK) and the gradient w.r.t.
    the hashed weight (1xH) and returns them as a tuple, forwarding the
    same tiling/tuning options to both kernels.
    """
    launch_opts = dict(allow_tf32=allow_tf32, allow_autotune=allow_autotune,
                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N,
                       BLOCK_SIZE_K=BLOCK_SIZE_K, GROUP_SIZE=GROUP_SIZE)
    # Tuple elements evaluate left-to-right, preserving the original
    # input-grad-then-weight-grad launch order.
    return (rz_linear_backward_input_grad_tl(output_grad, hashed_weight,
                                             M, K, N, H, R3, R2, R1, R0,
                                             **launch_opts),
            rz_linear_backward_weight_grad_tl(input, output_grad,
                                              M, K, N, H, R3, R2, R1, R0,
                                              **launch_opts))
def rz_linear_backward_weight_grad_tl(input: torch.tensor, output_grad: torch.tensor,
                                      M: int, K: int, N: int, H: int,
                                      R3: int, R2: int, R1: int, R0: int,
                                      allow_tf32: bool = True, allow_autotune: bool = True,
                                      BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
                                      GROUP_SIZE: int = 8) -> torch.tensor:
    '''
    Compute input^T x output_grad and accumulate it into a hashed weight
    gradient buffer.

    Args:
        input (Tensor): A MxK tensor
        output_grad (Tensor): A MxN tensor
        M, K, N, H (int): matrix dimensions
        R3, R2, R1, R0 (int): random numbers for the hash function
        allow_tf32 (bool): If tensor core is allowed
        allow_autotune (bool): If True, let Triton autotune the tiling
        BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: tiling
            parameters (used only when autotune is off)
    Returns:
        hashed_weight_grad (Tensor): A 1-D tensor of H elements
    '''
    assert (K % 4 == 0)
    assert (N % 4 == 0)
    # Must be zero-initialized: the kernel combines tiles with
    # tl.atomic_add rather than overwriting.  (H) is just the scalar H.
    hashed_weight_grad = torch.zeros(
        (H), device=output_grad.device, dtype=output_grad.dtype)
    # 1-D launch: one program per (K-block, N-block) weight tile.
    def grid(META): return (
        triton.cdiv(K, META['BLOCK_SIZE_K']) *
        triton.cdiv(N, META['BLOCK_SIZE_N']),
    )
    # tf32 tensor-core dots need a larger multiple along the reduction dim M.
    if allow_tf32:
        assert (M % 32 == 0)
    else:
        assert (M % 8 == 0)
    if allow_autotune:
        if allow_tf32:
            # Strides are swapped (stride(1), stride(0)) so the kernel
            # walks input transposed, i.e. computes input^T x output_grad.
            rz_linear_backward_weight_grad_kernel_tf32[grid](
                input, output_grad, hashed_weight_grad,
                M, N, K, H,
                input.stride(1), input.stride(0),
                output_grad.stride(0), output_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
        else:
            rz_linear_backward_weight_grad_kernel_fp32[grid](
                input, output_grad, hashed_weight_grad,
                M, N, K, H,
                input.stride(1), input.stride(0),
                output_grad.stride(0), output_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
    else:
        # Fixed-tiling path with caller-supplied block sizes.
        rz_linear_backward_weight_grad_kernel_notune[grid](
            input, output_grad, hashed_weight_grad,
            M, N, K, H,
            input.stride(1), input.stride(0),
            output_grad.stride(0), output_grad.stride(1),
            R3=R3, R2=R2, R1=R1, R0=R0,
            allow_tf32=allow_tf32,
            GROUP_SIZE=GROUP_SIZE,
            BLOCK_SIZE_K=BLOCK_SIZE_K,
            BLOCK_SIZE_M=BLOCK_SIZE_M,
            BLOCK_SIZE_N=BLOCK_SIZE_N
        )
    return hashed_weight_grad
# Autotuned fp32 entry point for the weight-grad matmul; triton.autotune
# benchmarks each config per new (M, N, K) key and caches the winner.
@triton.autotune(
    configs=[
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 16}, num_stages=2, num_warps=4),
    ],
    key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_weight_grad_kernel_fp32(
    a_ptr, b_ptr, c_ptr,
    M, N, K, H,
    stride_am, stride_ak,
    stride_bm, stride_bn,
    R3, R2, R1, R0,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core with allow_tf32=False."""
    rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
                                        stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,
                                        R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=False,
                                        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                        GROUP_SIZE=GROUP_SIZE)
# Autotuned tf32 (tensor-core) entry point for the weight-grad matmul.
# NOTE: the original config list repeated four entries verbatim
# ((128,256) s3/w8, (256,128) s3/w8, (128,256) s3/w4, (256,128) s3/w4);
# autotune compiles and benchmarks every entry, so exact duplicates only
# added warm-up cost.  They are removed here; the set of distinct
# configurations (and thus the tuning outcome) is unchanged.
@triton.autotune(
    configs=[
        # compute-bound configs, num_stages=3/4
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=4, num_warps=4),
        # the same tile shapes with num_stages=3
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=3, num_warps=4),
        # shallower pipelines, num_stages=2
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_M': 32}, num_stages=2, num_warps=4),
    ], key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_weight_grad_kernel_tf32(
    a_ptr, b_ptr, c_ptr,
    M, N, K, H,
    stride_am, stride_ak,
    stride_bm, stride_bn,
    R3, R2, R1, R0,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core with allow_tf32=True."""
    rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
                                        stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,
                                        R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=True,
                                        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                        GROUP_SIZE=GROUP_SIZE)
# Non-autotuned entry point: the caller supplies BLOCK_SIZE_*/GROUP_SIZE
# explicitly instead of letting triton.autotune search for them.
@triton.jit
def rz_linear_backward_weight_grad_kernel_notune(
    a_ptr, b_ptr, c_ptr,
    M, N, K, H,
    stride_am, stride_ak,
    stride_bm, stride_bn,
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core, forwarding allow_tf32."""
    rz_linear_backward_weight_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
                                        stride_am=stride_am, stride_ak=stride_ak, stride_bm=stride_bm, stride_bn=stride_bn,
                                        R3=R3, R2=R2, R1=R1, R0=R0, allow_tf32=allow_tf32,
                                        BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                        GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_backward_weight_grad_core(
    # Pointers to matrices: a = input (read via swapped strides, i.e. A^T),
    # b = output_grad, c = 1-D hashed weight-gradient buffer.
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # Strides for a (the caller passes input.stride(1), input.stride(0),
    # so this kernel effectively walks input transposed).
    stride_am, stride_ak,
    # Strides for b (output_grad).
    stride_bm, stride_bn,
    # Random numbers for the hash function.
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters.
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Compute a (BLOCK_SIZE_K, BLOCK_SIZE_N) tile of input^T x output_grad
    and atomically accumulate it into the hashed 1-D buffer c_ptr at a
    (pid_k, pid_n)-dependent offset derived from R3..R0.
    """
    pid = tl.program_id(axis=0)
    num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    # Grouped (swizzled) mapping of the 1-D pid onto (pid_k, pid_n) tiles:
    # GROUP_SIZE consecutive K-blocks share the same N-block range, which
    # improves L2 reuse for the b tiles.
    num_pid_in_group = GROUP_SIZE * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_k = group_id * GROUP_SIZE
    group_size_k = min(num_pid_k - first_pid_k, GROUP_SIZE)
    pid_k = first_pid_k + (pid % group_size_k)
    pid_n = (pid % num_pid_in_group) // group_size_k
    # a tile: [BLOCK_SIZE_K, BLOCK_SIZE_M] slice of A^T.
    offs_ak = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
    offs_am = tl.arange(0, BLOCK_SIZE_M)
    a_ptrs = a_ptr + offs_ak[:, None] * \
        stride_am + offs_am[None, :] * stride_ak
    # b tile: [BLOCK_SIZE_M, BLOCK_SIZE_N] slice of output_grad.
    offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    offs_bm = tl.arange(0, BLOCK_SIZE_M)
    b_ptrs = b_ptr + offs_bm[:, None] * \
        stride_bm + offs_bn[None, :] * stride_bn
    # Accumulator: [BLOCK_SIZE_K, BLOCK_SIZE_N].
    c = tl.zeros((BLOCK_SIZE_K, BLOCK_SIZE_N), dtype=tl.float32)
    for _ in range(0, M//BLOCK_SIZE_M):
        # No masking on the loads:
        # This means that if M is not a multiple of BLOCK_SIZE_M,
        # this will access out-of-bounds memory and produce an
        # error or (worse!) incorrect results.
        # TODO(Keren): Add M checks
        a = tl.load(a_ptrs)
        b = tl.load(b_ptrs)
        # We accumulate along the M dimension
        c += tl.dot(a, b, allow_tf32=allow_tf32)
        # Advance the ptrs to the next M block
        a_ptrs += BLOCK_SIZE_M * stride_ak
        b_ptrs += BLOCK_SIZE_M * stride_bm
    # -----------------------------------------------------------
    # Write back the block of the output matrix C: address a row-major
    # tile inside the hashed buffer, offset by the (pid_k, pid_n) hash,
    # and accumulate atomically since distinct tiles may hash-collide.
    c_offset = c_ptr + tl.arange(0, BLOCK_SIZE_K)[:, None] * \
        BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
    c_ptrs = c_offset + (pid_k * R3 + pid_n * R2 +
                         R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
    tl.atomic_add(c_ptrs, c)
def rz_linear_backward_input_grad_tl(output_grad: torch.tensor, hashed_weight: torch.tensor,
                                     M: int, K: int, N: int, H: int,
                                     R3: int, R2: int, R1: int, R0: int,
                                     allow_tf32: bool = True, allow_autotune: bool = True,
                                     BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
                                     GROUP_SIZE: int = 4) -> torch.tensor:
    '''
    Compute output_grad x hashed_weight^T and return an input_grad tensor.

    Args:
        output_grad (Tensor): A MxN tensor
        hashed_weight (Tensor): A 1xH tensor holding the hashed (K, N) weight
        M, K, N, H (int): matrix dimensions
        R3, R2, R1, R0 (int): random numbers for the hash function
        allow_tf32 (bool): If tensor core is allowed
        allow_autotune (bool): If True, let Triton autotune the tiling
        BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: tiling
            parameters (used only when autotune is off)
    Returns:
        input_grad (Tensor): A MxK tensor
    '''
    assert (M % 4 == 0)
    assert (K % 4 == 0)
    # allocates output
    input_grad = torch.empty(
        (M, K), device=output_grad.device, dtype=output_grad.dtype)
    # tf32 tensor-core dots need a larger multiple along the reduction dim N.
    if allow_tf32:
        assert (N % 32 == 0)
    else:
        assert (N % 8 == 0)
    # 1D launch kernel where each block gets its own program.
    def grid(META): return (
        triton.cdiv(M, META['BLOCK_SIZE_M']) *
        triton.cdiv(K, META['BLOCK_SIZE_K']),
    )
    if allow_autotune:
        if allow_tf32:
            rz_linear_backward_input_grad_kernel_tf32[grid](
                output_grad, hashed_weight, input_grad,
                M, N, K, H,
                output_grad.stride(0), output_grad.stride(1),
                input_grad.stride(0), input_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
        else:
            rz_linear_backward_input_grad_kernel_fp32[grid](
                output_grad, hashed_weight, input_grad,
                M, N, K, H,
                output_grad.stride(0), output_grad.stride(1),
                input_grad.stride(0), input_grad.stride(1),
                R3=R3, R2=R2, R1=R1, R0=R0,
                GROUP_SIZE=GROUP_SIZE
            )
    else:
        # Fixed-tiling path with caller-supplied launch parameters.
        rz_linear_backward_input_grad_kernel_notune[grid](
            output_grad, hashed_weight, input_grad,
            M, N, K, H,
            output_grad.stride(0), output_grad.stride(1),
            input_grad.stride(0), input_grad.stride(1),
            R3=R3, R2=R2, R1=R1, R0=R0,
            allow_tf32=allow_tf32,
            num_warps=4,
            num_stages=3,
            BLOCK_SIZE_M=BLOCK_SIZE_M,
            BLOCK_SIZE_N=BLOCK_SIZE_N,
            BLOCK_SIZE_K=BLOCK_SIZE_K,
            GROUP_SIZE=GROUP_SIZE
        )
    return input_grad
# Autotuned fp32 entry point for the input-grad matmul.
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 32,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 16}, num_stages=2, num_warps=4),
    ],
    key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_input_grad_kernel_fp32(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core with allow_tf32=False."""
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
                                       M=M, N=N, K=K, H=H,
                                       stride_am=stride_am, stride_an=stride_an,
                                       stride_cm=stride_cm, stride_ck=stride_ck,
                                       R3=R3, R2=R2, R1=R1, R0=R0,
                                       allow_tf32=False,
                                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                       GROUP_SIZE=GROUP_SIZE)
# Autotuned tf32 (tensor-core) entry point for the input-grad matmul.
# NOTE: the original config list repeated four entries verbatim; autotune
# compiles and benchmarks every entry, so the exact duplicates are
# removed here.  The set of distinct configurations is unchanged.
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls, num_stages=3/4
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=4, num_warps=4),
        # the same tile shapes with num_stages=3
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=3, num_warps=4),
        # shallower pipelines, num_stages=2
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 256,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 128,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_K': 64,
                       'BLOCK_SIZE_N': 32}, num_stages=2, num_warps=4),
    ], key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_backward_input_grad_kernel_tf32(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core with allow_tf32=True."""
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
                                       M=M, N=N, K=K, H=H,
                                       stride_am=stride_am, stride_an=stride_an,
                                       stride_cm=stride_cm, stride_ck=stride_ck,
                                       R3=R3, R2=R2, R1=R1, R0=R0,
                                       allow_tf32=True,
                                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                       GROUP_SIZE=GROUP_SIZE)
# Non-autotuned entry point: launch parameters come from the caller.
@triton.jit
def rz_linear_backward_input_grad_kernel_notune(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Thin wrapper: delegate to the shared core, forwarding allow_tf32."""
    rz_linear_backward_input_grad_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr,
                                       M=M, N=N, K=K, H=H,
                                       stride_am=stride_am, stride_an=stride_an,
                                       stride_cm=stride_cm, stride_ck=stride_ck,
                                       R3=R3, R2=R2, R1=R1, R0=R0,
                                       allow_tf32=allow_tf32,
                                       BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
                                       GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_backward_input_grad_core(
    # Pointers to matrices
    a_ptr, b_ptr, c_ptr,
    # Matrix dimensions
    M, N, K, H,
    # The stride variables represent how much to increase the ptr by when moving by 1
    # element in a particular dimension.
    stride_am, stride_an,
    stride_cm, stride_ck,
    # Random numbers
    R3, R2, R1, R0,
    allow_tf32: tl.constexpr,
    # Meta-parameters
    BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
    GROUP_SIZE: tl.constexpr
):
    """Kernel for computing the matmul C = (A x B^T).

    A has shape (M, N), B has shape H->(K, N) and C has shape (M, K).
    Each [BLOCK_SIZE_N, BLOCK_SIZE_K] tile of B is fetched from the 1-D
    hashed buffer b_ptr at an offset derived from (pid_k, n) and R3..R0.
    """
    pid = tl.program_id(axis=0)
    num_pid_k = tl.cdiv(K, BLOCK_SIZE_K)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)  # NOTE(review): unused
    # Row-major mapping of the 1-D pid onto (pid_m, pid_k) output tiles.
    pid_m = pid // num_pid_k
    pid_k = pid % num_pid_k
    # A tile: [BLOCK_SIZE_M, BLOCK_SIZE_N]
    offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_an = tl.arange(0, BLOCK_SIZE_N)
    a_ptrs = a_ptr + offs_am[:, None] * \
        stride_am + offs_an[None, :] * stride_an
    # B tile: [BLOCK_SIZE_N, BLOCK_SIZE_K]
    # Compute hash
    b_offset = b_ptr + \
        tl.arange(0, BLOCK_SIZE_N)[
            :, None] + tl.arange(0, BLOCK_SIZE_K)[None, :] * BLOCK_SIZE_N
    b_ptrs = b_offset + (pid_k * R3 + 0 * R2 +
                         R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
    # Accumulator: [BLOCK_SIZE_M, BLOCK_SIZE_K]
    c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_K), dtype=tl.float32)
    for n in range(0, N//BLOCK_SIZE_N):
        # Note that for simplicity, we don't apply a mask here.
        a = tl.load(a_ptrs)
        b = tl.load(b_ptrs)
        # Accumulate along the N dimension.
        c += tl.dot(a, b, allow_tf32=allow_tf32)
        # Advance A linearly; the B tile is re-hashed each iteration.
        a_ptrs += BLOCK_SIZE_N * stride_an
        b_ptrs = b_offset + (pid_k * R3 + (n + 1) * R2 +
                             R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
    # Write back the output tile, masked so partial edge tiles are safe.
    offs_ck = pid_k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    c_ptrs = c_ptr + stride_cm * \
        offs_cm[:, None] + stride_ck * offs_ck[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_ck[None, :] < K)
    tl.store(c_ptrs, c, mask=c_mask)
| true | true |
f7262ddf11eb39c0eb95d69c00ccbce7ab819c84 | 1,604 | py | Python | test/lazy/test_added_diag_lazy_tensor.py | cdgreenidge/gpytorch | d4cc610963bd812052e43e3aed84fb8b2ec94aa6 | [
"MIT"
] | null | null | null | test/lazy/test_added_diag_lazy_tensor.py | cdgreenidge/gpytorch | d4cc610963bd812052e43e3aed84fb8b2ec94aa6 | [
"MIT"
] | null | null | null | test/lazy/test_added_diag_lazy_tensor.py | cdgreenidge/gpytorch | d4cc610963bd812052e43e3aed84fb8b2ec94aa6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import torch
import unittest
from gpytorch.lazy import NonLazyTensor, DiagLazyTensor, AddedDiagLazyTensor
from test.lazy._lazy_tensor_test_case import LazyTensorTestCase
class TestAddedDiagLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
def create_lazy_tensor(self):
tensor = torch.randn(5, 5)
tensor = tensor.transpose(-1, -2).matmul(tensor)
tensor.requires_grad_(True)
diag = torch.tensor([1.0, 2.0, 4.0, 2.0, 3.0], requires_grad=True)
return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))
def evaluate_lazy_tensor(self, lazy_tensor):
diag = lazy_tensor._diag_tensor._diag
tensor = lazy_tensor._lazy_tensor.tensor
return tensor + diag.diag()
class TestAddedDiagLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
    """Batched (3 x 5 x 5) variant of the AddedDiagLazyTensor test case."""

    seed = 4
    should_test_sample = True

    def create_lazy_tensor(self):
        # Batch of symmetric PSD matrices via A^T @ A.
        tensor = torch.randn(3, 5, 5)
        tensor = tensor.transpose(-1, -2).matmul(tensor)
        tensor.requires_grad_(True)
        diag = torch.tensor(
            [[1.0, 2.0, 4.0, 2.0, 3.0], [2.0, 1.0, 2.0, 1.0, 4.0], [1.0, 2.0, 2.0, 3.0, 4.0]], requires_grad=True
        )
        return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))

    def evaluate_lazy_tensor(self, lazy_tensor):
        """Reference dense evaluation: dense batch + batch diagonal matrices.

        Fix: the previous implementation built the batch diagonal with
        ``torch.cat([diag[i].diag().unsqueeze(0) for i in range(3)])``, which
        hard-coded the batch size 3. ``torch.diag_embed`` does the same in a
        single call and works for any batch size.
        """
        diag = lazy_tensor._diag_tensor._diag
        tensor = lazy_tensor._lazy_tensor.tensor
        return tensor + torch.diag_embed(diag)
unittest.main()
| 34.12766 | 113 | 0.682045 |
import torch
import unittest
from gpytorch.lazy import NonLazyTensor, DiagLazyTensor, AddedDiagLazyTensor
from test.lazy._lazy_tensor_test_case import LazyTensorTestCase
class TestAddedDiagLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
def create_lazy_tensor(self):
tensor = torch.randn(5, 5)
tensor = tensor.transpose(-1, -2).matmul(tensor)
tensor.requires_grad_(True)
diag = torch.tensor([1.0, 2.0, 4.0, 2.0, 3.0], requires_grad=True)
return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))
def evaluate_lazy_tensor(self, lazy_tensor):
diag = lazy_tensor._diag_tensor._diag
tensor = lazy_tensor._lazy_tensor.tensor
return tensor + diag.diag()
class TestAddedDiagLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
seed = 4
should_test_sample = True
def create_lazy_tensor(self):
tensor = torch.randn(3, 5, 5)
tensor = tensor.transpose(-1, -2).matmul(tensor)
tensor.requires_grad_(True)
diag = torch.tensor(
[[1.0, 2.0, 4.0, 2.0, 3.0], [2.0, 1.0, 2.0, 1.0, 4.0], [1.0, 2.0, 2.0, 3.0, 4.0]], requires_grad=True
)
return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))
def evaluate_lazy_tensor(self, lazy_tensor):
diag = lazy_tensor._diag_tensor._diag
tensor = lazy_tensor._lazy_tensor.tensor
return tensor + torch.cat([diag[i].diag().unsqueeze(0) for i in range(3)])
if __name__ == "__main__":
unittest.main()
| true | true |
f7262e47ccd52c7b99efed84275928088089c827 | 1,307 | py | Python | rally/plugins/openstack/scenarios/monasca/metrics.py | mail2nsrajesh/rally | d8995226fe75c573d6d64c7ade8a4ceca0758366 | [
"Apache-2.0"
] | null | null | null | rally/plugins/openstack/scenarios/monasca/metrics.py | mail2nsrajesh/rally | d8995226fe75c573d6d64c7ade8a4ceca0758366 | [
"Apache-2.0"
] | null | null | null | rally/plugins/openstack/scenarios/monasca/metrics.py | mail2nsrajesh/rally | d8995226fe75c573d6d64c7ade8a4ceca0758366 | [
"Apache-2.0"
] | null | null | null | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.monasca import utils as monascautils
from rally.task import validation
"""Scenarios for monasca Metrics API."""
@validation.add("required_services",
                services=[consts.Service.MONASCA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="MonascaMetrics.list_metrics")
class ListMetrics(monascautils.MonascaScenario):
    """Rally benchmark scenario that lists metrics through the Monasca API.

    The validation decorators require a Monasca service and OpenStack users;
    the scenario name registered with Rally is ``MonascaMetrics.list_metrics``.
    """

    def run(self, **kwargs):
        """Fetch user's metrics.

        :param kwargs: optional arguments for list query:
                       name, dimensions, start_time, etc
        """
        # Delegates to MonascaScenario._list_metrics, which wraps the client
        # call with Rally's atomic-action timing.
        self._list_metrics(**kwargs)
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.monasca import utils as monascautils
from rally.task import validation
@validation.add("required_services",
services=[consts.Service.MONASCA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="MonascaMetrics.list_metrics")
class ListMetrics(monascautils.MonascaScenario):
def run(self, **kwargs):
self._list_metrics(**kwargs)
| true | true |
f7262f0f2023d57b85b341ec89f31256cc3f6c36 | 4,563 | py | Python | onmt/models/model_saver.py | UKPLab/emnlp2019-dualgraph | da38be675c392af43db436e3b2f0c8ff355c04f9 | [
"MIT"
] | 25 | 2019-09-05T07:57:45.000Z | 2021-12-08T01:59:57.000Z | onmt/models/model_saver.py | 15071347094/emnlp2019-dualgraph | 0c58fb7f3ad3b9da3b92b2d2841558807fc79fd0 | [
"MIT"
] | 2 | 2020-11-21T00:41:44.000Z | 2020-11-25T00:36:19.000Z | onmt/models/model_saver.py | 15071347094/emnlp2019-dualgraph | 0c58fb7f3ad3b9da3b92b2d2841558807fc79fd0 | [
"MIT"
] | 6 | 2020-01-27T22:54:56.000Z | 2020-11-24T02:48:05.000Z | import os
import torch
import torch.nn as nn
from collections import deque
from onmt.utils.logging import logger
from copy import deepcopy
def build_model_saver(model_opt, opt, model, fields, optim):
    """Construct a :class:`ModelSaver` from the training options.

    Args:
        model_opt: options the model was built with (stored in checkpoints).
        opt: runtime options providing ``save_model`` and ``keep_checkpoint``.
        model: the model to checkpoint.
        fields: the vocab fields to checkpoint.
        optim: the optimizer whose state is checkpointed.

    Returns:
        ModelSaver: a saver writing to ``opt.save_model``.
    """
    return ModelSaver(
        opt.save_model,
        model,
        model_opt,
        fields,
        optim,
        opt.keep_checkpoint,
    )
class ModelSaverBase(object):
    """Base class for model saving operations

    Inherited classes must implement private methods:

    * `_save`
    * `_rm_checkpoint`
    """

    def __init__(self, base_path, model, model_opt, fields, optim,
                 keep_checkpoint=-1):
        """
        Args:
            base_path (str): prefix for checkpoint file names.
            model: the model to checkpoint.
            model_opt: model options stored alongside the weights.
            fields: vocab fields stored alongside the weights.
            optim: optimizer whose state is checkpointed.
            keep_checkpoint (int): 0 disables saving, a positive value keeps
                only that many most-recent checkpoints on disk, any negative
                value keeps them all.
        """
        self.base_path = base_path
        self.model = model
        self.model_opt = model_opt
        self.fields = fields
        self.optim = optim
        # Step of the most recent save; used to skip duplicate saves.
        self.last_saved_step = None
        self.keep_checkpoint = keep_checkpoint
        if keep_checkpoint > 0:
            # Bounded FIFO of checkpoint names; once full, the oldest entry
            # is deleted from disk before a new one is appended.
            self.checkpoint_queue = deque([], maxlen=keep_checkpoint)

    def save(self, step, moving_average=None):
        """Main entry point for model saver

        It wraps the `_save` method with checks and apply `keep_checkpoint`
        related logic.

        Args:
            step (int): training step the checkpoint belongs to.
            moving_average: optional iterable of averaged parameter tensors;
                if given they are copied into a deep copy of the model so the
                live model is left untouched.
        """
        # keep_checkpoint == 0 disables saving entirely; also skip when this
        # exact step has already been saved.
        if self.keep_checkpoint == 0 or step == self.last_saved_step:
            return

        if moving_average:
            save_model = deepcopy(self.model)
            for avg, param in zip(moving_average, save_model.parameters()):
                param.data.copy_(avg.data)
        else:
            save_model = self.model

        chkpt, chkpt_name = self._save(step, save_model)
        self.last_saved_step = step

        if moving_average:
            # Drop the temporary averaged copy as soon as it is persisted.
            del save_model

        if self.keep_checkpoint > 0:
            if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
                todel = self.checkpoint_queue.popleft()
                self._rm_checkpoint(todel)
            self.checkpoint_queue.append(chkpt_name)

    def _save(self, step, save_model):
        """Save a resumable checkpoint.

        Fix: the abstract signature previously took only ``step`` although
        ``save`` calls ``self._save(step, save_model)`` and the concrete
        ``ModelSaver._save`` takes ``(step, model)``. The base signature now
        matches how the hook is actually invoked.

        Args:
            step (int): step number
            save_model: the model object to persist

        Returns:
            (object, str):

            * checkpoint: the saved object
            * checkpoint_name: name (or path) of the saved checkpoint
        """
        raise NotImplementedError()

    def _rm_checkpoint(self, name):
        """Remove a checkpoint

        Args:
            name(str): name that identifies the checkpoint
                (it may be a filepath)
        """
        raise NotImplementedError()
class ModelSaver(ModelSaverBase):
    """Simple model saver to filesystem.

    Writes checkpoints named ``<base_path>_step_<step>.pt`` containing the
    model weights (minus the generator), the generator weights, the trimmed
    vocab fields, the model options and the optimizer state.
    """

    def _save(self, step, model):
        # Unwrap DataParallel so the state dict keys have no 'module.' prefix.
        real_model = (model.module
                      if isinstance(model, nn.DataParallel)
                      else model)
        real_generator = (real_model.generator.module
                          if isinstance(real_model.generator, nn.DataParallel)
                          else real_model.generator)

        # The generator is saved separately, so drop its keys from the
        # model's state dict.
        model_state_dict = real_model.state_dict()
        model_state_dict = {k: v for k, v in model_state_dict.items()
                            if 'generator' not in k}
        generator_state_dict = real_generator.state_dict()

        # NOTE: We need to trim the vocab to remove any unk tokens that
        # were not originally here.
        vocab = deepcopy(self.fields)
        # Graph encoders carry two extra node-label sides besides src/tgt.
        if hasattr(model.encoder, 'is_graph_encoder'):
            sides = ["src", "node1", "node2", "tgt"]
        else:
            sides = ["src", "tgt"]
        for side in sides:
            keys_to_pop = []
            if hasattr(vocab[side], "fields"):
                # Index 0 of itos is assumed to be the unk token; any other
                # token mapped to id 0 is a spurious alias and is dropped.
                # NOTE(review): relies on torchtext field internals
                # (fields[0][1].vocab) — confirm against the torchtext
                # version pinned by this project.
                unk_token = vocab[side].fields[0][1].vocab.itos[0]
                for key, value in vocab[side].fields[0][1].vocab.stoi.items():
                    if value == 0 and key != unk_token:
                        keys_to_pop.append(key)
                for key in keys_to_pop:
                    vocab[side].fields[0][1].vocab.stoi.pop(key, None)

        checkpoint = {
            'model': model_state_dict,
            'generator': generator_state_dict,
            'vocab': vocab,
            'opt': self.model_opt,
            'optim': self.optim.state_dict(),
        }
        logger.info("Saving checkpoint %s_step_%d.pt" % (self.base_path, step))
        checkpoint_path = '%s_step_%d.pt' % (self.base_path, step)
        torch.save(checkpoint, checkpoint_path)
        return checkpoint, checkpoint_path

    def _rm_checkpoint(self, name):
        # 'name' is the path returned by _save for an earlier step.
        os.remove(name)
| 31.040816 | 79 | 0.570896 | import os
import torch
import torch.nn as nn
from collections import deque
from onmt.utils.logging import logger
from copy import deepcopy
def build_model_saver(model_opt, opt, model, fields, optim):
model_saver = ModelSaver(opt.save_model,
model,
model_opt,
fields,
optim,
opt.keep_checkpoint)
return model_saver
class ModelSaverBase(object):
def __init__(self, base_path, model, model_opt, fields, optim,
keep_checkpoint=-1):
self.base_path = base_path
self.model = model
self.model_opt = model_opt
self.fields = fields
self.optim = optim
self.last_saved_step = None
self.keep_checkpoint = keep_checkpoint
if keep_checkpoint > 0:
self.checkpoint_queue = deque([], maxlen=keep_checkpoint)
def save(self, step, moving_average=None):
if self.keep_checkpoint == 0 or step == self.last_saved_step:
return
if moving_average:
save_model = deepcopy(self.model)
for avg, param in zip(moving_average, save_model.parameters()):
param.data.copy_(avg.data)
else:
save_model = self.model
chkpt, chkpt_name = self._save(step, save_model)
self.last_saved_step = step
if moving_average:
del save_model
if self.keep_checkpoint > 0:
if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
todel = self.checkpoint_queue.popleft()
self._rm_checkpoint(todel)
self.checkpoint_queue.append(chkpt_name)
def _save(self, step):
raise NotImplementedError()
def _rm_checkpoint(self, name):
raise NotImplementedError()
class ModelSaver(ModelSaverBase):
def _save(self, step, model):
real_model = (model.module
if isinstance(model, nn.DataParallel)
else model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
vocab = deepcopy(self.fields)
if hasattr(model.encoder, 'is_graph_encoder'):
sides = ["src", "node1", "node2", "tgt"]
else:
sides = ["src", "tgt"]
for side in sides:
keys_to_pop = []
if hasattr(vocab[side], "fields"):
unk_token = vocab[side].fields[0][1].vocab.itos[0]
for key, value in vocab[side].fields[0][1].vocab.stoi.items():
if value == 0 and key != unk_token:
keys_to_pop.append(key)
for key in keys_to_pop:
vocab[side].fields[0][1].vocab.stoi.pop(key, None)
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': vocab,
'opt': self.model_opt,
'optim': self.optim.state_dict(),
}
logger.info("Saving checkpoint %s_step_%d.pt" % (self.base_path, step))
checkpoint_path = '%s_step_%d.pt' % (self.base_path, step)
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _rm_checkpoint(self, name):
os.remove(name)
| true | true |
f7262f809657c84b116f0216cd007d0f3032680e | 393 | py | Python | CTForces/wsgi.py | pomo-mondreganto/CTForces-old | 86758192f800108ff109f07fe155d5a98b4a3e14 | [
"MIT"
] | null | null | null | CTForces/wsgi.py | pomo-mondreganto/CTForces-old | 86758192f800108ff109f07fe155d5a98b4a3e14 | [
"MIT"
] | 6 | 2021-10-01T14:18:34.000Z | 2021-10-01T14:19:17.000Z | CTForces/wsgi.py | pomo-mondreganto/CTForces-old | 86758192f800108ff109f07fe155d5a98b4a3e14 | [
"MIT"
] | null | null | null | """
WSGI config for CTForces project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already
# specifies a settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CTForces.settings")

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) look up.
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CTForces.settings")
application = get_wsgi_application()
| true | true |
f7262f94e75074ce3f184e5fc532f018ccd981b9 | 1,224 | py | Python | 6.0-expresiones-regulares/src/spider.py | zehemz/clases-python-101 | 633cb5f0cbc85e64e242514f0394754a5bed0513 | [
"Apache-2.0"
] | null | null | null | 6.0-expresiones-regulares/src/spider.py | zehemz/clases-python-101 | 633cb5f0cbc85e64e242514f0394754a5bed0513 | [
"Apache-2.0"
] | null | null | null | 6.0-expresiones-regulares/src/spider.py | zehemz/clases-python-101 | 633cb5f0cbc85e64e242514f0394754a5bed0513 | [
"Apache-2.0"
] | null | null | null | '''
Created on Jul 27, 2016
@author: zehemz
'''
from lxml import html
from lxml import etree
from io import StringIO, BytesIO
from copy import deepcopy
import requests
import re
import pickle
def writeList(lines):
    """Write each entry of *lines* to ``websScrapeadas.txt``, one per line.

    Fixes:
    - the file was opened in binary mode (``'wb'``) while being written
      ``str`` data, which raises TypeError under Python 3 — open in text mode;
    - the handle leaked if a write raised — use a context manager;
    - the parameter shadowed the ``list`` builtin — renamed (callers in this
      file pass it positionally).
    """
    with open("websScrapeadas.txt", "w") as output:
        for element in lines:
            output.write(element + '\n')
# Top-level scraper script (Python 2-era code: under Python 3,
# requests' page.content is bytes, so the str regex below would need
# adjusting — TODO confirm target interpreter).
BASE_URL = 'http://www.clarin.com'
page = requests.get('http://www.clarin.com')
try:
    # Collect article hrefs; clarin article URLs contain the '_0_' marker.
    compiledRe = re.compile(r'href=[\'"]?(.*_0_[^\'">]+)')
    matches = compiledRe.findall(page.content)
    print(matches)
except Exception as error:
    print(type(error))
    print(error)
# Turn relative hrefs into absolute URLs, persist them both as a pickle
# and as a plain-text list.
modifiedUrls = [(BASE_URL + url) for url in matches]
pickle.dump(modifiedUrls, open("lista.p", "wb"))
writeList(modifiedUrls)
count = 0
for url in modifiedUrls:
    # Best-effort crawl: skip any article that fails to download or parse.
    try:
        page = requests.get(url)
    except Exception as error:
        print(error)
        continue
    try:
        tree = html.fromstring(page.content)
    except Exception as error:
        print(error)
        continue
    # Extract the article body paragraphs and dump them to <count>.txt.
    nota = tree.xpath('//div[@class="nota"]//p/text()')
    file = open(str(count)+".txt", "wb")
    for parrafo in nota:
        file.write(parrafo.encode("utf-8"))
    file.close()
    count +=1
| 21.473684 | 58 | 0.631536 | from lxml import html
from lxml import etree
from io import StringIO, BytesIO
from copy import deepcopy
import requests
import re
import pickle
def writeList(list):
file = open("websScrapeadas.txt", "wb")
for element in list:
file.write(element+ '\n')
file.close()
BASE_URL = 'http://www.clarin.com'
page = requests.get('http://www.clarin.com')
try:
compiledRe = re.compile(r'href=[\'"]?(.*_0_[^\'">]+)')
matches = compiledRe.findall(page.content)
print(matches)
except Exception as error:
print(type(error))
print(error)
modifiedUrls = [(BASE_URL + url) for url in matches]
pickle.dump(modifiedUrls, open("lista.p", "wb"))
writeList(modifiedUrls)
count = 0
for url in modifiedUrls:
try:
page = requests.get(url)
except Exception as error:
print(error)
continue
try:
tree = html.fromstring(page.content)
except Exception as error:
print(error)
continue
nota = tree.xpath('//div[@class="nota"]//p/text()')
file = open(str(count)+".txt", "wb")
for parrafo in nota:
file.write(parrafo.encode("utf-8"))
file.close()
count +=1
| true | true |
f7263292ff3ace28bdfe5eb80b3b1e74c3e9814c | 945 | py | Python | commands/help.py | DevStrikerTech/Clash-of-Clans-Band-Bot | 472c12feeefe053247458c133ee822b16e7537e1 | [
"MIT"
] | 19 | 2021-01-17T02:09:42.000Z | 2021-01-27T00:49:42.000Z | commands/help.py | DevStrikerTech/Clash-of-Clans-Band-Bot | 472c12feeefe053247458c133ee822b16e7537e1 | [
"MIT"
] | null | null | null | commands/help.py | DevStrikerTech/Clash-of-Clans-Band-Bot | 472c12feeefe053247458c133ee822b16e7537e1 | [
"MIT"
] | 20 | 2021-01-26T19:24:23.000Z | 2022-03-10T14:02:49.000Z | from routes.band import write_comment
class Help:
def __init__(self, get_all_post):
self.get_all_post = get_all_post
self.help_information()
def help_information(self):
get_all_post = self.get_all_post
post_response_content = get_all_post['result_data']['items']
for item in post_response_content:
commment_count = item['comment_count']
content = item['content']
post_key = item['post_key']
if '!help' in content and commment_count == 0:
write_comment(comment_body=f'Here are the list of bot commands:\n'
f'!clan -> Clan Information\n'
f'!player -> Player Infomation\n'
f'!warlog -> Warlog Infomation\n'
f'!joke -> Tells a random joke', post_key=post_key)
| 39.375 | 94 | 0.538624 | from routes.band import write_comment
class Help:
def __init__(self, get_all_post):
self.get_all_post = get_all_post
self.help_information()
def help_information(self):
get_all_post = self.get_all_post
post_response_content = get_all_post['result_data']['items']
for item in post_response_content:
commment_count = item['comment_count']
content = item['content']
post_key = item['post_key']
if '!help' in content and commment_count == 0:
write_comment(comment_body=f'Here are the list of bot commands:\n'
f'!clan -> Clan Information\n'
f'!player -> Player Infomation\n'
f'!warlog -> Warlog Infomation\n'
f'!joke -> Tells a random joke', post_key=post_key)
| true | true |
f726331ddb1b01708d32c521d8e3e991bbc3909a | 2,338 | py | Python | kubragen2/build.py | RangelReale/kubragen2 | 2118f1429a9b9da937582db1f41d4f12b78773e2 | [
"MIT"
] | 1 | 2022-02-14T07:31:57.000Z | 2022-02-14T07:31:57.000Z | kubragen2/build.py | RangelReale/kubragen2 | 2118f1429a9b9da937582db1f41d4f12b78773e2 | [
"MIT"
] | null | null | null | kubragen2/build.py | RangelReale/kubragen2 | 2118f1429a9b9da937582db1f41d4f12b78773e2 | [
"MIT"
] | null | null | null | import copy
from typing import Any, MutableMapping, MutableSequence, Union
from .data import DataGetValue, Data, BaseData
from .exception import InvalidOperationError
class DataBuilder:
    """Resolves :class:`Data` wrappers inside nested mappings/sequences,
    deleting disabled entries and substituting enabled ones with their value.
    """

    def build_prop(self, data: Union[MutableMapping, MutableSequence], key: Any) -> None:
        """Resolve a single Data entry of *data* at *key* in place."""
        value = data[key]
        if not isinstance(value, BaseData):
            return
        if not isinstance(value, Data):
            # Raw BaseData carries no enabled/value semantics to resolve.
            raise InvalidOperationError('Cannot use BaseData in build')
        if value.is_enabled():
            data[key] = value.get_value()
        else:
            del data[key]

    def build(self, data: Any, in_place: bool = True) -> Any:
        """Recursively resolve every Data instance inside *data*.

        :param data: the data to mutate
        :param in_place: mutate *data* directly; when False a deep copy is
            processed instead.
        :return: the processed structure, or None for a disabled Data root.
        """
        if not in_place:
            data = copy.deepcopy(data)
        if isinstance(data, MutableMapping):
            # Snapshot the keys: build_prop may delete entries while we scan.
            for key in list(data.keys()):
                self.build_prop(data, key)
            for value in data.values():
                self.build(value)
            return data
        if isinstance(data, MutableSequence):
            # Walk backwards so deletions don't shift unvisited indices.
            for index in reversed(range(len(data))):
                self.build_prop(data, index)
            for entry in data:
                self.build(entry)
            return data
        return self.get_value(data)

    def get_value(self, data: Any) -> Any:
        """Resolve a scalar (non-container) value."""
        return DataGetValue(data)
def BuildData(data: Any, in_place: bool = True) -> Any:
    """Resolve every :class:`Data` instance inside *data*, removing disabled
    entries and replacing enabled ones with their value.

    :param data: the data to mutate
    :param in_place: mutate *data* directly; when False a deep copy is
        processed (via copy.deepcopy) and returned instead.
    :return: the processed structure, or None when *data* itself is a
        disabled Data instance.
    """
    builder = DataBuilder()
    return builder.build(data, in_place=in_place)
| 37.709677 | 117 | 0.60864 | import copy
from typing import Any, MutableMapping, MutableSequence, Union
from .data import DataGetValue, Data, BaseData
from .exception import InvalidOperationError
class DataBuilder:
def build_prop(self, data: Union[MutableMapping, MutableSequence], key: Any) -> None:
if isinstance(data[key], BaseData):
if isinstance(data[key], Data):
if not data[key].is_enabled():
del data[key]
else:
data[key] = data[key].get_value()
else:
raise InvalidOperationError('Cannot use BaseData in build')
def build(self, data: Any, in_place: bool = True) -> Any:
if not in_place:
data = copy.deepcopy(data)
if isinstance(data, MutableMapping):
keylist = list(data.keys())
for key in keylist:
self.build_prop(data, key)
for item in data.values():
self.build(item)
return data
elif isinstance(data, MutableSequence):
for key in range(len(data) - 1, -1, -1):
self.build_prop(data, key)
for item in data:
self.build(item)
return data
return self.get_value(data)
def get_value(self, data: Any) -> Any:
return DataGetValue(data)
def BuildData(data: Any, in_place: bool = True) -> Any:
return DataBuilder().build(data, in_place=in_place)
| true | true |
f72633913ec545cd03a516a113ee6f370da07cd3 | 663 | py | Python | blogsrc/manage.py | mesutcifci/personal-blog | 11fca60e1dc628617c00bb01d55d2fac71d60603 | [
"MIT"
] | 1 | 2020-12-12T01:02:56.000Z | 2020-12-12T01:02:56.000Z | blogsrc/manage.py | mesutcifci/personal-blog | 11fca60e1dc628617c00bb01d55d2fac71d60603 | [
"MIT"
] | null | null | null | blogsrc/manage.py | mesutcifci/personal-blog | 11fca60e1dc628617c00bb01d55d2fac71d60603 | [
"MIT"
] | 1 | 2020-12-11T08:50:14.000Z | 2020-12-11T08:50:14.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blogsrc.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.826087 | 73 | 0.678733 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blogsrc.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f726350f5bcbf20607a3fc3fd9daee81853b87c0 | 5,551 | py | Python | enaml/qt/qt_color_dialog.py | pberkes/enaml | cbcbee929e3117dfe56c0b06dc2385acc832b0e8 | [
"BSD-3-Clause-Clear"
] | 11 | 2015-03-14T14:30:51.000Z | 2022-03-15T13:01:44.000Z | enaml/qt/qt_color_dialog.py | pberkes/enaml | cbcbee929e3117dfe56c0b06dc2385acc832b0e8 | [
"BSD-3-Clause-Clear"
] | 3 | 2015-01-31T11:12:56.000Z | 2022-03-14T00:53:25.000Z | enaml/qt/qt_color_dialog.py | pberkes/enaml | cbcbee929e3117dfe56c0b06dc2385acc832b0e8 | [
"BSD-3-Clause-Clear"
] | 4 | 2015-01-27T01:56:14.000Z | 2021-02-23T07:21:20.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Int, Typed
from enaml.colors import Color
from enaml.widgets.color_dialog import ProxyColorDialog
from .QtCore import Signal
from .QtGui import QColor, QColorDialog
from .qt_toolkit_dialog import QtToolkitDialog
def color_from_qcolor(q):
    """ Convert a QColor into an Enaml Color.

    Parameters
    ----------
    q : QColor
        The Qt color to convert to an Enaml Color.

    Returns
    -------
    result : Color or None
        The equivalent Enaml Color, or None when the QColor is invalid
        (e.g. a default-constructed QColor()).

    """
    if q.isValid():
        return Color(q.red(), q.green(), q.blue(), q.alpha())
    return None
# Guard flags
CURRENT_GUARD = 0x1
class QColorDialogEx(QColorDialog):
    """ A custom QColorDialog which emits a custom finished signal.

    """
    #: A signal emitted at the end of the 'done' method. This works
    #: around the standard QColorDialog behavior which emits the
    #: 'colorSelected' signal *after* the 'finished' signal.
    reallyFinished = Signal(int)

    def done(self, result):
        """ A reimplemented done method.

        This method emits the 'reallyFinished' signal on completion,
        after the base class has emitted its own signals for `result`,
        so listeners observe 'colorSelected' before the dialog's end.

        """
        super(QColorDialogEx, self).done(result)
        self.reallyFinished.emit(result)
class QtColorDialog(QtToolkitDialog, ProxyColorDialog):
    """ A Qt implementation of an Enaml ProxyColorDialog.

    """
    #: A reference to the widget created by the proxy.
    widget = Typed(QColorDialogEx)

    #: Cyclic notification guard. This a bitfield of multiple guards.
    #: CURRENT_GUARD is set while the declaration's current_color is being
    #: written back from the widget, so set_current_color can ignore the echo.
    _guard = Int(0)

    def create_widget(self):
        """ Create the underlying QColorDialog.

        """
        self.widget = QColorDialogEx(self.parent_widget())

    def init_widget(self):
        """ Initialize the underlying widget.

        Pushes the declaration state into the dialog, then connects the
        widget signals (connections last, so initialization does not
        trigger the handlers).

        """
        # Do not call super(...) as it connects the standard 'finished'
        # signal. This widget uses the custom 'reallyFinished' signal.
        d = self.declaration
        self.set_title(d.title)
        self.set_current_color(d.current_color)
        self.set_show_alpha(d.show_alpha)
        self.set_show_buttons(d.show_buttons)
        widget = self.widget
        widget.currentColorChanged.connect(self.on_current_color_changed)
        widget.colorSelected.connect(self.on_color_selected)
        widget.reallyFinished.connect(self.on_finished)

    #--------------------------------------------------------------------------
    # Utility Methods
    #--------------------------------------------------------------------------
    def get_default_title(self):
        """ Get the default window title for the color dialog.

        """
        return u'Select Color'

    #--------------------------------------------------------------------------
    # Signal Handlers
    #--------------------------------------------------------------------------
    def on_current_color_changed(self, qcolor):
        """ Handle the 'currentColorChanged' signal from the widget.

        Writes the new color back to the declaration under CURRENT_GUARD
        so the resulting set_current_color notification is ignored.

        """
        d = self.declaration
        if d is not None:
            self._guard |= CURRENT_GUARD
            try:
                d.current_color = color_from_qcolor(qcolor)
            finally:
                self._guard &= ~CURRENT_GUARD

    def on_color_selected(self, qcolor):
        """ Handle the 'colorSelected' signal from the widget.

        """
        d = self.declaration
        if d is not None:
            d.selected_color = color_from_qcolor(qcolor)

    #--------------------------------------------------------------------------
    # ProxyColorDialog API
    #--------------------------------------------------------------------------
    @staticmethod
    def custom_count():
        """ Get the number of available custom colors.

        """
        return QColorDialog.customCount()

    @staticmethod
    def custom_color(index):
        """ Get the custom color for the given index.

        """
        # customColor returns a QRgb integer, not a QColor.
        qrgb = QColorDialog.customColor(index)
        return color_from_qcolor(QColor.fromRgba(qrgb))

    @staticmethod
    def set_custom_color(index, color):
        """ Set the custom color for the given index.

        """
        QColorDialog.setCustomColor(index, color.argb)

    def set_current_color(self, color):
        """ Set the current color for the underlying widget.

        No-op while CURRENT_GUARD is set (i.e. when the change originated
        from the widget itself).

        """
        if not self._guard & CURRENT_GUARD:
            if color is not None:
                qcolor = QColor.fromRgba(color.argb)
            else:
                # An invalid QColor represents "no color selected".
                qcolor = QColor()
            self.widget.setCurrentColor(qcolor)

    def set_show_alpha(self, show):
        """ Set the show alpha option on the underlying widget.

        """
        widget = self.widget
        opt = widget.options()
        if show:
            opt |= QColorDialog.ShowAlphaChannel
        else:
            opt &= ~QColorDialog.ShowAlphaChannel
        widget.setOptions(opt)

    def set_show_buttons(self, show):
        """ Set the show buttons option on the underlying widget.

        Note the inverted flag: Qt exposes 'NoButtons', so 'show' clears it.

        """
        widget = self.widget
        opt = widget.options()
        if show:
            opt &= ~QColorDialog.NoButtons
        else:
            opt |= QColorDialog.NoButtons
        widget.setOptions(opt)
| 30.168478 | 79 | 0.558278 |
from atom.api import Int, Typed
from enaml.colors import Color
from enaml.widgets.color_dialog import ProxyColorDialog
from .QtCore import Signal
from .QtGui import QColor, QColorDialog
from .qt_toolkit_dialog import QtToolkitDialog
def color_from_qcolor(q):
if not q.isValid():
return None
return Color(q.red(), q.green(), q.blue(), q.alpha())
CURRENT_GUARD = 0x1
class QColorDialogEx(QColorDialog):
reallyFinished = Signal(int)
def done(self, result):
super(QColorDialogEx, self).done(result)
self.reallyFinished.emit(result)
class QtColorDialog(QtToolkitDialog, ProxyColorDialog):
widget = Typed(QColorDialogEx)
_guard = Int(0)
def create_widget(self):
self.widget = QColorDialogEx(self.parent_widget())
def init_widget(self):
d = self.declaration
self.set_title(d.title)
self.set_current_color(d.current_color)
self.set_show_alpha(d.show_alpha)
self.set_show_buttons(d.show_buttons)
widget = self.widget
widget.currentColorChanged.connect(self.on_current_color_changed)
widget.colorSelected.connect(self.on_color_selected)
widget.reallyFinished.connect(self.on_finished)
def get_default_title(self):
return u'Select Color'
def on_current_color_changed(self, qcolor):
d = self.declaration
if d is not None:
self._guard |= CURRENT_GUARD
try:
d.current_color = color_from_qcolor(qcolor)
finally:
self._guard &= ~CURRENT_GUARD
def on_color_selected(self, qcolor):
d = self.declaration
if d is not None:
d.selected_color = color_from_qcolor(qcolor)
@staticmethod
def custom_count():
return QColorDialog.customCount()
@staticmethod
def custom_color(index):
qrgb = QColorDialog.customColor(index)
return color_from_qcolor(QColor.fromRgba(qrgb))
@staticmethod
def set_custom_color(index, color):
QColorDialog.setCustomColor(index, color.argb)
def set_current_color(self, color):
if not self._guard & CURRENT_GUARD:
if color is not None:
qcolor = QColor.fromRgba(color.argb)
else:
qcolor = QColor()
self.widget.setCurrentColor(qcolor)
def set_show_alpha(self, show):
widget = self.widget
opt = widget.options()
if show:
opt |= QColorDialog.ShowAlphaChannel
else:
opt &= ~QColorDialog.ShowAlphaChannel
widget.setOptions(opt)
def set_show_buttons(self, show):
widget = self.widget
opt = widget.options()
if show:
opt &= ~QColorDialog.NoButtons
else:
opt |= QColorDialog.NoButtons
widget.setOptions(opt)
| true | true |
f726390a2fc316670d945fcf0adc8da8f7980de8 | 5,007 | py | Python | JSONLibrary/JSONLibraryKeywords.py | Rezzas/robotframework-jsonlibrary | d0db2b20f729e69e37364527ae60f7be22dff1d4 | [
"Unlicense"
] | null | null | null | JSONLibrary/JSONLibraryKeywords.py | Rezzas/robotframework-jsonlibrary | d0db2b20f729e69e37364527ae60f7be22dff1d4 | [
"Unlicense"
] | null | null | null | JSONLibrary/JSONLibraryKeywords.py | Rezzas/robotframework-jsonlibrary | d0db2b20f729e69e37364527ae60f7be22dff1d4 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os.path
from robot.api import logger
from robot.api.deco import keyword
from jsonpath_rw import Index, Fields
#from jsonpath_rw_ext import parse
from jsonpath_ng.ext import parse
from .version import VERSION
__author__ = 'Traitanit Huangsri'
__email__ = 'traitanit.hua@gmail.com'
__version__ = VERSION
class JSONLibraryKeywords(object):
    """Robot Framework keywords for loading and manipulating JSON documents
    via JSONPath expressions."""

    ROBOT_EXIT_ON_FAILURE = True

    @keyword('Load JSON From File')
    def load_json_from_file(self, file_name):
        """Load JSON from file.

        Return json as a dictionary object.

        Arguments:
            - file_name: absolute json file name

        Return json object (list or dictionary)

        Examples:
        | ${result}=  |  Load Json From File  | /path/to/file.json |
        """
        logger.debug("Check if file exists")
        if not os.path.isfile(file_name):
            logger.error("JSON file: " + file_name + " not found")
            # Carry the offending path in the exception message instead of
            # raising a bare, message-less IOError.
            raise IOError("JSON file: " + file_name + " not found")
        with open(file_name) as json_file:
            data = json.load(json_file)
        return data

    @keyword('Add Object To Json')
    def add_object_to_json(self, json_object, json_path, object_to_add):
        """Add a dictionary or list object to json object using json_path

        Arguments:
            - json_object: json as a dictionary object.
            - json_path: jsonpath expression
            - object_to_add: dictionary or list object to add to json_object which is matched by json_path

        Return new json object.

        Examples:
        | ${dict}=  | Create Dictionary    | latitude=13.1234 | longitude=130.1234 |
        | ${json}=  |  Add Object To Json  | ${json}          | $..address       |  ${dict} |
        """
        json_path_expr = parse(json_path)
        for match in json_path_expr.find(json_object):
            # A matched value can only be one of dict/list, hence elif;
            # isinstance() also accepts subclasses (e.g. OrderedDict).
            if isinstance(match.value, dict):
                match.value.update(object_to_add)
            elif isinstance(match.value, list):
                match.value.append(object_to_add)
        return json_object

    @keyword('Get Value From Json')
    def get_value_from_json(self, json_object, json_path):
        """Get Value From JSON using JSONPath

        Arguments:
            - json_object: json as a dictionary object.
            - json_path: jsonpath expression

        Return array of values

        Examples:
        | ${values}=  |  Get Value From Json  | ${json} |  $..phone_number |
        """
        json_path_expr = parse(json_path)
        return [match.value for match in json_path_expr.find(json_object)]

    @keyword('Update Value To Json')
    def update_value_to_json(self, json_object, json_path, new_value):
        """Update value to JSON using JSONPath

        Arguments:
            - json_object: json as a dictionary object.
            - json_path: jsonpath expression
            - new_value: value to update

        Return new json_object

        Examples:
        | ${json_object}=  |  Update Value To Json  | ${json} |  $..address.streetAddress  |  Ratchadapisek Road |
        """
        json_path_expr = parse(json_path)
        for match in json_path_expr.find(json_object):
            path = match.path
            # Write back through the parent container: list index vs dict key.
            if isinstance(path, Index):
                match.context.value[match.path.index] = new_value
            elif isinstance(path, Fields):
                match.context.value[match.path.fields[0]] = new_value
        return json_object

    @keyword('Delete Object From Json')
    def delete_object_from_json(self, json_object, json_path):
        """Delete Object From JSON using json_path

        Arguments:
            - json_object: json as a dictionary object.
            - json_path: jsonpath expression

        Return new json_object

        Examples:
        | ${json_object}=  |  Delete Object From Json | ${json} |  $..address.streetAddress |
        """
        json_path_expr = parse(json_path)
        for match in json_path_expr.find(json_object):
            path = match.path
            # Delete from the parent container: list index vs dict key.
            if isinstance(path, Index):
                del match.context.value[match.path.index]
            elif isinstance(path, Fields):
                del match.context.value[match.path.fields[0]]
        return json_object

    @keyword('Convert JSON To String')
    def convert_json_to_string(self, json_object):
        """Convert JSON object to string

        Arguments:
            - json_object: json as a dictionary object.

        Return new json_string

        Examples:
        | ${json_str}=  |  Convert JSON To String  | ${json_obj} |
        """
        return json.dumps(json_object)

    @keyword('Convert String to JSON')
    def convert_string_to_json(self, json_string):
        """Convert String to JSON object

        Arguments:
            - json_string: JSON string

        Return new json_object

        Examples:
        | ${json_object}=  |  Convert String to JSON  | ${json_string} |
        """
        return json.loads(json_string)
| 32.512987 | 113 | 0.607749 |
import json
import os.path
from robot.api import logger
from robot.api.deco import keyword
from jsonpath_rw import Index, Fields
from jsonpath_ng.ext import parse
from .version import VERSION
__author__ = 'Traitanit Huangsri'
__email__ = 'traitanit.hua@gmail.com'
__version__ = VERSION
class JSONLibraryKeywords(object):
    """Robot Framework keywords for loading and manipulating JSON documents
    via JSONPath expressions."""
    ROBOT_EXIT_ON_FAILURE = True
    @keyword('Load JSON From File')
    def load_json_from_file(self, file_name):
        """Load JSON from ``file_name`` and return it as a dict or list.

        Raises IOError when the file does not exist.
        """
        logger.debug("Check if file exists")
        if os.path.isfile(file_name) is False:
            logger.error("JSON file: " + file_name + " not found")
            raise IOError
        with open(file_name) as json_file:
            data = json.load(json_file)
        return data
    @keyword('Add Object To Json')
    def add_object_to_json(self, json_object, json_path, object_to_add):
        """Merge ``object_to_add`` into every dict, or append it to every
        list, matched by ``json_path``. Returns the (mutated) json_object."""
        json_path_expr = parse(json_path)
        for match in json_path_expr.find(json_object):
            if type(match.value) is dict:
                match.value.update(object_to_add)
            if type(match.value) is list:
                match.value.append(object_to_add)
        return json_object
    @keyword('Get Value From Json')
    def get_value_from_json(self, json_object, json_path):
        """Return the list of values matched by ``json_path``."""
        json_path_expr = parse(json_path)
        return [match.value for match in json_path_expr.find(json_object)]
    @keyword('Update Value To Json')
    def update_value_to_json(self, json_object, json_path, new_value):
        """Set every match of ``json_path`` to ``new_value`` (writes through
        the parent container: list index or dict key)."""
        json_path_expr = parse(json_path)
        for match in json_path_expr.find(json_object):
            path = match.path
            if isinstance(path, Index):
                match.context.value[match.path.index] = new_value
            elif isinstance(path, Fields):
                match.context.value[match.path.fields[0]] = new_value
        return json_object
    @keyword('Delete Object From Json')
    def delete_object_from_json(self, json_object, json_path):
        """Delete every match of ``json_path`` from its parent container."""
        json_path_expr = parse(json_path)
        for match in json_path_expr.find(json_object):
            path = match.path
            if isinstance(path, Index):
                del(match.context.value[match.path.index])
            elif isinstance(path, Fields):
                del(match.context.value[match.path.fields[0]])
        return json_object
    @keyword('Convert JSON To String')
    def convert_json_to_string(self, json_object):
        """Serialize a JSON object to a string."""
        return json.dumps(json_object)
    @keyword('Convert String to JSON')
    def convert_string_to_json(self, json_string):
        """Parse a JSON string into a Python object."""
        return json.loads(json_string)
| true | true |
f7263995555ef87a94a1d3d6fc18898696a9f9f5 | 4,522 | py | Python | plugin/entity/zettel.py | tbouska/mkdocs-zettelkasten | 3638ee8028462e98aa088dd075a929e11f6fe882 | [
"MIT"
] | null | null | null | plugin/entity/zettel.py | tbouska/mkdocs-zettelkasten | 3638ee8028462e98aa088dd075a929e11f6fe882 | [
"MIT"
] | 7 | 2021-11-25T07:59:16.000Z | 2021-11-29T18:37:29.000Z | plugin/entity/zettel.py | tbouska/mkdocs-zettelkasten | 3638ee8028462e98aa088dd075a929e11f6fe882 | [
"MIT"
] | null | null | null | import datetime
import os
import re
import yaml
from pathlib import Path
from plugin.patterns import WIKI_LINK, MD_LINK
from plugin.gitutil import GitUtil
class Zettel:
    """A single zettel note parsed from a file with a ``---``-delimited
    YAML front-matter header followed by a markdown body."""

    def __init__(self, abs_src_path):
        """Parse the file at ``abs_src_path``.

        Raises:
            ValueError: if the file is unreadable or not in zettel format.
        """
        self.id = 0
        self.title = ""
        self.path = abs_src_path
        self.backlinks = []
        self.links = []
        self._parse_file()

    def _parse_file(self):
        """Read the YAML header, collect body links, and set the metadata."""
        header = []
        is_reading_header = False
        is_reading_body = False
        alternative_title = ""
        try:
            with open(self.path, encoding="utf-8-sig", errors="strict") as f:
                while line := f.readline():
                    if line.strip() == "---":
                        if not is_reading_header and not is_reading_body:
                            # First delimiter: header begins.
                            is_reading_header = True
                            continue
                        elif not is_reading_body:
                            # Second delimiter: header ends, body begins.
                            is_reading_header = False
                            is_reading_body = True
                            continue
                        else:
                            # Any further delimiter stops the scan.
                            break
                    if is_reading_header:
                        header.append(line)
                    if is_reading_body:
                        # A top-level markdown heading is a title fallback.
                        if line.lstrip().startswith("# "):
                            alternative_title = line.strip()[2:]
                        match_wiki_link = WIKI_LINK.finditer(line)
                        match_md_link = MD_LINK.finditer(line)
                        for m in match_wiki_link:
                            self.links.append(m.groupdict()["url"])
                        for m in match_md_link:
                            self.links.append(m.groupdict()["url"])
            meta = yaml.load("".join(header), Loader=yaml.FullLoader)
        # The original code had three identical handlers; collapse them.
        except (OSError, ValueError, AttributeError):
            raise ValueError("File is not in zettel format")
        self._set_metadata(meta, alternative_title)

    def _set_metadata(self, meta, alternative_title):
        """Populate ``id``/``title`` from the YAML header, with fallbacks."""
        if not meta or "id" not in meta:
            raise ValueError("File is not in zettel format")
        self.id = meta["id"]
        if "title" in meta:
            self.title = meta["title"]
        elif alternative_title:
            print("Using alternative title " + self.path)
            self.title = alternative_title
        else:
            self.title = self._get_title_from_filename()
        self._set_last_update_date(meta)

    def _get_title_from_filename(self):
        """Derive a human-readable title from the file name."""
        title = Path(self.path).stem
        # Strip a leading 14-digit zettel timestamp, if present.
        title = re.sub(r"^\d{14}", "", title)
        title = title.replace("_", " ").replace("-", " ")
        # Collapse whitespace runs left over from the replacements.
        title = " ".join(title.split())
        title = title.strip()
        title = title.capitalize()
        return title

    def _set_last_update_date(self, meta):
        """Choose the newest of the header dates and the file revision date."""
        date = ""
        if "last_update" in meta:
            date = _get_date_from_string(meta["last_update"])
        if not date and "date" in meta:
            date = _get_date_from_string(meta["date"])
        if not date:
            # Zettel ids commonly encode a timestamp — TODO confirm.
            date = _get_date_from_string(self.id)
        if not date:
            date = datetime.datetime.today()
        # Remote-hosted paths use git history; local files use mtime.
        if "//github.com" in self.path or "//gitlab.com" in self.path:
            git = GitUtil()
            revision_date = git.get_revision_date_for_file(self.path)
        else:
            revision_date = datetime.datetime.fromtimestamp(os.path.getmtime(self.path))
        if revision_date.timestamp() > date.timestamp():
            date = revision_date
        self.last_update_date = date.strftime("%Y-%m-%d")

    def add_backlinks(self, sources):
        """Record every other zettel source as a backlink of this one."""
        def is_valid(source):
            return (
                self.path != source.abs_src_path
                and source not in self.backlinks
                and source.is_zettel
            )
        self.backlinks = [s for s in sources if is_valid(s)]
def _get_date_from_string(string):
string = str(string)
try:
date = datetime.datetime.strptime(string, "%Y-%m-%d %H:%M:%S")
except ValueError:
try:
date = datetime.datetime.strptime(string, "%Y%m%d%H%M%S")
except ValueError:
try:
date = datetime.datetime.fromisoformat(string)
except ValueError:
date = ""
return date
| 31.84507 | 88 | 0.534277 | import datetime
import os
import re
import yaml
from pathlib import Path
from plugin.patterns import WIKI_LINK, MD_LINK
from plugin.gitutil import GitUtil
class Zettel:
    """A single zettel note parsed from a file with a ``---``-delimited
    YAML front-matter header followed by a markdown body."""
    def __init__(self, abs_src_path):
        """Parse the file at ``abs_src_path``; raises ValueError if it is
        not in zettel format."""
        self.id = 0
        self.title = ""
        self.path = abs_src_path
        self.backlinks = []
        self.links = []
        self._parse_file()
    def _parse_file(self):
        """Read the YAML header, collect body links, and set the metadata."""
        header = []
        is_reading_header = False
        is_reading_body = False
        alternative_title = ""
        try:
            with open(self.path, encoding="utf-8-sig", errors="strict") as f:
                while line := f.readline():
                    if line.strip() == "---":
                        # First "---" opens the header, second opens the
                        # body, any further one stops the scan.
                        if not is_reading_header and not is_reading_body:
                            is_reading_header = True
                            continue
                        elif not is_reading_body:
                            is_reading_header = False
                            is_reading_body = True
                            continue
                        else:
                            break
                    if is_reading_header:
                        header.append(line)
                    if is_reading_body:
                        # A top-level markdown heading is a title fallback.
                        if line.lstrip().startswith("# "):
                            alternative_title = line.strip()[2:]
                        match_wiki_link = WIKI_LINK.finditer(line)
                        match_md_link = MD_LINK.finditer(line)
                        for m in match_wiki_link:
                            self.links.append(m.groupdict()["url"])
                        for m in match_md_link:
                            self.links.append(m.groupdict()["url"])
            meta = yaml.load("".join(header), Loader=yaml.FullLoader)
        except OSError:
            raise ValueError("File is not in zettel format")
        except ValueError:
            raise ValueError("File is not in zettel format")
        except AttributeError:
            raise ValueError("File is not in zettel format")
        self._set_metadata(meta, alternative_title)
    def _set_metadata(self, meta, alternative_title):
        """Populate ``id``/``title`` from the YAML header, with fallbacks."""
        if not meta or "id" not in meta.keys():
            raise ValueError("File is not in zettel format")
        else:
            self.id = meta["id"]
        if "title" in meta.keys():
            self.title = meta["title"]
        elif alternative_title:
            print("Using alternative title " + self.path)
            self.title = alternative_title
        else:
            self.title = self._get_title_from_filename()
        self._set_last_update_date(meta)
    def _get_title_from_filename(self):
        """Derive a human-readable title from the file name."""
        title = Path(self.path).stem
        # Strip a leading 14-digit zettel timestamp, if present.
        title = re.sub(r"^\d{14}", "", title)
        title = title.replace("_", " ").replace("-", " ")
        title = " ".join(title.split())
        title = title.strip()
        title = title.capitalize()
        return title
    def _set_last_update_date(self, meta):
        """Choose the newest of the header dates and the file revision date."""
        date = ""
        if "last_update" in meta.keys():
            date = _get_date_from_string(meta["last_update"])
        if not date and "date" in meta.keys():
            date = _get_date_from_string(meta["date"])
        if not date:
            date = _get_date_from_string(self.id)
        if not date:
            date = datetime.datetime.today()
        # Remote-hosted paths use git history; local files use mtime.
        if "//github.com" in self.path or "//gitlab.com" in self.path:
            git = GitUtil()
            revision_date = git.get_revision_date_for_file(self.path)
        else:
            revision_date = datetime.datetime.fromtimestamp(os.path.getmtime(self.path))
        if revision_date.timestamp() > date.timestamp():
            date = revision_date
        self.last_update_date = date.strftime("%Y-%m-%d")
    def add_backlinks(self, sources):
        """Record every other zettel source as a backlink of this one."""
        def is_valid(source):
            return (
                self.path != source.abs_src_path
                and source not in self.backlinks
                and source.is_zettel
            )
        self.backlinks = [s for s in sources if is_valid(s)]
def _get_date_from_string(string):
    """Parse *string* into a datetime, trying several formats in turn;
    returns the empty string when nothing matches (callers test truthiness)."""
    string = str(string)
    try:
        date = datetime.datetime.strptime(string, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        try:
            # Compact zettel-id style timestamp.
            date = datetime.datetime.strptime(string, "%Y%m%d%H%M%S")
        except ValueError:
            try:
                date = datetime.datetime.fromisoformat(string)
            except ValueError:
                date = ""
    return date
| true | true |
f72639a5ff51641e1d986f7c27245a78928e246f | 4,440 | py | Python | src/utils/data_loader.py | TheBlueHawk/RANLP21-70 | 3d329a6d385fac4e8664cb1bb88a29411befb767 | [
"MIT"
] | null | null | null | src/utils/data_loader.py | TheBlueHawk/RANLP21-70 | 3d329a6d385fac4e8664cb1bb88a29411befb767 | [
"MIT"
] | null | null | null | src/utils/data_loader.py | TheBlueHawk/RANLP21-70 | 3d329a6d385fac4e8664cb1bb88a29411befb767 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : data_loader.py
# @Time : Created at 2019-05-31
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import random
from torch.utils.data import Dataset, DataLoader
from utils.text_process import *
class GANDataset(Dataset):
    """Minimal map-style Dataset wrapping an in-memory list of samples.
    The ``data`` attribute is accessed directly by the iterator classes."""
    def __init__(self, data):
        self.data = data
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
class GenDataIter:
    """Batched iterator over generator training data.

    ``samples`` may be a Tensor of token-id sequences or a filename of
    tokenized real data.
    """

    def __init__(self, samples, if_test_data=False, shuffle=None):
        self.batch_size = cfg.batch_size
        self.max_seq_len = cfg.max_seq_len
        self.start_letter = cfg.start_letter
        # Bug fix: `if not shuffle` treated an explicit shuffle=False the
        # same as shuffle=None and silently fell back to cfg.data_shuffle.
        self.shuffle = cfg.data_shuffle if shuffle is None else shuffle
        if cfg.if_real_data:
            self.word2idx_dict, self.idx2word_dict = load_dict(cfg.dataset)
            if if_test_data:  # used for the classifier
                self.word2idx_dict, self.idx2word_dict = load_test_dict(cfg.dataset)
        self.loader = DataLoader(
            dataset=GANDataset(self.__read_data__(samples)),
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            drop_last=True)
        self.input = self._all_data_('input')
        self.target = self._all_data_('target')

    def __read_data__(self, samples):
        """
        input: same as target, but start with start_letter.

        Returns a list of {'input', 'target'} dicts, or None for an
        unsupported ``samples`` type.
        """
        if isinstance(samples, torch.Tensor):  # Tensor
            inp, target = self.prepare(samples)
            all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
        elif isinstance(samples, str):  # filename
            inp, target = self.load_data(samples)
            all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
        else:
            all_data = None
        return all_data

    def random_batch(self):
        """Randomly choose a batch from loader, please note that the data should not be shuffled."""
        idx = random.randint(0, len(self.loader) - 1)
        return list(self.loader)[idx]

    def _all_data_(self, col):
        """Stack column ``col`` of every stored sample into one tensor."""
        return torch.cat([data[col].unsqueeze(0) for data in self.loader.dataset.data], 0)

    @staticmethod
    def prepare(samples, gpu=False):
        """Add start_letter to samples as inp, target same as samples."""
        inp = torch.zeros(samples.size()).long()
        target = samples
        # inp is target shifted right by one, prefixed with start_letter.
        inp[:, 0] = cfg.start_letter
        inp[:, 1:] = target[:, :cfg.max_seq_len - 1]
        if gpu:
            return inp.cuda(), target.cuda()
        return inp, target

    def load_data(self, filename):
        """Load real data from local file."""
        self.tokens = get_tokenlized(filename)
        samples_index = tokens_to_tensor(self.tokens, self.word2idx_dict)
        return self.prepare(samples_index)
class DisDataIter:
    """Batched iterator over discriminator data (positive + negative samples)."""

    def __init__(self, pos_samples, neg_samples, shuffle=None):
        self.batch_size = cfg.batch_size
        self.max_seq_len = cfg.max_seq_len
        self.start_letter = cfg.start_letter
        # Bug fix: same None-vs-False confusion as GenDataIter; an explicit
        # shuffle=False used to be ignored in favour of cfg.data_shuffle.
        self.shuffle = cfg.data_shuffle if shuffle is None else shuffle
        self.loader = DataLoader(
            dataset=GANDataset(self.__read_data__(pos_samples, neg_samples)),
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            drop_last=True)

    def __read_data__(self, pos_samples, neg_samples):
        """
        input: same as target, but start with start_letter.
        """
        inp, target = self.prepare(pos_samples, neg_samples)
        all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
        return all_data

    def random_batch(self):
        """Randomly choose a batch from loader; the data should not be shuffled."""
        idx = random.randint(0, len(self.loader) - 1)
        return list(self.loader)[idx]

    def prepare(self, pos_samples, neg_samples, gpu=False):
        """Build inp (stacked samples) and target (1 for positive, 0 for negative)."""
        # .detach() is required so discriminator batches do not keep the
        # generator's autograd graph alive.
        inp = torch.cat((pos_samples, neg_samples), dim=0).long().detach()
        target = torch.ones(inp.size(0)).long()
        target[pos_samples.size(0):] = 0
        # Shuffle rows jointly so inp/target stay aligned.
        perm = torch.randperm(inp.size(0))
        inp = inp[perm]
        target = target[perm]
        if gpu:
            return inp.cuda(), target.cuda()
        return inp, target
| 33.89313 | 100 | 0.611486 |
import random
from torch.utils.data import Dataset, DataLoader
from utils.text_process import *
class GANDataset(Dataset):
    """Minimal map-style Dataset wrapping an in-memory list of samples.
    The ``data`` attribute is accessed directly by the iterator classes."""
    def __init__(self, data):
        self.data = data
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
class GenDataIter:
    """Batched iterator over generator training data; ``samples`` may be a
    Tensor of token-id sequences or a filename of tokenized real data."""
    def __init__(self, samples, if_test_data=False, shuffle=None):
        self.batch_size = cfg.batch_size
        self.max_seq_len = cfg.max_seq_len
        self.start_letter = cfg.start_letter
        # NOTE(review): `if not shuffle` treats an explicit shuffle=False the
        # same as shuffle=None; `shuffle is None` is probably intended.
        self.shuffle = cfg.data_shuffle if not shuffle else shuffle
        if cfg.if_real_data:
            self.word2idx_dict, self.idx2word_dict = load_dict(cfg.dataset)
            if if_test_data:
                # Test-split vocabulary (used for the classifier).
                self.word2idx_dict, self.idx2word_dict = load_test_dict(cfg.dataset)
        self.loader = DataLoader(
            dataset=GANDataset(self.__read_data__(samples)),
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            drop_last=True)
        self.input = self._all_data_('input')
        self.target = self._all_data_('target')
    def __read_data__(self, samples):
        """Build {'input','target'} pairs; input is target shifted right by
        one, starting with start_letter. Returns None on unsupported input."""
        if isinstance(samples, torch.Tensor):
            inp, target = self.prepare(samples)
            all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
        elif isinstance(samples, str):
            inp, target = self.load_data(samples)
            all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
        else:
            all_data = None
        return all_data
    def random_batch(self):
        """Randomly choose a batch from the loader (data should not be shuffled)."""
        idx = random.randint(0, len(self.loader) - 1)
        return list(self.loader)[idx]
    def _all_data_(self, col):
        """Stack column ``col`` of every stored sample into one tensor."""
        return torch.cat([data[col].unsqueeze(0) for data in self.loader.dataset.data], 0)
    @staticmethod
    def prepare(samples, gpu=False):
        """Add start_letter to samples as inp; target is samples unchanged."""
        inp = torch.zeros(samples.size()).long()
        target = samples
        inp[:, 0] = cfg.start_letter
        inp[:, 1:] = target[:, :cfg.max_seq_len - 1]
        if gpu:
            return inp.cuda(), target.cuda()
        return inp, target
    def load_data(self, filename):
        """Load real data from a local file."""
        self.tokens = get_tokenlized(filename)
        samples_index = tokens_to_tensor(self.tokens, self.word2idx_dict)
        return self.prepare(samples_index)
class DisDataIter:
    """Batched iterator over discriminator data (positive + negative samples)."""
    def __init__(self, pos_samples, neg_samples, shuffle=None):
        self.batch_size = cfg.batch_size
        self.max_seq_len = cfg.max_seq_len
        self.start_letter = cfg.start_letter
        # NOTE(review): `if not shuffle` treats an explicit shuffle=False the
        # same as shuffle=None; `shuffle is None` is probably intended.
        self.shuffle = cfg.data_shuffle if not shuffle else shuffle
        self.loader = DataLoader(
            dataset=GANDataset(self.__read_data__(pos_samples, neg_samples)),
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            drop_last=True)
    def __read_data__(self, pos_samples, neg_samples):
        """Build the list of {'input','target'} pairs for the DataLoader."""
        inp, target = self.prepare(pos_samples, neg_samples)
        all_data = [{'input': i, 'target': t} for (i, t) in zip(inp, target)]
        return all_data
    def random_batch(self):
        """Randomly choose a batch from the loader."""
        idx = random.randint(0, len(self.loader) - 1)
        return list(self.loader)[idx]
    def prepare(self, pos_samples, neg_samples, gpu=False):
        """Stack samples into inp; target is 1 for positive rows, 0 for negative."""
        # .detach() keeps discriminator batches out of the generator graph.
        inp = torch.cat((pos_samples, neg_samples), dim=0).long().detach()
        target = torch.ones(inp.size(0)).long()
        target[pos_samples.size(0):] = 0
        # Shuffle rows jointly so inp/target stay aligned.
        perm = torch.randperm(inp.size(0))
        inp = inp[perm]
        target = target[perm]
        if gpu:
            return inp.cuda(), target.cuda()
        return inp, target
| true | true |
f7263b27e2f8e5f7ffa8f4a9687f2edbb569a9d1 | 598 | py | Python | plotly/validators/mesh3d/colorbar/tickfont/_family.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/mesh3d/colorbar/tickfont/_family.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/mesh3d/colorbar/tickfont/_family.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``mesh3d.colorbar.tickfont.family`` property."""

    def __init__(
        self,
        plotly_name='family',
        parent_name='mesh3d.colorbar.tickfont',
        **kwargs
    ):
        # Pull the defaults out of kwargs first so callers may override them.
        edit_type = kwargs.pop('edit_type', 'colorbars')
        no_blank = kwargs.pop('no_blank', True)
        role = kwargs.pop('role', 'style')
        strict = kwargs.pop('strict', True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            role=role,
            strict=strict,
            **kwargs
        )
| 28.47619 | 68 | 0.602007 | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``mesh3d.colorbar.tickfont.family`` property."""
    def __init__(
        self,
        plotly_name='family',
        parent_name='mesh3d.colorbar.tickfont',
        **kwargs
    ):
        # kwargs.pop() lets callers override the defaults below.
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'colorbars'),
            no_blank=kwargs.pop('no_blank', True),
            role=kwargs.pop('role', 'style'),
            strict=kwargs.pop('strict', True),
            **kwargs
        )
| true | true |
f7263b5df5fb3177603ae56acb9d953605d88e9b | 1,670 | py | Python | dtypes/radix_sort.py | jay-tyler/data-structures | b4f4bcb091cf4be4c4cc29d8a687af3d063090f5 | [
"MIT"
] | 2 | 2015-08-25T02:51:47.000Z | 2019-11-03T20:00:16.000Z | dtypes/radix_sort.py | jay-tyler/data-structures | b4f4bcb091cf4be4c4cc29d8a687af3d063090f5 | [
"MIT"
] | 9 | 2015-09-19T20:51:14.000Z | 2015-09-28T07:06:50.000Z | dtypes/radix_sort.py | jay-tyler/data-structures | b4f4bcb091cf4be4c4cc29d8a687af3d063090f5 | [
"MIT"
] | 1 | 2020-04-22T21:24:36.000Z | 2020-04-22T21:24:36.000Z | def radsort(unslist):
"""Returns a sorted list. Accepts only a list containing positive
integers."""
# find max for iterative solution
maxval = max(unslist)
ntimes = len(str(maxval))
slist = unslist[:]
for n in range(ntimes):
# Making radix bins
bins = [[] for _ in range(10)]
# Place each list item in appropriate bin
for i, item in enumerate(slist):
inspecting = slist[i]
digval = _get_nth_digit(inspecting, n)
bins[digval].append(inspecting)
slist = []
# Flatten bins to list
for bin in bins:
slist.extend(bin)
return slist
def _get_nth_digit(num, n):
"""For a positive integer, get the value at the nth digit;
indexing starts at 0"""
return ((num % (10 ** (n + 1))) - (num % (10 ** n))) // 10 ** n
if __name__ == "__main__":
"""Test time performance for best and worst cases"""
import time
size = 1000
# Best case: when all numbers in the list have the same number of digits.
good_list = range(size + 1)
start = time.time()
for i in range(1000):
radsort(good_list)
stop = time.time()
best_time = (stop - start)
# Worst case: When there is one very large outlier.
bad_list = [1 for _ in range(size)] + [10**10]
start = time.time()
for i in range(1000):
radsort(bad_list)
stop = time.time()
worst_time = (stop - start)
print "Best case is {} times better than worst for n=1000\n".format(
worst_time/best_time)
print "Best case: {0:.{1}f} ms\nWorst case: {2:.{3}f} ms".format(
best_time, 5, worst_time, 5)
| 27.377049 | 77 | 0.58982 | def radsort(unslist):
    """Returns a sorted list. Accepts only a list containing positive
    integers."""
    # Number of passes = number of digits in the largest value.
    maxval = max(unslist)
    ntimes = len(str(maxval))
    slist = unslist[:]
    for n in range(ntimes):
        # One bucket per decimal digit.
        bins = [[] for _ in range(10)]
        # Stable distribution of items by their n-th digit.
        # NOTE(review): `inspecting = slist[i]` duplicates `item`.
        for i, item in enumerate(slist):
            inspecting = slist[i]
            digval = _get_nth_digit(inspecting, n)
            bins[digval].append(inspecting)
        slist = []
        # Flatten the buckets back into a single list.
        for bin in bins:
            slist.extend(bin)
    return slist
def _get_nth_digit(num, n):
    """For a positive integer, get the value at the nth digit;
    indexing starts at 0"""
    # Difference of the two moduli isolates digit n, then shift it down.
    return ((num % (10 ** (n + 1))) - (num % (10 ** n))) // 10 ** n
if __name__ == "__main__":
"""Test time performance for best and worst cases"""
import time
size = 1000
good_list = range(size + 1)
start = time.time()
for i in range(1000):
radsort(good_list)
stop = time.time()
best_time = (stop - start)
bad_list = [1 for _ in range(size)] + [10**10]
start = time.time()
for i in range(1000):
radsort(bad_list)
stop = time.time()
worst_time = (stop - start)
print "Best case is {} times better than worst for n=1000\n".format(
worst_time/best_time)
print "Best case: {0:.{1}f} ms\nWorst case: {2:.{3}f} ms".format(
best_time, 5, worst_time, 5)
| false | true |
f7263ccf5a3a003b21440cf38813ca5d254016b4 | 15,637 | py | Python | meshrcnn/modeling/roi_heads/roi_heads.py | hsk9767/mesh_rcnn_copy | 6dd4d9ea8af33c03a084e34c7d16eeaddfe924ae | [
"BSD-3-Clause"
] | 7 | 2020-03-06T20:49:36.000Z | 2022-03-09T11:09:31.000Z | meshrcnn/modeling/roi_heads/roi_heads.py | hsk9767/mesh_rcnn_copy | 6dd4d9ea8af33c03a084e34c7d16eeaddfe924ae | [
"BSD-3-Clause"
] | null | null | null | meshrcnn/modeling/roi_heads/roi_heads.py | hsk9767/mesh_rcnn_copy | 6dd4d9ea8af33c03a084e34c7d16eeaddfe924ae | [
"BSD-3-Clause"
] | 2 | 2020-04-14T02:14:25.000Z | 2020-05-06T14:35:41.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict
import torch
from detectron2.layers import ShapeSpec, cat
from detectron2.modeling import ROI_HEADS_REGISTRY
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from detectron2.modeling.roi_heads.roi_heads import StandardROIHeads, select_foreground_proposals
from pytorch3d.ops import cubify
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere
from meshrcnn.modeling.roi_heads.mask_head import mask_rcnn_loss
from meshrcnn.modeling.roi_heads.mesh_head import (
build_mesh_head,
mesh_rcnn_inference,
mesh_rcnn_loss,
)
from meshrcnn.modeling.roi_heads.voxel_head import (
build_voxel_head,
voxel_rcnn_inference,
voxel_rcnn_loss,
)
from meshrcnn.modeling.roi_heads.z_head import build_z_head, z_rcnn_inference, z_rcnn_loss
from meshrcnn.utils import vis as vis_utils
@ROI_HEADS_REGISTRY.register()
class MeshRCNNROIHeads(StandardROIHeads):
"""
The ROI specific heads for Mesh R-CNN
"""
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        """Build the standard detectron2 ROI heads plus the Mesh R-CNN
        specific heads (depth Z, voxel, mesh).

        Args:
            cfg: detectron2 config node.
            input_shape: per-feature-map shapes from the backbone.
        """
        super().__init__(cfg, input_shape)
        self._init_z_head(cfg, input_shape)
        self._init_voxel_head(cfg, input_shape)
        self._init_mesh_head(cfg, input_shape)
        # If MODEL.VIS_MINIBATCH is True we store minibatch targets
        # for visualization purposes
        self._vis = cfg.MODEL.VIS_MINIBATCH
        self._misc = {}  # scratch storage for visualization tensors
        self._vis_dir = cfg.OUTPUT_DIR
def _init_z_head(self, cfg, input_shape):
# fmt: off
self.zpred_on = cfg.MODEL.ZPRED_ON
if not self.zpred_on:
return
z_pooler_resolution = cfg.MODEL.ROI_Z_HEAD.POOLER_RESOLUTION
z_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
z_sampling_ratio = cfg.MODEL.ROI_Z_HEAD.POOLER_SAMPLING_RATIO
z_pooler_type = cfg.MODEL.ROI_Z_HEAD.POOLER_TYPE
# fmt: on
self.z_loss_weight = cfg.MODEL.ROI_Z_HEAD.Z_REG_WEIGHT
self.z_smooth_l1_beta = cfg.MODEL.ROI_Z_HEAD.SMOOTH_L1_BETA
in_channels = [input_shape[f].channels for f in self.in_features][0]
self.z_pooler = ROIPooler(
output_size=z_pooler_resolution,
scales=z_pooler_scales,
sampling_ratio=z_sampling_ratio,
pooler_type=z_pooler_type,
)
shape = ShapeSpec(
channels=in_channels, width=z_pooler_resolution, height=z_pooler_resolution
)
self.z_head = build_z_head(cfg, shape)
def _init_voxel_head(self, cfg, input_shape):
# fmt: off
self.voxel_on = cfg.MODEL.VOXEL_ON
if not self.voxel_on:
return
voxel_pooler_resolution = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_RESOLUTION
voxel_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
voxel_sampling_ratio = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_SAMPLING_RATIO
voxel_pooler_type = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_TYPE
# fmt: on
self.voxel_loss_weight = cfg.MODEL.ROI_VOXEL_HEAD.LOSS_WEIGHT
self.cls_agnostic_voxel = cfg.MODEL.ROI_VOXEL_HEAD.CLS_AGNOSTIC_VOXEL
self.cubify_thresh = cfg.MODEL.ROI_VOXEL_HEAD.CUBIFY_THRESH
in_channels = [input_shape[f].channels for f in self.in_features][0]
self.voxel_pooler = ROIPooler(
output_size=voxel_pooler_resolution,
scales=voxel_pooler_scales,
sampling_ratio=voxel_sampling_ratio,
pooler_type=voxel_pooler_type,
)
shape = ShapeSpec(
channels=in_channels, width=voxel_pooler_resolution, height=voxel_pooler_resolution
)
self.voxel_head = build_voxel_head(cfg, shape)
def _init_mesh_head(self, cfg, input_shape):
# fmt: off
self.mesh_on = cfg.MODEL.MESH_ON
if not self.mesh_on:
return
mesh_pooler_resolution = cfg.MODEL.ROI_MESH_HEAD.POOLER_RESOLUTION
mesh_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
mesh_sampling_ratio = cfg.MODEL.ROI_MESH_HEAD.POOLER_SAMPLING_RATIO
mesh_pooler_type = cfg.MODEL.ROI_MESH_HEAD.POOLER_TYPE
# fmt: on
self.chamfer_loss_weight = cfg.MODEL.ROI_MESH_HEAD.CHAMFER_LOSS_WEIGHT
self.normals_loss_weight = cfg.MODEL.ROI_MESH_HEAD.NORMALS_LOSS_WEIGHT
self.edge_loss_weight = cfg.MODEL.ROI_MESH_HEAD.EDGE_LOSS_WEIGHT
self.gt_num_samples = cfg.MODEL.ROI_MESH_HEAD.GT_NUM_SAMPLES
self.pred_num_samples = cfg.MODEL.ROI_MESH_HEAD.PRED_NUM_SAMPLES
self.gt_coord_thresh = cfg.MODEL.ROI_MESH_HEAD.GT_COORD_THRESH
self.ico_sphere_level = cfg.MODEL.ROI_MESH_HEAD.ICO_SPHERE_LEVEL
in_channels = [input_shape[f].channels for f in self.in_features][0]
self.mesh_pooler = ROIPooler(
output_size=mesh_pooler_resolution,
scales=mesh_pooler_scales,
sampling_ratio=mesh_sampling_ratio,
pooler_type=mesh_pooler_type,
)
self.mesh_head = build_mesh_head(
cfg,
ShapeSpec(
channels=in_channels, height=mesh_pooler_resolution, width=mesh_pooler_resolution
),
)
    def forward(self, images, features, proposals, targets=None):
        """
        See :class:`ROIHeads.forward`.

        Returns ``([], losses)`` in training and ``(pred_instances, {})``
        in inference.
        """
        if self._vis:
            # Keep a reference for visualize_minibatch(); the dict entry
            # survives the `del images` below.
            self._misc["images"] = images
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        if self._vis:
            self._misc["proposals"] = proposals
        if self.training:
            losses = self._forward_box(features, proposals)
            # During training the proposals used by the box head are
            # used by the z, mask, voxel & mesh head.
            losses.update(self._forward_z(features, proposals))
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_shape(features, proposals))
            # print minibatch examples
            if self._vis:
                vis_utils.visualize_minibatch(self._misc["images"], self._misc, self._vis_dir, True)
            return [], losses
        else:
            pred_instances = self._forward_box(features, proposals)
            # During inference cascaded prediction is used: the mask and keypoints heads are only
            # applied to the top scoring box detections.
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}
def forward_with_given_boxes(self, features, instances):
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (Instances): the same `Instances` object, with extra
fields such as `pred_masks` or `pred_voxels`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
instances = self._forward_z(features, instances)
instances = self._forward_mask(features, instances)
instances = self._forward_shape(features, instances)
return instances
def _forward_z(self, features, instances):
"""
Forward logic of the z prediction branch.
"""
if not self.zpred_on:
return {} if self.training else instances
features = [features[f] for f in self.in_features]
if self.training:
# The loss is only defined on positive proposals.
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
z_features = self.z_pooler(features, proposal_boxes)
z_pred = self.z_head(z_features)
src_boxes = cat([p.tensor for p in proposal_boxes])
loss_z_reg = z_rcnn_loss(
z_pred,
proposals,
src_boxes,
loss_weight=self.z_loss_weight,
smooth_l1_beta=self.z_smooth_l1_beta,
)
return {"loss_z_reg": loss_z_reg}
else:
pred_boxes = [x.pred_boxes for x in instances]
z_features = self.z_pooler(features, pred_boxes)
z_pred = self.z_head(z_features)
z_rcnn_inference(z_pred, instances)
return instances
    def _forward_mask(self, features, instances):
        """
        Forward logic of the mask prediction branch.
        Args:
            features (dict[str,Tensor]): mapping from names to backbone features
            instances (list[Instances]): the per-image instances to train/predict masks.
                In training, they can be the proposals.
                In inference, they can be the predicted boxes.
        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_masks" and return it.
        """
        if not self.mask_on:
            return {} if self.training else instances
        features = [features[f] for f in self.in_features]
        if self.training:
            # The loss is only defined on positive proposals.
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            mask_features = self.mask_pooler(features, proposal_boxes)
            mask_logits = self.mask_head.layers(mask_features)
            loss_mask, target_masks = mask_rcnn_loss(mask_logits, proposals)
            if self._vis:
                # stash targets/proposals for minibatch visualization
                self._misc["target_masks"] = target_masks
                self._misc["fg_proposals"] = proposals
            return {"loss_mask": loss_mask}
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            mask_features = self.mask_pooler(features, pred_boxes)
            return self.mask_head(mask_features, instances)
    def _forward_shape(self, features, instances):
        """
        Forward logic for the voxel and mesh refinement branch.
        Args:
            features (list[Tensor]): #level input features for voxel prediction
            instances (list[Instances]): the per-image instances to train/predict meshes.
                In training, they can be the proposals.
                In inference, they can be the predicted boxes.
        Returns:
            In training, a dict of losses.
            In inference, update `instances` with new fields "pred_voxels" & "pred_meshes" and return it.
        """
        if not self.voxel_on and not self.mesh_on:
            return {} if self.training else instances
        features = [features[f] for f in self.in_features]
        if self.training:
            # The loss is only defined on positive proposals.
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            losses = {}
            if self.voxel_on:
                voxel_features = self.voxel_pooler(features, proposal_boxes)
                voxel_logits = self.voxel_head(voxel_features)
                loss_voxel, target_voxels = voxel_rcnn_loss(
                    voxel_logits, proposals, loss_weight=self.voxel_loss_weight
                )
                losses.update({"loss_voxel": loss_voxel})
                if self._vis:
                    self._misc["target_voxels"] = target_voxels
                if self.cls_agnostic_voxel:
                    # Turn predicted voxel occupancies into an initial mesh
                    # for the refinement head (no gradient through cubify).
                    with torch.no_grad():
                        vox_in = voxel_logits.sigmoid().squeeze(1)  # (N, V, V, V)
                        init_mesh = cubify(vox_in, self.cubify_thresh)  # 1
                else:
                    raise ValueError("No support for class specific predictions")
            if self.mesh_on:
                mesh_features = self.mesh_pooler(features, proposal_boxes)
                if not self.voxel_on:
                    # Without a voxel branch, refinement starts from an ico-sphere.
                    if mesh_features.shape[0] > 0:
                        init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)
                        init_mesh = init_mesh.extend(mesh_features.shape[0])
                    else:
                        init_mesh = Meshes(verts=[], faces=[])
                pred_meshes = self.mesh_head(mesh_features, init_mesh)
                # loss weights
                loss_weights = {
                    "chamfer": self.chamfer_loss_weight,
                    "normals": self.normals_loss_weight,
                    "edge": self.edge_loss_weight,
                }
                if not pred_meshes[0].isempty():
                    loss_chamfer, loss_normals, loss_edge, target_meshes = mesh_rcnn_loss(
                        pred_meshes,
                        proposals,
                        loss_weights=loss_weights,
                        gt_num_samples=self.gt_num_samples,
                        pred_num_samples=self.pred_num_samples,
                        gt_coord_thresh=self.gt_coord_thresh,
                    )
                    if self._vis:
                        self._misc["init_meshes"] = init_mesh
                        self._misc["target_meshes"] = target_meshes
                else:
                    # Empty predicted meshes: emit zero-valued losses that still
                    # reference every mesh-head parameter.
                    # NOTE(review): presumably this keeps the autograd graph
                    # connected (e.g. for DDP) when no meshes exist — confirm.
                    loss_chamfer = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
                    loss_normals = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
                    loss_edge = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
                losses.update(
                    {
                        "loss_chamfer": loss_chamfer,
                        "loss_normals": loss_normals,
                        "loss_edge": loss_edge,
                    }
                )
            return losses
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            if self.voxel_on:
                voxel_features = self.voxel_pooler(features, pred_boxes)
                voxel_logits = self.voxel_head(voxel_features)
                voxel_rcnn_inference(voxel_logits, instances)
                if self.cls_agnostic_voxel:
                    with torch.no_grad():
                        vox_in = voxel_logits.sigmoid().squeeze(1)  # (N, V, V, V)
                        init_mesh = cubify(vox_in, self.cubify_thresh)  # 1
                else:
                    raise ValueError("No support for class specific predictions")
            if self.mesh_on:
                mesh_features = self.mesh_pooler(features, pred_boxes)
                if not self.voxel_on:
                    if mesh_features.shape[0] > 0:
                        init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)
                        init_mesh = init_mesh.extend(mesh_features.shape[0])
                    else:
                        init_mesh = Meshes(verts=[], faces=[])
                pred_meshes = self.mesh_head(mesh_features, init_mesh)
                # attach the final refinement stage's meshes to the instances
                mesh_rcnn_inference(pred_meshes[-1], instances)
            else:
                # voxel-only model: the cubified mesh is the final prediction
                assert self.voxel_on
                mesh_rcnn_inference(init_mesh, instances)
            return instances
| 42.841096 | 105 | 0.616039 |
from typing import Dict
import torch
from detectron2.layers import ShapeSpec, cat
from detectron2.modeling import ROI_HEADS_REGISTRY
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from detectron2.modeling.roi_heads.roi_heads import StandardROIHeads, select_foreground_proposals
from pytorch3d.ops import cubify
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere
from meshrcnn.modeling.roi_heads.mask_head import mask_rcnn_loss
from meshrcnn.modeling.roi_heads.mesh_head import (
build_mesh_head,
mesh_rcnn_inference,
mesh_rcnn_loss,
)
from meshrcnn.modeling.roi_heads.voxel_head import (
build_voxel_head,
voxel_rcnn_inference,
voxel_rcnn_loss,
)
from meshrcnn.modeling.roi_heads.z_head import build_z_head, z_rcnn_inference, z_rcnn_loss
from meshrcnn.utils import vis as vis_utils
@ROI_HEADS_REGISTRY.register()
class MeshRCNNROIHeads(StandardROIHeads):
    """
    Mesh R-CNN ROI heads: extends detectron2's StandardROIHeads with three
    optional per-ROI branches — z regression, voxel occupancy prediction,
    and mesh refinement. Each branch is built only when the corresponding
    cfg flag (MODEL.ZPRED_ON / VOXEL_ON / MESH_ON) is set.
    """
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        """Build the standard heads, then the z/voxel/mesh branches from cfg."""
        super().__init__(cfg, input_shape)
        self._init_z_head(cfg, input_shape)
        self._init_voxel_head(cfg, input_shape)
        self._init_mesh_head(cfg, input_shape)
        # minibatch-visualization bookkeeping (debug aid)
        self._vis = cfg.MODEL.VIS_MINIBATCH
        self._misc = {}
        self._vis_dir = cfg.OUTPUT_DIR
    def _init_z_head(self, cfg, input_shape):
        """Construct the ROI pooler and head for the z-regression branch."""
        self.zpred_on = cfg.MODEL.ZPRED_ON
        if not self.zpred_on:
            return
        z_pooler_resolution = cfg.MODEL.ROI_Z_HEAD.POOLER_RESOLUTION
        z_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
        z_sampling_ratio = cfg.MODEL.ROI_Z_HEAD.POOLER_SAMPLING_RATIO
        z_pooler_type = cfg.MODEL.ROI_Z_HEAD.POOLER_TYPE
        self.z_loss_weight = cfg.MODEL.ROI_Z_HEAD.Z_REG_WEIGHT
        self.z_smooth_l1_beta = cfg.MODEL.ROI_Z_HEAD.SMOOTH_L1_BETA
        # assumes all in_features share one channel count — uses the first
        in_channels = [input_shape[f].channels for f in self.in_features][0]
        self.z_pooler = ROIPooler(
            output_size=z_pooler_resolution,
            scales=z_pooler_scales,
            sampling_ratio=z_sampling_ratio,
            pooler_type=z_pooler_type,
        )
        shape = ShapeSpec(
            channels=in_channels, width=z_pooler_resolution, height=z_pooler_resolution
        )
        self.z_head = build_z_head(cfg, shape)
    def _init_voxel_head(self, cfg, input_shape):
        """Construct the ROI pooler and head for the voxel branch."""
        self.voxel_on = cfg.MODEL.VOXEL_ON
        if not self.voxel_on:
            return
        voxel_pooler_resolution = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_RESOLUTION
        voxel_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
        voxel_sampling_ratio = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_SAMPLING_RATIO
        voxel_pooler_type = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_TYPE
        self.voxel_loss_weight = cfg.MODEL.ROI_VOXEL_HEAD.LOSS_WEIGHT
        self.cls_agnostic_voxel = cfg.MODEL.ROI_VOXEL_HEAD.CLS_AGNOSTIC_VOXEL
        # sigmoid threshold used when cubifying predicted voxels into a mesh
        self.cubify_thresh = cfg.MODEL.ROI_VOXEL_HEAD.CUBIFY_THRESH
        in_channels = [input_shape[f].channels for f in self.in_features][0]
        self.voxel_pooler = ROIPooler(
            output_size=voxel_pooler_resolution,
            scales=voxel_pooler_scales,
            sampling_ratio=voxel_sampling_ratio,
            pooler_type=voxel_pooler_type,
        )
        shape = ShapeSpec(
            channels=in_channels, width=voxel_pooler_resolution, height=voxel_pooler_resolution
        )
        self.voxel_head = build_voxel_head(cfg, shape)
    def _init_mesh_head(self, cfg, input_shape):
        """Construct the ROI pooler and head for the mesh-refinement branch."""
        self.mesh_on = cfg.MODEL.MESH_ON
        if not self.mesh_on:
            return
        mesh_pooler_resolution = cfg.MODEL.ROI_MESH_HEAD.POOLER_RESOLUTION
        mesh_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
        mesh_sampling_ratio = cfg.MODEL.ROI_MESH_HEAD.POOLER_SAMPLING_RATIO
        mesh_pooler_type = cfg.MODEL.ROI_MESH_HEAD.POOLER_TYPE
        # per-term mesh loss weights and sampling settings
        self.chamfer_loss_weight = cfg.MODEL.ROI_MESH_HEAD.CHAMFER_LOSS_WEIGHT
        self.normals_loss_weight = cfg.MODEL.ROI_MESH_HEAD.NORMALS_LOSS_WEIGHT
        self.edge_loss_weight = cfg.MODEL.ROI_MESH_HEAD.EDGE_LOSS_WEIGHT
        self.gt_num_samples = cfg.MODEL.ROI_MESH_HEAD.GT_NUM_SAMPLES
        self.pred_num_samples = cfg.MODEL.ROI_MESH_HEAD.PRED_NUM_SAMPLES
        self.gt_coord_thresh = cfg.MODEL.ROI_MESH_HEAD.GT_COORD_THRESH
        # subdivision level of the ico-sphere used when no voxel branch exists
        self.ico_sphere_level = cfg.MODEL.ROI_MESH_HEAD.ICO_SPHERE_LEVEL
        in_channels = [input_shape[f].channels for f in self.in_features][0]
        self.mesh_pooler = ROIPooler(
            output_size=mesh_pooler_resolution,
            scales=mesh_pooler_scales,
            sampling_ratio=mesh_sampling_ratio,
            pooler_type=mesh_pooler_type,
        )
        self.mesh_head = build_mesh_head(
            cfg,
            ShapeSpec(
                channels=in_channels, height=mesh_pooler_resolution, width=mesh_pooler_resolution
            ),
        )
    def forward(self, images, features, proposals, targets=None):
        """
        Run the box branch plus the extra Mesh R-CNN branches.
        Returns ([], losses) in training and (pred_instances, {}) in inference.
        """
        if self._vis:
            self._misc["images"] = images
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        if self._vis:
            self._misc["proposals"] = proposals
        if self.training:
            losses = self._forward_box(features, proposals)
            losses.update(self._forward_z(features, proposals))
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_shape(features, proposals))
            if self._vis:
                vis_utils.visualize_minibatch(self._misc["images"], self._misc, self._vis_dir, True)
            return [], losses
        else:
            pred_instances = self._forward_box(features, proposals)
            # cascaded inference: non-box branches run on the top detections
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}
    def forward_with_given_boxes(self, features, instances):
        """Run the non-box branches on instances that already carry boxes."""
        assert not self.training
        assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
        instances = self._forward_z(features, instances)
        instances = self._forward_mask(features, instances)
        instances = self._forward_shape(features, instances)
        return instances
    def _forward_z(self, features, instances):
        """Z branch: loss dict in training, augmented instances in inference."""
        if not self.zpred_on:
            return {} if self.training else instances
        features = [features[f] for f in self.in_features]
        if self.training:
            # loss is only defined on foreground (positive) proposals
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            z_features = self.z_pooler(features, proposal_boxes)
            z_pred = self.z_head(z_features)
            src_boxes = cat([p.tensor for p in proposal_boxes])
            loss_z_reg = z_rcnn_loss(
                z_pred,
                proposals,
                src_boxes,
                loss_weight=self.z_loss_weight,
                smooth_l1_beta=self.z_smooth_l1_beta,
            )
            return {"loss_z_reg": loss_z_reg}
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            z_features = self.z_pooler(features, pred_boxes)
            z_pred = self.z_head(z_features)
            z_rcnn_inference(z_pred, instances)
            return instances
    def _forward_mask(self, features, instances):
        """Mask branch: loss dict in training, augmented instances in inference."""
        if not self.mask_on:
            return {} if self.training else instances
        features = [features[f] for f in self.in_features]
        if self.training:
            # loss is only defined on foreground (positive) proposals
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            mask_features = self.mask_pooler(features, proposal_boxes)
            mask_logits = self.mask_head.layers(mask_features)
            loss_mask, target_masks = mask_rcnn_loss(mask_logits, proposals)
            if self._vis:
                self._misc["target_masks"] = target_masks
                self._misc["fg_proposals"] = proposals
            return {"loss_mask": loss_mask}
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            mask_features = self.mask_pooler(features, pred_boxes)
            return self.mask_head(mask_features, instances)
    def _forward_shape(self, features, instances):
        """Voxel + mesh branch: loss dict in training, augmented instances otherwise."""
        if not self.voxel_on and not self.mesh_on:
            return {} if self.training else instances
        features = [features[f] for f in self.in_features]
        if self.training:
            # loss is only defined on foreground (positive) proposals
            proposals, _ = select_foreground_proposals(instances, self.num_classes)
            proposal_boxes = [x.proposal_boxes for x in proposals]
            losses = {}
            if self.voxel_on:
                voxel_features = self.voxel_pooler(features, proposal_boxes)
                voxel_logits = self.voxel_head(voxel_features)
                loss_voxel, target_voxels = voxel_rcnn_loss(
                    voxel_logits, proposals, loss_weight=self.voxel_loss_weight
                )
                losses.update({"loss_voxel": loss_voxel})
                if self._vis:
                    self._misc["target_voxels"] = target_voxels
                if self.cls_agnostic_voxel:
                    # cubify the predicted occupancy grid into the initial mesh
                    with torch.no_grad():
                        vox_in = voxel_logits.sigmoid().squeeze(1)
                        init_mesh = cubify(vox_in, self.cubify_thresh)
                else:
                    raise ValueError("No support for class specific predictions")
            if self.mesh_on:
                mesh_features = self.mesh_pooler(features, proposal_boxes)
                if not self.voxel_on:
                    # no voxel branch: start refinement from an ico-sphere
                    if mesh_features.shape[0] > 0:
                        init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)
                        init_mesh = init_mesh.extend(mesh_features.shape[0])
                    else:
                        init_mesh = Meshes(verts=[], faces=[])
                pred_meshes = self.mesh_head(mesh_features, init_mesh)
                loss_weights = {
                    "chamfer": self.chamfer_loss_weight,
                    "normals": self.normals_loss_weight,
                    "edge": self.edge_loss_weight,
                }
                if not pred_meshes[0].isempty():
                    loss_chamfer, loss_normals, loss_edge, target_meshes = mesh_rcnn_loss(
                        pred_meshes,
                        proposals,
                        loss_weights=loss_weights,
                        gt_num_samples=self.gt_num_samples,
                        pred_num_samples=self.pred_num_samples,
                        gt_coord_thresh=self.gt_coord_thresh,
                    )
                    if self._vis:
                        self._misc["init_meshes"] = init_mesh
                        self._misc["target_meshes"] = target_meshes
                else:
                    # empty meshes: zero losses that still reference every
                    # mesh-head parameter (NOTE(review): presumably to keep
                    # gradients defined, e.g. for DDP — confirm)
                    loss_chamfer = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
                    loss_normals = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
                    loss_edge = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
                losses.update(
                    {
                        "loss_chamfer": loss_chamfer,
                        "loss_normals": loss_normals,
                        "loss_edge": loss_edge,
                    }
                )
            return losses
        else:
            pred_boxes = [x.pred_boxes for x in instances]
            if self.voxel_on:
                voxel_features = self.voxel_pooler(features, pred_boxes)
                voxel_logits = self.voxel_head(voxel_features)
                voxel_rcnn_inference(voxel_logits, instances)
                if self.cls_agnostic_voxel:
                    with torch.no_grad():
                        vox_in = voxel_logits.sigmoid().squeeze(1)
                        init_mesh = cubify(vox_in, self.cubify_thresh)
                else:
                    raise ValueError("No support for class specific predictions")
            if self.mesh_on:
                mesh_features = self.mesh_pooler(features, pred_boxes)
                if not self.voxel_on:
                    if mesh_features.shape[0] > 0:
                        init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)
                        init_mesh = init_mesh.extend(mesh_features.shape[0])
                    else:
                        init_mesh = Meshes(verts=[], faces=[])
                pred_meshes = self.mesh_head(mesh_features, init_mesh)
                # attach the final refinement stage's meshes to the instances
                mesh_rcnn_inference(pred_meshes[-1], instances)
            else:
                # voxel-only model: the cubified mesh is the final prediction
                assert self.voxel_on
                mesh_rcnn_inference(init_mesh, instances)
            return instances
| true | true |
f7263d836807e256e08d31435e58c1ede32bac68 | 718 | py | Python | Class1/ex7_YAML_JSON_read.py | karimjamali/Class-1 | 64a97e143dd0a9bd0e177481a6a858638bfd3766 | [
"Apache-2.0"
] | null | null | null | Class1/ex7_YAML_JSON_read.py | karimjamali/Class-1 | 64a97e143dd0a9bd0e177481a6a858638bfd3766 | [
"Apache-2.0"
] | null | null | null | Class1/ex7_YAML_JSON_read.py | karimjamali/Class-1 | 64a97e143dd0a9bd0e177481a6a858638bfd3766 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env/python
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
    # Pretty-print `my_list` under a header naming its source format
    # (`my_str`, e.g. 'JSON' or 'YAML'). Prints only; returns None.
    # NOTE: Python 2 print statements.
    print "Converting from " , my_str ,"FORMAT"
    print "#" * 30
    pprint (my_list)
def main():
#my_dict={'karim':'32','yasmine':'26','amine':'1','dad':'76','mum':'67'}
yaml_output="yaml_output.yml"
json_output="json_output.json"
json_list=[]
yaml_list=[]
with open(yaml_output,"r") as f:
yaml_list=yaml.load(f)
with open(json_output,"r") as f:
json_list=json.load(f)
print "JSON_LIST: ", output_format(json_list,'JSON')
print "YAML_LIST: ",output_format(yaml_list,'YAML')
if __name__ == '__main__':
main()
| 17.512195 | 72 | 0.650418 |
import yaml
import json
from pprint import pprint
def output_format(my_list, my_str):
    # Pretty-print `my_list` under a header naming its source format
    # (`my_str`). Prints only; returns None. Python 2 print statements.
    print "Converting from " , my_str ,"FORMAT"
    print "#" * 30
    pprint (my_list)
def main():
    # Read the YAML and JSON files written by the companion exercise and
    # display their contents.
    #my_dict={'karim':'32','yasmine':'26','amine':'1','dad':'76','mum':'67'}
    yaml_output="yaml_output.yml"
    json_output="json_output.json"
    json_list=[]
    yaml_list=[]
    with open(yaml_output,"r") as f:
        # NOTE(review): yaml.load without a Loader is unsafe/deprecated;
        # prefer yaml.safe_load
        yaml_list=yaml.load(f)
    with open(json_output,"r") as f:
        json_list=json.load(f)
    # NOTE(review): output_format() returns None, so these lines print the
    # data followed by a stray "None" after each label
    print "JSON_LIST: ", output_format(json_list,'JSON')
    print "YAML_LIST: ",output_format(yaml_list,'YAML')
if __name__ == '__main__':
    main()
main()
| false | true |
f7263d929de143ba1a7546baa19c897e7ec8745c | 18,460 | py | Python | cybergis_compute_client/CyberGISCompute.py | alexandermichels/cybergis-compute-python-sdk | 6e7790a627368d0031582fe44a58fdb514868950 | [
"Apache-2.0"
] | null | null | null | cybergis_compute_client/CyberGISCompute.py | alexandermichels/cybergis-compute-python-sdk | 6e7790a627368d0031582fe44a58fdb514868950 | [
"Apache-2.0"
] | null | null | null | cybergis_compute_client/CyberGISCompute.py | alexandermichels/cybergis-compute-python-sdk | 6e7790a627368d0031582fe44a58fdb514868950 | [
"Apache-2.0"
] | null | null | null | """
This module exposes the CyberGISCompute class, which creates a CyberGISCompute
object that serves as an entry point to the CyberGISX environment from a Python/Jupyter notebook.
All interactions with the High Performance Computing (HPC) backend are performed using this object.
Example:
cybergis = CyberGISCompute(url='localhost', port='3030', protocol='HTTP', isJupyter=False)
"""
from .Client import *
from .Job import *
from .UI import *
import base64
import os
from IPython.display import display, Markdown, Javascript
class CyberGISCompute:
    """
    CyberGISCompute class
    An interface that handles all interactions with the HPC backend

    Attributes:
        client (Client object) : Initialized using url(str), protocol(str), port(str) and suffix(str)
        jupyterhubApiToken (string) : jupyterhub's REST API token that can be used to authenticate the user
            (https://jhubdocs.readthedocs.io/en/latest/jupyterhub/docs/source/rest.html)
        username (string) : username
        isJupyter (boolean) : set to True if you are working in a jupyter environment.
            If you are working in a simple Python environment then set to False
        ui (UI) : Serves as entry point to UI functionality
        job (Job) : Serves as entry point to access job interactions
        recentDownloadPath (str) : Gets the most recent download path from globus
        jupyterhubHost (str) : static variable that stores the path to jupyterhubHost
    """
    # static variable (shared across instances; set by enable_jupyter)
    jupyterhubHost = None
    job = None

    def __init__(self, url="cgjobsup.cigi.illinois.edu", port=443, protocol='HTTPS', suffix="", isJupyter=True):
        """
        Initializes instance CyberGISCompute using inputs from the client

        Args:
            url (str) : url that needs to be accessed
            port (str) : port of the Jupyter or Python interface
            protocol (str) : Typically HTTP or HTTPS
            suffix (str) : specify version. For e.g v2
            isJupyter (boolean) : set to True if you are using Jupyter environment
        """
        self.client = Client(url=url, protocol=protocol, port=port, suffix=suffix)
        # token/username are populated by login()
        self.jupyterhubApiToken = None
        self.username = None
        self.isJupyter = isJupyter
        self.ui = UI(self)
        if isJupyter:
            self.enable_jupyter()
        # most recently created job handle (see create_job / get_latest_created_job)
        self.job = None
        self.recentDownloadPath = None

    def login(self, manualLogin=True):
        """
        Authenticates the client's jupyterhubApiToken and gives them access
        to CyberGISCompute features.

        Login is attempted in order: token cached on this object, the
        JUPYTERHUB_API_TOKEN environment variable, a local
        "cybergis_compute_user.json" file, and finally (if manualLogin)
        an interactive prompt.

        Args:
            manualLogin (boolean) : set to True if env variable and file login modes are not available

        Returns:
            None
        """
        if self.jupyterhubApiToken is not None:
            print('🎯 Logged in as ' + self.username)
            return
        # 1) login via environment variable
        envToken = os.getenv('JUPYTERHUB_API_TOKEN')
        if envToken is not None:
            print('💻 Found system token')
            try:
                token = base64.b64encode((self.jupyterhubHost + '@' + envToken).encode('ascii')).decode('utf-8')
                res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
                self.jupyterhubApiToken = token
                self.username = res['username']
                return self.login()
            except Exception:  # narrowed from bare except (would swallow KeyboardInterrupt)
                print('❌ Failed to login via system token')
        # 2) login via cached token file
        if path.exists('./cybergis_compute_user.json'):
            with open(os.path.abspath('cybergis_compute_user.json')) as f:
                user = json.load(f)
                token = user['token']
                print('📃 Found "cybergis_compute_user.json"')
                try:
                    res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
                    self.jupyterhubApiToken = token
                    self.username = res['username']
                    return self.login()
                except Exception:
                    print('❌ Failed to login via token JSON file')
                    print('NOTE: if you want to login as another user, please remove this file')
        # 3) interactive login
        elif manualLogin:
            if self.isJupyter:
                if (self.jupyterhubHost is not None):
                    import getpass
                    print('📢 Please go to Control Panel -> Token, request a new API token')
                    token = getpass.getpass('enter your API token here')
                    token = base64.b64encode((self.jupyterhubHost + '@' + token).encode('ascii')).decode('utf-8')
                    try:
                        res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
                        self.jupyterhubApiToken = token
                        self.username = res['username']
                        # cache the validated token for next time
                        with open('./cybergis_compute_user.json', 'w') as json_file:
                            json.dump({"token": token}, json_file)
                        return self.login()
                    except Exception:
                        print('❌ Failed to login via user input')
                else:
                    print('❌ You might not be working on a web browser or enabled JavaScript')
            else:
                print('❌ Enable Jupyter using .enable_jupyter() before you login')
        else:
            print('❌ Not logged in. To enable more features, use .login()')

    def create_job(self, maintainer='community_contribution', hpc=None, hpcUsername=None, hpcPassword=None, printJob=True):
        """
        Creates a job object

        Args:
            maintainer (str) : Pre-packaged programs which can be configured and controlled remotely
                and behave as a bridge between user and HPC backends
            hpc (str) : HPC backend that is being accessed. For e.g 'keeling_community'
            hpcUsername (str) : username for HPC backend
            hpcPassword (str) : password for HPC backend
            printJob (str) : prints the Job information if set to True

        Returns:
            (Job) : The new job instance that was initialized
        """
        self.login()
        return Job(maintainer=maintainer, hpc=hpc, id=None, hpcUsername=hpcUsername, hpcPassword=hpcPassword, client=self.client, isJupyter=self.isJupyter, jupyterhubApiToken=self.jupyterhubApiToken, printJob=printJob)

    def get_job_by_id(self, id=None):
        """
        Returns a Job object with the specified id

        Args:
            id (int) : Job id

        Returns:
            (Job) : Job object with the specified id, otherwise None
        """
        self.login()
        jobs = self.client.request('GET', '/user/job', {"jupyterhubApiToken": self.jupyterhubApiToken})
        token = None
        for job in jobs['job']:
            if (job['id'] == id):
                token = job['secretToken']
        if (token is None):
            # str(): job ids are ints, so '+' with a str would raise TypeError
            print('❌ job with id ' + str(id) + ' was not found')
            # return None (as documented) instead of a Job with no token
            return None
        return Job(secretToken=token, client=self.client, id=id, isJupyter=self.isJupyter, jupyterhubApiToken=self.jupyterhubApiToken)

    def get_slurm_usage(self, raw=False):
        """
        Prints slurm usage

        Args:
            raw (boolean) : set to True if you want the raw output

        Returns:
            (JSON) : Raw output if raw=True, otherwise it is displayed directly in the interface
        """
        self.login()
        usage = self.client.request('GET', '/user/slurm-usage?format={}'.format(not raw), {"jupyterhubApiToken": self.jupyterhubApiToken})
        if raw:
            return usage
        display(Markdown("Nodes: {}<br>Allocated CPUs: {}<br>Total CPU Time: {}<br>Memory Utilized: {}<br>Total Allocated Memory: {}<br>Total Walltime: {}".format(
            usage['nodes'], usage['cpus'], usage['cpuTime'], usage['memory'], usage['memoryUsage'], usage['walltime'])))

    def list_job(self, raw=False):
        """
        Prints a list of jobs that were submitted

        Args:
            raw (boolean) : set to True if you want the raw output

        Returns:
            (JSON) : Raw output if raw=True, otherwise the table is printed/displayed
        """
        self.login()
        if self.jupyterhubApiToken is None:
            print('❌ please login')
            # without a token the request below would fail, so stop here
            return None
        jobs = self.client.request('GET', '/user/job', {"jupyterhubApiToken": self.jupyterhubApiToken})
        if raw:
            return jobs
        headers = ['id', 'hpc', 'executableFolder', 'dataFolder', 'resultFolder', 'param', 'slurm', 'userId', 'maintainer', 'createdAt']
        data = []
        for job in jobs['job']:
            data.append([
                job['id'],
                job['hpc'],
                job['executableFolder'],
                job['dataFolder'],
                job['resultFolder'],
                json.dumps(job['param']),
                json.dumps(job['slurm']),
                job['userId'],
                job['maintainer'],
                job['createdAt'],
            ])
        self._print_table(headers, data)

    def list_hpc(self, raw=False):
        """
        Prints a list of hpc resources that the server supports

        Args:
            raw (boolean) : set to True if you want the raw output

        Returns:
            (JSON) : Raw output if raw=True, otherwise the table is printed/displayed
        """
        hpc = self.client.request('GET', '/hpc')['hpc']
        if raw:
            return hpc
        headers = ['hpc', 'ip', 'port', 'is_community_account']
        data = []
        for i in hpc:
            data.append([
                i,
                hpc[i]['ip'],
                hpc[i]['port'],
                hpc[i]['is_community_account']
            ])
        self._print_table(headers, data)

    def list_container(self, raw=False):
        """
        Prints a list of containers that the server supports

        Args:
            raw (boolean) : set to True if you want the raw output

        Returns:
            (JSON) : Raw output if raw=True, otherwise the table is printed/displayed
        """
        container = self.client.request('GET', '/container')['container']
        if raw:
            return container
        headers = ['container name', 'dockerfile', 'dockerhub']
        data = []
        for i in container:
            data.append([
                i,
                container[i]['dockerfile'],
                container[i]['dockerhub']
            ])
        self._print_table(headers, data)

    def list_git(self, raw=False):
        """
        Prints a list of Git projects that the server supports

        Args:
            raw (boolean) : set to True if you want the raw output

        Returns:
            (JSON) : Raw output if raw=True, otherwise the table is printed/displayed
        """
        git = self.client.request('GET', '/git')['git']
        if raw:
            return git
        headers = ['link', 'name', 'container', 'repository', 'commit']
        data = []
        for i in git:
            data.append([
                'git://' + i,
                git[i]['name'],
                git[i]['container'],
                git[i]['repository'],
                git[i]['commit'] if 'commit' in git[i] else 'NONE',
            ])
        self._print_table(headers, data)

    def list_maintainer(self, raw=False):
        """
        Prints a list of maintainers that the server supports

        Args:
            raw (boolean) : set to True if you want the raw output

        Returns:
            (JSON) : Raw output if raw=True, otherwise the table is printed/displayed
        """
        maintainers = self.client.request('GET', '/maintainer')['maintainer']
        if raw:
            return maintainers
        headers = ['maintainer', 'hpc', 'default_hpc', 'job_pool_capacity', 'executable_folder->from_user', 'executable_folder->must_have']
        data = []
        for i in maintainers:
            maintainer = maintainers[i]
            # executable_folder settings are optional in the maintainer config
            from_user = 'not specified'
            if 'executable_folder' in maintainer:
                from_user = maintainer['executable_folder']['from_user']
            must_have = 'not specified'
            if 'executable_folder' in maintainer:
                if 'file_config' in maintainer['executable_folder']:
                    if 'must_have' in maintainer['executable_folder']['file_config']:
                        must_have = maintainer['executable_folder']['file_config']['must_have']
            data.append([
                i,
                maintainer['hpc'],
                maintainer['default_hpc'],
                maintainer['job_pool_capacity'],
                from_user,
                must_have
            ])
        self._print_table(headers, data)

    def _print_table(self, headers, data):
        """Render a headers/rows table as HTML (Jupyter) or plain text (terminal)."""
        if self.isJupyter:
            if len(data) == 0:
                print('empty')
                return
            display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
        else:
            print(tabulate(data, headers, tablefmt="presto"))

    # Integrated functions
    def list_info(self, list_maintainer=False, list_container=False):
        """
        Calls list_git, list_hpc, list_job with options to call list_maintainer and list_container

        Args:
            list_maintainer (boolean) : set to True if you want to call list_maintainer
            list_container (boolean) : set to True if you want to call list_container

        Returns:
            None
        """
        print('📦 Git repositories:')
        self.list_git()
        print('🖥 HPC endpoints:')
        self.list_hpc()
        if self.is_login():
            print('📮 Submitted jobs:')
            self.list_job()
        if list_container:
            print('🗳 Containers:')
            self.list_container()
        if list_maintainer:
            print('🤖 Maintainers:')
            self.list_maintainer()

    def create_job_by_ui(self, defaultJob="hello_world", defaultDataFolder="./", defaultRemoteResultFolder=None):
        """
        Displays the job submission UI

        Args:
            defaultJob (str) : Stores the default job that shows up on the UI
            defaultDataFolder (str) : Stores the default input folder that shows up on the UI
            defaultRemoteResultFolder (str) : Stores the default output folder that shows up on the UI

        Returns:
            None
        """
        self.ui.defaultJobName = defaultJob
        self.ui.defaultDataFolder = defaultDataFolder
        if defaultRemoteResultFolder is not None:
            # the UI expects an absolute-style path starting with '/'
            self.ui.defaultRemoteResultFolder = defaultRemoteResultFolder if defaultRemoteResultFolder[0] == '/' else '/' + defaultRemoteResultFolder
        self.ui.render()

    def get_latest_created_job(self):
        """
        Returns the most recently created job instance

        Returns:
            (Job) : Latest Job object instance
        """
        return self.job

    # helper functions
    def enable_jupyter(self):
        """
        Sets up the Jupyter environment (records the jupyterhub host)

        Returns:
            None
        """
        self.isJupyter = True
        # get jupyter variable
        url = os.getenv('JUPYTER_INSTANCE_URL')
        if url is not None:
            CyberGISCompute.jupyterhubHost = url.replace('https://', '').replace('http://', '')
        else:
            # fall back to reading window.location from the browser via JS
            display(Javascript('IPython.notebook.kernel.execute(`CyberGISCompute.jupyterhubHost = "${window.location.host}"`);'))

    def get_user_jupyter_globus(self):
        """
        Returns the user's jupyter-globus information from the server

        Returns:
            (JSON) : server response for /user/jupyter-globus
        """
        return self.client.request('GET', '/user/jupyter-globus', {"jupyterhubApiToken": self.jupyterhubApiToken})

    def is_login(self):
        """
        Checks whether a jupyterhubApiToken exists

        Returns:
            (boolean) : True if a token is present
        """
        return self.jupyterhubApiToken is not None
| 41.483146 | 224 | 0.551842 |
from .Client import *
from .Job import *
from .UI import *
import base64
import os
from IPython.display import display, Markdown, Javascript
class CyberGISCompute:
jupyterhubHost = None
job = None
    def __init__(self, url="cgjobsup.cigi.illinois.edu", port=443, protocol='HTTPS', suffix="", isJupyter=True):
        """Set up the REST client and (optionally) the Jupyter integration."""
        self.client = Client(url=url, protocol=protocol, port=port, suffix=suffix)
        # token/username are populated by login()
        self.jupyterhubApiToken = None
        self.username = None
        self.isJupyter = isJupyter
        self.ui = UI(self)
        if isJupyter:
            self.enable_jupyter()
        # most recently created job handle
        self.job = None
        self.recentDownloadPath = None
    def login(self, manualLogin=True):
        """
        Authenticate against the server, trying in order: token cached on
        this object, the JUPYTERHUB_API_TOKEN environment variable, a local
        "cybergis_compute_user.json" file, then (if manualLogin) a prompt.
        """
        if self.jupyterhubApiToken is not None:
            print('🎯 Logged in as ' + self.username)
            return
        # 1) environment-variable token
        envToken = os.getenv('JUPYTERHUB_API_TOKEN')
        if envToken is not None:
            print('💻 Found system token')
            try:
                token = base64.b64encode((self.jupyterhubHost + '@' + envToken).encode('ascii')).decode('utf-8')
                res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
                self.jupyterhubApiToken = token
                self.username = res['username']
                return self.login()
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            except:
                print('❌ Failed to login via system token')
        # 2) cached token file
        if path.exists('./cybergis_compute_user.json'):
            with open(os.path.abspath('cybergis_compute_user.json')) as f:
                user = json.load(f)
                token = user['token']
                print('📃 Found "cybergis_compute_user.json"')
                try:
                    res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
                    self.jupyterhubApiToken = token
                    self.username = res['username']
                    return self.login()
                except:
                    print('❌ Failed to login via token JSON file')
                    print('NOTE: if you want to login as another user, please remove this file')
        # 3) interactive login (Jupyter only)
        elif manualLogin:
            if self.isJupyter:
                if (self.jupyterhubHost is not None):
                    import getpass
                    print('📢 Please go to Control Panel -> Token, request a new API token')
                    token = getpass.getpass('enter your API token here')
                    token = base64.b64encode((self.jupyterhubHost + '@' + token).encode('ascii')).decode('utf-8')
                    try:
                        res = self.client.request('GET', '/user', {"jupyterhubApiToken": token})
                        self.jupyterhubApiToken = token
                        self.username = res['username']
                        # cache the validated token for the next session
                        with open('./cybergis_compute_user.json', 'w') as json_file:
                            json.dump({"token": token}, json_file)
                        return self.login()
                    except:
                        print('❌ Failed to login via user input')
                else:
                    print('❌ You might not be working on a web browser or enabled JavaScript')
            else:
                print('❌ Enable Jupyter using .enable_jupyter() before you login')
        else:
            print('❌ Not logged in. To enable more features, use .login()')
def create_job(self, maintainer='community_contribution', hpc=None, hpcUsername=None, hpcPassword=None, printJob=True):
self.login()
return Job(maintainer=maintainer, hpc=hpc, id=None, hpcUsername=hpcUsername, hpcPassword=hpcPassword, client=self.client, isJupyter=self.isJupyter, jupyterhubApiToken=self.jupyterhubApiToken, printJob=printJob)
def get_job_by_id(self, id=None):
self.login()
jobs = self.client.request('GET', '/user/job', {"jupyterhubApiToken": self.jupyterhubApiToken})
token = None
for job in jobs['job']:
if (job['id'] == id):
token = job['secretToken']
if (token is None):
print('❌ job with id ' + id + ' was not found')
return Job(secretToken=token, client=self.client, id=id, isJupyter=self.isJupyter, jupyterhubApiToken=self.jupyterhubApiToken)
def get_slurm_usage(self, raw=False):
self.login()
usage = self.client.request('GET', '/user/slurm-usage?format={}'.format(not raw), {"jupyterhubApiToken": self.jupyterhubApiToken})
if raw:
return usage
display(Markdown("Nodes: {}<br>Allocated CPUs: {}<br>Total CPU Time: {}<br>Memory Utilized: {}<br>Total Allocated Memory: {}<br>Total Walltime: {}".format(
usage['nodes'], usage['cpus'], usage['cpuTime'], usage['memory'], usage['memoryUsage'], usage['walltime'])))
def list_job(self, raw=False):
self.login()
if self.jupyterhubApiToken is None:
print('❌ please login')
jobs = self.client.request('GET', '/user/job', {"jupyterhubApiToken": self.jupyterhubApiToken})
if raw:
return jobs
headers = ['id', 'hpc', 'executableFolder', 'dataFolder', 'resultFolder', 'param', 'slurm', 'userId', 'maintainer', 'createdAt']
data = []
for job in jobs['job']:
data.append([
job['id'],
job['hpc'],
job['executableFolder'],
job['dataFolder'],
job['resultFolder'],
json.dumps(job['param']),
json.dumps(job['slurm']),
job['userId'],
job['maintainer'],
job['createdAt'],
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_hpc(self, raw=False):
hpc = self.client.request('GET', '/hpc')['hpc']
if raw:
return hpc
headers = ['hpc', 'ip', 'port', 'is_community_account']
data = []
for i in hpc:
data.append([
i,
hpc[i]['ip'],
hpc[i]['port'],
hpc[i]['is_community_account']
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_container(self, raw=False):
container = self.client.request('GET', '/container')['container']
if raw:
return container
headers = ['container name', 'dockerfile', 'dockerhub']
data = []
for i in container:
data.append([
i,
container[i]['dockerfile'],
container[i]['dockerhub']
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_git(self, raw=False):
git = self.client.request('GET', '/git')['git']
if raw:
return git
headers = ['link', 'name', 'container', 'repository', 'commit']
data = []
for i in git:
data.append([
'git://' + i,
git[i]['name'],
git[i]['container'],
git[i]['repository'],
git[i]['commit'] if 'commit' in git[i] else 'NONE',
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_maintainer(self, raw=False):
maintainers = self.client.request('GET', '/maintainer')['maintainer']
if raw:
return maintainers
headers = ['maintainer', 'hpc', 'default_hpc', 'job_pool_capacity', 'executable_folder->from_user', 'executable_folder->must_have']
data = []
for i in maintainers:
maintainer = maintainers[i]
from_user = 'not specified'
if 'executable_folder' in maintainer:
from_user = maintainer['executable_folder']['from_user']
must_have = 'not specified'
if 'executable_folder' in maintainer:
if 'file_config' in maintainer['executable_folder']:
if 'must_have' in maintainer['executable_folder']['file_config']:
must_have = maintainer['executable_folder']['file_config']['must_have']
data.append([
i,
maintainer['hpc'],
maintainer['default_hpc'],
maintainer['job_pool_capacity'],
from_user,
must_have
])
if self.isJupyter:
if len(data) == 0:
print('empty')
return
display(HTML(tabulate(data, headers, numalign='left', stralign='left', colalign=('left', 'left'), tablefmt='html').replace('<td>', '<td style="text-align:left">').replace('<th>', '<th style="text-align:left">')))
else:
print(tabulate(data, headers, tablefmt="presto"))
def list_info(self, list_maintainer=False, list_container=False):
print('📦 Git repositories:')
self.list_git()
print('🖥 HPC endpoints:')
self.list_hpc()
if self.is_login():
print('📮 Submitted jobs:')
self.list_job()
if list_container:
print('🗳 Containers:')
self.list_container()
if list_maintainer:
print('🤖 Maintainers:')
self.list_maintainer()
def create_job_by_ui(self, defaultJob="hello_world", defaultDataFolder="./", defaultRemoteResultFolder=None):
self.ui.defaultJobName = defaultJob
self.ui.defaultDataFolder = defaultDataFolder
if defaultRemoteResultFolder is not None:
self.ui.defaultRemoteResultFolder = defaultRemoteResultFolder if defaultRemoteResultFolder[0] == '/' else '/' + defaultRemoteResultFolder
self.ui.render()
def get_latest_created_job(self):
return self.job
def enable_jupyter(self):
self.isJupyter = True
url = os.getenv('JUPYTER_INSTANCE_URL')
if url is not None:
CyberGISCompute.jupyterhubHost = url.replace('https://', '').replace('http://', '')
else:
display(Javascript('IPython.notebook.kernel.execute(`CyberGISCompute.jupyterhubHost = "${window.location.host}"`);'))
def get_user_jupyter_globus(self):
return self.client.request('GET', '/user/jupyter-globus', {"jupyterhubApiToken": self.jupyterhubApiToken})
def is_login(self):
return self.jupyterhubApiToken is not None
| true | true |
f7263dc3cc4e9a52a26c9b3cc1c629cd0270750c | 5,184 | py | Python | cvat/apps/dataset_manager/tests/test_annotation.py | TOsmanov/cvat | 71f94afd769d84c3fb3e3c720e26d927a47bb27b | [
"Intel",
"MIT"
] | 1 | 2019-12-09T13:53:36.000Z | 2019-12-09T13:53:36.000Z | cvat/apps/dataset_manager/tests/test_annotation.py | TOsmanov/cvat | 71f94afd769d84c3fb3e3c720e26d927a47bb27b | [
"Intel",
"MIT"
] | null | null | null | cvat/apps/dataset_manager/tests/test_annotation.py | TOsmanov/cvat | 71f94afd769d84c3fb3e3c720e26d927a47bb27b | [
"Intel",
"MIT"
] | null | null | null | # Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from cvat.apps.dataset_manager.annotation import TrackManager
from unittest import TestCase
class TrackManagerTest(TestCase):
def _check_interpolation(self, track):
interpolated = TrackManager.get_interpolated_shapes(track, 0, 7)
self.assertEqual(len(interpolated), 6)
self.assertTrue(interpolated[0]["keyframe"])
self.assertFalse(interpolated[1]["keyframe"])
self.assertTrue(interpolated[2]["keyframe"])
self.assertTrue(interpolated[3]["keyframe"])
self.assertFalse(interpolated[4]["keyframe"])
self.assertFalse(interpolated[5]["keyframe"])
def test_point_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"source": "manual",
"attributes": [],
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0],
"type": "points",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "points",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "points",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
def test_polygon_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"attributes": [],
"source": "manual",
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0, 3.0, 4.0, 5.0, 2.0],
"type": "polygon",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 4.0, 5.0],
"type": "polygon",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 4.0, 5.0],
"type": "polygon",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
def test_bbox_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"attributes": [],
"source": "manual",
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0, 3.0, 4.0],
"type": "rectangle",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "rectangle",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "rectangle",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
def test_line_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"attributes": [],
"source": "manual",
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
"type": "polyline",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "polyline",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "polyline",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
| 30.494118 | 72 | 0.342593 |
from cvat.apps.dataset_manager.annotation import TrackManager
from unittest import TestCase
class TrackManagerTest(TestCase):
def _check_interpolation(self, track):
interpolated = TrackManager.get_interpolated_shapes(track, 0, 7)
self.assertEqual(len(interpolated), 6)
self.assertTrue(interpolated[0]["keyframe"])
self.assertFalse(interpolated[1]["keyframe"])
self.assertTrue(interpolated[2]["keyframe"])
self.assertTrue(interpolated[3]["keyframe"])
self.assertFalse(interpolated[4]["keyframe"])
self.assertFalse(interpolated[5]["keyframe"])
def test_point_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"source": "manual",
"attributes": [],
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0],
"type": "points",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "points",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "points",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
def test_polygon_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"attributes": [],
"source": "manual",
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0, 3.0, 4.0, 5.0, 2.0],
"type": "polygon",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 4.0, 5.0],
"type": "polygon",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 4.0, 5.0],
"type": "polygon",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
def test_bbox_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"attributes": [],
"source": "manual",
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0, 3.0, 4.0],
"type": "rectangle",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "rectangle",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "rectangle",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
def test_line_interpolation(self):
track = {
"frame": 0,
"label_id": 0,
"group": None,
"attributes": [],
"source": "manual",
"shapes": [
{
"frame": 0,
"points": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
"type": "polyline",
"occluded": False,
"outside": False,
"attributes": []
},
{
"frame": 2,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "polyline",
"occluded": False,
"outside": True
},
{
"frame": 4,
"attributes": [],
"points": [3.0, 4.0, 5.0, 6.0],
"type": "polyline",
"occluded": False,
"outside": False
},
]
}
self._check_interpolation(track)
| true | true |
f7263deac95f43b73909d3038f4d4488fa2639d4 | 997 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/bulk_email/migrations/0006_course_mode_targets.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/bulk_email/migrations/0006_course_mode_targets.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/bulk_email/migrations/0006_course_mode_targets.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0007_coursemode_bulk_sku'),
('bulk_email', '0005_move_target_data'),
]
operations = [
migrations.CreateModel(
name='CourseModeTarget',
fields=[
('target_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='bulk_email.Target', on_delete=models.CASCADE)),
('track', models.ForeignKey(to='course_modes.CourseMode', on_delete=models.CASCADE)),
],
bases=('bulk_email.target',),
),
migrations.AlterField(
model_name='target',
name='target_type',
field=models.CharField(max_length=64, choices=[('myself', 'Myself'), ('staff', 'Staff and instructors'), ('learners', 'All students'), ('cohort', 'Specific cohort'), ('track', 'Specific course mode')]),
),
]
| 38.346154 | 214 | 0.609829 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_modes', '0007_coursemode_bulk_sku'),
('bulk_email', '0005_move_target_data'),
]
operations = [
migrations.CreateModel(
name='CourseModeTarget',
fields=[
('target_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='bulk_email.Target', on_delete=models.CASCADE)),
('track', models.ForeignKey(to='course_modes.CourseMode', on_delete=models.CASCADE)),
],
bases=('bulk_email.target',),
),
migrations.AlterField(
model_name='target',
name='target_type',
field=models.CharField(max_length=64, choices=[('myself', 'Myself'), ('staff', 'Staff and instructors'), ('learners', 'All students'), ('cohort', 'Specific cohort'), ('track', 'Specific course mode')]),
),
]
| true | true |
f7263e61252a6c08d784082742fd67b173531d8d | 195 | py | Python | examples/PythonToDo/models/domain_objects/UserModel.py | PqES/ArchPython | 142b02f2c7fff9a3fb375a338af2de2e688f4004 | [
"MIT"
] | 6 | 2020-07-13T23:41:49.000Z | 2022-02-01T21:02:46.000Z | examples/PythonToDo/models/domain_objects/UserModel.py | LimaEduardo/ArchPython | 5a81d0c79d7933f06c26175b1958d604b4c248df | [
"MIT"
] | null | null | null | examples/PythonToDo/models/domain_objects/UserModel.py | LimaEduardo/ArchPython | 5a81d0c79d7933f06c26175b1958d604b4c248df | [
"MIT"
] | 1 | 2020-09-07T13:04:25.000Z | 2020-09-07T13:04:25.000Z | class UserModel:
def __init__(self, name = None, login = None, password = None):
self.id = None
self.name = name
self.login = login
self.password = password
| 21.666667 | 67 | 0.584615 | class UserModel:
def __init__(self, name = None, login = None, password = None):
self.id = None
self.name = name
self.login = login
self.password = password
| true | true |
f7263f6c62c388fe49d7af215f78c0b7c0576dd0 | 191 | py | Python | chatbot_tutorial/urls.py | abdulmuizzf/django-bot-server-tutorial | 3f9d69bb848ed70e664503aac2c968416b7a891d | [
"MIT"
] | null | null | null | chatbot_tutorial/urls.py | abdulmuizzf/django-bot-server-tutorial | 3f9d69bb848ed70e664503aac2c968416b7a891d | [
"MIT"
] | null | null | null | chatbot_tutorial/urls.py | abdulmuizzf/django-bot-server-tutorial | 3f9d69bb848ed70e664503aac2c968416b7a891d | [
"MIT"
] | null | null | null | from django.conf.urls import include
from django.urls import path
from django.contrib import admin
urlpatterns = [
path('', include('chat.urls')),
path('admin/', admin.site.urls),
]
| 21.222222 | 36 | 0.706806 | from django.conf.urls import include
from django.urls import path
from django.contrib import admin
urlpatterns = [
path('', include('chat.urls')),
path('admin/', admin.site.urls),
]
| true | true |
f7263fda03fe7c55336dc033a79f684a23583ecb | 3,647 | py | Python | circular_cylinder/figures/plot.py | J-Massey/postproc | 4552b0ad79072f5d217cf62632c08617ea3d2d82 | [
"MIT"
] | null | null | null | circular_cylinder/figures/plot.py | J-Massey/postproc | 4552b0ad79072f5d217cf62632c08617ea3d2d82 | [
"MIT"
] | null | null | null | circular_cylinder/figures/plot.py | J-Massey/postproc | 4552b0ad79072f5d217cf62632c08617ea3d2d82 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from itertools import product
import os
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import ticker, cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import FormatStrFormatter
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import BoundaryNorm
colors = sns.color_palette("husl", 4)
plt.style.use(['science', 'grid'])
def plot_loss(epochs, cost, fn='cost.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"Epochs")
ax.set_ylabel(r'$L_2$ loss')
ax.plot_fill(np.linspace(0, epochs, len(cost)), cost, label=r'$L_{2}$')
ax.legend()
plt.savefig(fn)
plt.show()
def plot_model(cd_hat, fos, Y, fn='model.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$t/D$")
ax.set_ylabel(r'$C_{D_f}$')
ax.plot_fill(fos['t'], Y, label=r'Ground truth')
ax.plot_fill(fos['t'], cd_hat, label=r'$\hat{C_{D_f}}$')
ax.legend()
plt.savefig(fn)
plt.show()
def plot_BL_corruption():
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_xlabel(r'$x_n$')
ax.set_ylabel(r'$y_n$', rotation=0)
# Define grid
D = 32
eps = 2
r = D / 2
x, y = np.arange(-D, D + 1, 1), np.arange(-D, D + 1, 1)
X, Y = np.meshgrid(x, y)
# Body coordinates
theta = np.linspace(0, 2 * np.pi, int(D * np.pi))
Bx, By = r * np.cos(theta), r * np.sin(theta)
ax.plot_fill(Bx, By, color='k', linewidth=2., label=r'Hard body boundary')
Bepx, Bepy = (r + eps) * np.cos(theta), (r + eps) * np.sin(theta)
ax.plot_fill(Bepx, Bepy, c='blue', linewidth=0.5, label=r'$D+\epsilon$')
# Distance function from eps away from body edge
dis = np.sqrt(X ** 2 + Y ** 2)
# Cmap definition
bs = iter((np.array([14, 15.8, 18.7, 22]) - 4.5) / D)
colours = [(0, 'midnightblue'),
(next(bs), 'midnightblue'),
(next(bs), 'red'),
(next(bs), 'green'),
(next(bs), 'royalblue'),
(1, 'royalblue')]
cmap = LinearSegmentedColormap.from_list('corruption', colours, 256)
cs = ax.imshow(dis, zorder=0, aspect="auto", extent=(-D, D, -D, D),
cmap=cmap, interpolation='bicubic')
make_axes_locatable(ax)
divider = make_axes_locatable(ax)
ax_cb = divider.new_horizontal(size="5%", pad=0.05)
fig.add_axes(ax_cb)
cbar = plt.colorbar(cs, cax=ax_cb, ticks=[8, 16.4, 21, 32], extend='max')
# ax_cb.yaxis.tick_right()
cbar.ax.set_yticklabels([r'$\vec{b}$', r'$\vec{b}*\vec{f}$', r'$d|_{n \approx 0}$', r'$\vec{f}$'])
cbar.ax.tick_params(which='both', size=0)
ax.legend()
plt.savefig('../figures/bl_corruption.pdf', dpi=300)
plt.close()
def plot_pressure():
data_root = '/home/masseyjmo/Workspace/Lotus/projects/cylinder_dns/validation'
p = np.loadtxt(os.path.join(data_root, 'fort.10'), unpack=True)
p = np.mean(p, axis=1)
fig, ax = plt.subplots(figsize=(4, 4))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$\theta$")
ax.set_ylabel(r'$C_{p}$')
ax.scatter(np.linspace(0, np.pi / 2, len(p)), p * 2, label=r'Pressure distribution', color='k', marker='+')
ax.set_ylim(-2, 1)
ax.legend()
plt.savefig('pressure_theta.pdf')
plt.show()
if __name__ == "__main__":
plot_BL_corruption()
| 34.084112 | 111 | 0.619139 | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from itertools import product
import os
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import ticker, cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import FormatStrFormatter
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import BoundaryNorm
colors = sns.color_palette("husl", 4)
plt.style.use(['science', 'grid'])
def plot_loss(epochs, cost, fn='cost.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"Epochs")
ax.set_ylabel(r'$L_2$ loss')
ax.plot_fill(np.linspace(0, epochs, len(cost)), cost, label=r'$L_{2}$')
ax.legend()
plt.savefig(fn)
plt.show()
def plot_model(cd_hat, fos, Y, fn='model.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$t/D$")
ax.set_ylabel(r'$C_{D_f}$')
ax.plot_fill(fos['t'], Y, label=r'Ground truth')
ax.plot_fill(fos['t'], cd_hat, label=r'$\hat{C_{D_f}}$')
ax.legend()
plt.savefig(fn)
plt.show()
def plot_BL_corruption():
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_xlabel(r'$x_n$')
ax.set_ylabel(r'$y_n$', rotation=0)
D = 32
eps = 2
r = D / 2
x, y = np.arange(-D, D + 1, 1), np.arange(-D, D + 1, 1)
X, Y = np.meshgrid(x, y)
theta = np.linspace(0, 2 * np.pi, int(D * np.pi))
Bx, By = r * np.cos(theta), r * np.sin(theta)
ax.plot_fill(Bx, By, color='k', linewidth=2., label=r'Hard body boundary')
Bepx, Bepy = (r + eps) * np.cos(theta), (r + eps) * np.sin(theta)
ax.plot_fill(Bepx, Bepy, c='blue', linewidth=0.5, label=r'$D+\epsilon$')
dis = np.sqrt(X ** 2 + Y ** 2)
bs = iter((np.array([14, 15.8, 18.7, 22]) - 4.5) / D)
colours = [(0, 'midnightblue'),
(next(bs), 'midnightblue'),
(next(bs), 'red'),
(next(bs), 'green'),
(next(bs), 'royalblue'),
(1, 'royalblue')]
cmap = LinearSegmentedColormap.from_list('corruption', colours, 256)
cs = ax.imshow(dis, zorder=0, aspect="auto", extent=(-D, D, -D, D),
cmap=cmap, interpolation='bicubic')
make_axes_locatable(ax)
divider = make_axes_locatable(ax)
ax_cb = divider.new_horizontal(size="5%", pad=0.05)
fig.add_axes(ax_cb)
cbar = plt.colorbar(cs, cax=ax_cb, ticks=[8, 16.4, 21, 32], extend='max')
cbar.ax.set_yticklabels([r'$\vec{b}$', r'$\vec{b}*\vec{f}$', r'$d|_{n \approx 0}$', r'$\vec{f}$'])
cbar.ax.tick_params(which='both', size=0)
ax.legend()
plt.savefig('../figures/bl_corruption.pdf', dpi=300)
plt.close()
def plot_pressure():
data_root = '/home/masseyjmo/Workspace/Lotus/projects/cylinder_dns/validation'
p = np.loadtxt(os.path.join(data_root, 'fort.10'), unpack=True)
p = np.mean(p, axis=1)
fig, ax = plt.subplots(figsize=(4, 4))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$\theta$")
ax.set_ylabel(r'$C_{p}$')
ax.scatter(np.linspace(0, np.pi / 2, len(p)), p * 2, label=r'Pressure distribution', color='k', marker='+')
ax.set_ylim(-2, 1)
ax.legend()
plt.savefig('pressure_theta.pdf')
plt.show()
if __name__ == "__main__":
plot_BL_corruption()
| true | true |
f7263ff6a3d742c589e550267ff561f470351035 | 36,853 | py | Python | python/istio_api/mesh/v1alpha1/config_pb2.py | selmanj/api | 6166b45d34e2ef8915225b2f849855b5d28fc4f9 | [
"Apache-2.0"
] | null | null | null | python/istio_api/mesh/v1alpha1/config_pb2.py | selmanj/api | 6166b45d34e2ef8915225b2f849855b5d28fc4f9 | [
"Apache-2.0"
] | null | null | null | python/istio_api/mesh/v1alpha1/config_pb2.py | selmanj/api | 6166b45d34e2ef8915225b2f849855b5d28fc4f9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mesh/v1alpha1/config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from mesh.v1alpha1 import proxy_pb2 as mesh_dot_v1alpha1_dot_proxy__pb2
from networking.v1alpha3 import destination_rule_pb2 as networking_dot_v1alpha3_dot_destination__rule__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mesh/v1alpha1/config.proto',
package='istio.mesh.v1alpha1',
syntax='proto3',
serialized_options=_b('Z\032istio.io/api/mesh/v1alpha1'),
serialized_pb=_b('\n\x1amesh/v1alpha1/config.proto\x12\x13istio.mesh.v1alpha1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x19mesh/v1alpha1/proxy.proto\x1a*networking/v1alpha3/destination_rule.proto\"\x8f\x13\n\nMeshConfig\x12\x1a\n\x12mixer_check_server\x18\x01 \x01(\t\x12\x1b\n\x13mixer_report_server\x18\x02 \x01(\t\x12\x1d\n\x15\x64isable_policy_checks\x18\x03 \x01(\x08\x12\"\n\x1a\x64isable_mixer_http_reports\x18\x30 \x01(\x08\x12\x1e\n\x16policy_check_fail_open\x18\x19 \x01(\x08\x12-\n%sidecar_to_telemetry_session_affinity\x18\x1e \x01(\x08\x12\x19\n\x11proxy_listen_port\x18\x04 \x01(\x05\x12\x17\n\x0fproxy_http_port\x18\x05 \x01(\x05\x12\x32\n\x0f\x63onnect_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12=\n\x1aprotocol_detection_timeout\x18* \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x61\n\rtcp_keepalive\x18\x1c \x01(\x0b\x32J.istio.networking.v1alpha3.ConnectionPoolSettings.TCPSettings.TcpKeepalive\x12\x15\n\ringress_class\x18\x07 \x01(\t\x12\x17\n\x0fingress_service\x18\x08 \x01(\t\x12V\n\x17ingress_controller_mode\x18\t \x01(\x0e\x32\x35.istio.mesh.v1alpha1.MeshConfig.IngressControllerMode\x12\x43\n\x0b\x61uth_policy\x18\n \x01(\x0e\x32*.istio.mesh.v1alpha1.MeshConfig.AuthPolicyB\x02\x18\x01\x12\x38\n\x11rds_refresh_delay\x18\x0b \x01(\x0b\x32\x19.google.protobuf.DurationB\x02\x18\x01\x12\x16\n\x0e\x65nable_tracing\x18\x0c \x01(\x08\x12\x17\n\x0f\x61\x63\x63\x65ss_log_file\x18\r \x01(\t\x12\x19\n\x11\x61\x63\x63\x65ss_log_format\x18\x18 \x01(\t\x12N\n\x13\x61\x63\x63\x65ss_log_encoding\x18\x1b \x01(\x0e\x32\x31.istio.mesh.v1alpha1.MeshConfig.AccessLogEncoding\x12\'\n\x1f\x65nable_envoy_access_log_service\x18( \x01(\x08\x12\x38\n\x0e\x64\x65\x66\x61ult_config\x18\x0e \x01(\x0b\x32 .istio.mesh.v1alpha1.ProxyConfig\x12\x19\n\rmixer_address\x18\x10 \x01(\tB\x02\x18\x01\x12V\n\x17outbound_traffic_policy\x18\x11 
\x01(\x0b\x32\x35.istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy\x12\'\n\x1f\x65nable_client_side_policy_check\x18\x13 \x01(\x08\x12\x18\n\x0csds_uds_path\x18\x14 \x01(\tB\x02\x18\x01\x12\x38\n\x11sds_refresh_delay\x18\x15 \x01(\x0b\x32\x19.google.protobuf.DurationB\x02\x18\x01\x12\x39\n\x0e\x63onfig_sources\x18\x16 \x03(\x0b\x32!.istio.mesh.v1alpha1.ConfigSource\x12\x34\n\x10\x65nable_auto_mtls\x18+ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x1e\n\x16\x65nable_sds_token_mount\x18\x17 \x01(\x08\x12\x1a\n\x12sds_use_k8s_sa_jwt\x18\x1d \x01(\x08\x12\x14\n\x0ctrust_domain\x18\x1a \x01(\t\x12\x1c\n\x14trust_domain_aliases\x18. \x03(\t\x12!\n\x19\x64\x65\x66\x61ult_service_export_to\x18\x1f \x03(\t\x12)\n!default_virtual_service_export_to\x18 \x03(\t\x12*\n\"default_destination_rule_export_to\x18! \x03(\t\x12\x16\n\x0eroot_namespace\x18\" \x01(\t\x12S\n\x13locality_lb_setting\x18# \x01(\x0b\x32\x36.istio.networking.v1alpha3.LocalityLoadBalancerSetting\x12\x33\n\x10\x64ns_refresh_rate\x18$ \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x1c\n\x14\x64isable_report_batch\x18% \x01(\x08\x12 \n\x18report_batch_max_entries\x18& \x01(\r\x12\x38\n\x15report_batch_max_time\x18\' \x01(\x0b\x32\x19.google.protobuf.Duration\x12J\n\x11h2_upgrade_policy\x18) \x01(\x0e\x32/.istio.mesh.v1alpha1.MeshConfig.H2UpgradePolicy\x12!\n\x19inbound_cluster_stat_name\x18, \x01(\t\x12\"\n\x1aoutbound_cluster_stat_name\x18- \x01(\t\x12\x36\n\x0c\x63\x65rtificates\x18/ \x03(\x0b\x32 .istio.mesh.v1alpha1.Certificate\x1a\xa7\x01\n\x15OutboundTrafficPolicy\x12H\n\x04mode\x18\x01 
\x01(\x0e\x32:.istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.Mode\"D\n\x04Mode\x12\x11\n\rREGISTRY_ONLY\x10\x00\x12\r\n\tALLOW_ANY\x10\x01\"\x04\x08\x02\x10\x02*\x14VIRTUAL_SERVICE_ONLY\"9\n\x15IngressControllerMode\x12\x07\n\x03OFF\x10\x00\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x01\x12\n\n\x06STRICT\x10\x02\"&\n\nAuthPolicy\x12\x08\n\x04NONE\x10\x00\x12\x0e\n\nMUTUAL_TLS\x10\x01\"\'\n\x11\x41\x63\x63\x65ssLogEncoding\x12\x08\n\x04TEXT\x10\x00\x12\x08\n\x04JSON\x10\x01\"2\n\x0fH2UpgradePolicy\x12\x12\n\x0e\x44O_NOT_UPGRADE\x10\x00\x12\x0b\n\x07UPGRADE\x10\x01J\x04\x08\x0f\x10\x10J\x04\x08\x12\x10\x13\"\x9a\x01\n\x0c\x43onfigSource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12<\n\x0ctls_settings\x18\x02 \x01(\x0b\x32&.istio.networking.v1alpha3.TLSSettings\x12;\n\x14subscribed_resources\x18\x03 \x03(\x0e\x32\x1d.istio.mesh.v1alpha1.Resource\"5\n\x0b\x43\x65rtificate\x12\x13\n\x0bsecret_name\x18\x01 \x01(\t\x12\x11\n\tdns_names\x18\x02 \x03(\t* \n\x08Resource\x12\x14\n\x10SERVICE_REGISTRY\x10\x00\x42\x1cZ\x1aistio.io/api/mesh/v1alpha1b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,mesh_dot_v1alpha1_dot_proxy__pb2.DESCRIPTOR,networking_dot_v1alpha3_dot_destination__rule__pb2.DESCRIPTOR,])
# Top-level enum `Resource` from mesh/v1alpha1/config.proto. The serialized
# start/end offsets index into DESCRIPTOR.serialized_pb above; do not edit by
# hand -- this file is regenerated by protoc.
_RESOURCE = _descriptor.EnumDescriptor(
  name='Resource',
  full_name='istio.mesh.v1alpha1.Resource',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='SERVICE_REGISTRY', index=0, number=0,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2848,
  serialized_end=2880,
)
_sym_db.RegisterEnumDescriptor(_RESOURCE)

# Public module-level enum wrapper and its single value constant.
Resource = enum_type_wrapper.EnumTypeWrapper(_RESOURCE)
SERVICE_REGISTRY = 0
# Enum descriptors for the enums declared *inside* MeshConfig (and its nested
# OutboundTrafficPolicy message) in the .proto. Generated by protoc; the
# containing_type links are patched in after the message descriptors exist.

# MeshConfig.OutboundTrafficPolicy.Mode
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE = _descriptor.EnumDescriptor(
  name='Mode',
  full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.Mode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='REGISTRY_ONLY', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ALLOW_ANY', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2362,
  serialized_end=2430,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE)

# MeshConfig.IngressControllerMode
_MESHCONFIG_INGRESSCONTROLLERMODE = _descriptor.EnumDescriptor(
  name='IngressControllerMode',
  full_name='istio.mesh.v1alpha1.MeshConfig.IngressControllerMode',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OFF', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DEFAULT', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STRICT', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2432,
  serialized_end=2489,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_INGRESSCONTROLLERMODE)

# MeshConfig.AuthPolicy (the field using it is marked deprecated in the .proto)
_MESHCONFIG_AUTHPOLICY = _descriptor.EnumDescriptor(
  name='AuthPolicy',
  full_name='istio.mesh.v1alpha1.MeshConfig.AuthPolicy',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='NONE', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MUTUAL_TLS', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2491,
  serialized_end=2529,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_AUTHPOLICY)

# MeshConfig.AccessLogEncoding
_MESHCONFIG_ACCESSLOGENCODING = _descriptor.EnumDescriptor(
  name='AccessLogEncoding',
  full_name='istio.mesh.v1alpha1.MeshConfig.AccessLogEncoding',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='TEXT', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='JSON', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2531,
  serialized_end=2570,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_ACCESSLOGENCODING)

# MeshConfig.H2UpgradePolicy
_MESHCONFIG_H2UPGRADEPOLICY = _descriptor.EnumDescriptor(
  name='H2UpgradePolicy',
  full_name='istio.mesh.v1alpha1.MeshConfig.H2UpgradePolicy',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='DO_NOT_UPGRADE', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UPGRADE', index=1, number=1,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2572,
  serialized_end=2622,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_H2UPGRADEPOLICY)
# Descriptor for the MeshConfig.OutboundTrafficPolicy nested message: a single
# enum-valued field `mode` (field number 1). containing_type is linked to
# _MESHCONFIG after both descriptors are constructed.
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY = _descriptor.Descriptor(
  name='OutboundTrafficPolicy',
  full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='mode', full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.mode', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2263,
  serialized_end=2430,
)
_MESHCONFIG = _descriptor.Descriptor(
name='MeshConfig',
full_name='istio.mesh.v1alpha1.MeshConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mixer_check_server', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_check_server', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixer_report_server', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_report_server', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_policy_checks', full_name='istio.mesh.v1alpha1.MeshConfig.disable_policy_checks', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_mixer_http_reports', full_name='istio.mesh.v1alpha1.MeshConfig.disable_mixer_http_reports', index=3,
number=48, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='policy_check_fail_open', full_name='istio.mesh.v1alpha1.MeshConfig.policy_check_fail_open', index=4,
number=25, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sidecar_to_telemetry_session_affinity', full_name='istio.mesh.v1alpha1.MeshConfig.sidecar_to_telemetry_session_affinity', index=5,
number=30, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_listen_port', full_name='istio.mesh.v1alpha1.MeshConfig.proxy_listen_port', index=6,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_http_port', full_name='istio.mesh.v1alpha1.MeshConfig.proxy_http_port', index=7,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connect_timeout', full_name='istio.mesh.v1alpha1.MeshConfig.connect_timeout', index=8,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_detection_timeout', full_name='istio.mesh.v1alpha1.MeshConfig.protocol_detection_timeout', index=9,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tcp_keepalive', full_name='istio.mesh.v1alpha1.MeshConfig.tcp_keepalive', index=10,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_class', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_class', index=11,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_service', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_service', index=12,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_controller_mode', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_controller_mode', index=13,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auth_policy', full_name='istio.mesh.v1alpha1.MeshConfig.auth_policy', index=14,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rds_refresh_delay', full_name='istio.mesh.v1alpha1.MeshConfig.rds_refresh_delay', index=15,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_tracing', full_name='istio.mesh.v1alpha1.MeshConfig.enable_tracing', index=16,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_file', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_file', index=17,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_format', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_format', index=18,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_encoding', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_encoding', index=19,
number=27, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_envoy_access_log_service', full_name='istio.mesh.v1alpha1.MeshConfig.enable_envoy_access_log_service', index=20,
number=40, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_config', full_name='istio.mesh.v1alpha1.MeshConfig.default_config', index=21,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixer_address', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_address', index=22,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outbound_traffic_policy', full_name='istio.mesh.v1alpha1.MeshConfig.outbound_traffic_policy', index=23,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_client_side_policy_check', full_name='istio.mesh.v1alpha1.MeshConfig.enable_client_side_policy_check', index=24,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_uds_path', full_name='istio.mesh.v1alpha1.MeshConfig.sds_uds_path', index=25,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_refresh_delay', full_name='istio.mesh.v1alpha1.MeshConfig.sds_refresh_delay', index=26,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config_sources', full_name='istio.mesh.v1alpha1.MeshConfig.config_sources', index=27,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_auto_mtls', full_name='istio.mesh.v1alpha1.MeshConfig.enable_auto_mtls', index=28,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_sds_token_mount', full_name='istio.mesh.v1alpha1.MeshConfig.enable_sds_token_mount', index=29,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_use_k8s_sa_jwt', full_name='istio.mesh.v1alpha1.MeshConfig.sds_use_k8s_sa_jwt', index=30,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trust_domain', full_name='istio.mesh.v1alpha1.MeshConfig.trust_domain', index=31,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trust_domain_aliases', full_name='istio.mesh.v1alpha1.MeshConfig.trust_domain_aliases', index=32,
number=46, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_service_export_to', full_name='istio.mesh.v1alpha1.MeshConfig.default_service_export_to', index=33,
number=31, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_virtual_service_export_to', full_name='istio.mesh.v1alpha1.MeshConfig.default_virtual_service_export_to', index=34,
number=32, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_destination_rule_export_to', full_name='istio.mesh.v1alpha1.MeshConfig.default_destination_rule_export_to', index=35,
number=33, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root_namespace', full_name='istio.mesh.v1alpha1.MeshConfig.root_namespace', index=36,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locality_lb_setting', full_name='istio.mesh.v1alpha1.MeshConfig.locality_lb_setting', index=37,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dns_refresh_rate', full_name='istio.mesh.v1alpha1.MeshConfig.dns_refresh_rate', index=38,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_report_batch', full_name='istio.mesh.v1alpha1.MeshConfig.disable_report_batch', index=39,
number=37, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='report_batch_max_entries', full_name='istio.mesh.v1alpha1.MeshConfig.report_batch_max_entries', index=40,
number=38, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='report_batch_max_time', full_name='istio.mesh.v1alpha1.MeshConfig.report_batch_max_time', index=41,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='h2_upgrade_policy', full_name='istio.mesh.v1alpha1.MeshConfig.h2_upgrade_policy', index=42,
number=41, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inbound_cluster_stat_name', full_name='istio.mesh.v1alpha1.MeshConfig.inbound_cluster_stat_name', index=43,
number=44, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outbound_cluster_stat_name', full_name='istio.mesh.v1alpha1.MeshConfig.outbound_cluster_stat_name', index=44,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='certificates', full_name='istio.mesh.v1alpha1.MeshConfig.certificates', index=45,
number=47, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MESHCONFIG_OUTBOUNDTRAFFICPOLICY, ],
enum_types=[
_MESHCONFIG_INGRESSCONTROLLERMODE,
_MESHCONFIG_AUTHPOLICY,
_MESHCONFIG_ACCESSLOGENCODING,
_MESHCONFIG_H2UPGRADEPOLICY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=187,
serialized_end=2634,
)
# Descriptor for the top-level ConfigSource message: `address` (string, 1),
# `tls_settings` (message, 2; linked to networking.v1alpha3 TLSSettings below)
# and `subscribed_resources` (repeated Resource enum, 3).
_CONFIGSOURCE = _descriptor.Descriptor(
  name='ConfigSource',
  full_name='istio.mesh.v1alpha1.ConfigSource',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='address', full_name='istio.mesh.v1alpha1.ConfigSource.address', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tls_settings', full_name='istio.mesh.v1alpha1.ConfigSource.tls_settings', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='subscribed_resources', full_name='istio.mesh.v1alpha1.ConfigSource.subscribed_resources', index=2,
      number=3, type=14, cpp_type=8, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2637,
  serialized_end=2791,
)
# Descriptor for the top-level Certificate message: `secret_name` (string, 1)
# and `dns_names` (repeated string, 2).
_CERTIFICATE = _descriptor.Descriptor(
  name='Certificate',
  full_name='istio.mesh.v1alpha1.Certificate',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='secret_name', full_name='istio.mesh.v1alpha1.Certificate.secret_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dns_names', full_name='istio.mesh.v1alpha1.Certificate.dns_names', index=1,
      number=2, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2793,
  serialized_end=2846,
)
# Cross-link the descriptors built above: set message/enum types on each field,
# set containing_type on nested descriptors, then register the file descriptor.
# Order matters -- all referenced descriptors must already exist.
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY.fields_by_name['mode'].enum_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY.containing_type = _MESHCONFIG
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE.containing_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY
# MeshConfig field types (durations, imported networking/proxy messages, enums).
_MESHCONFIG.fields_by_name['connect_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['protocol_detection_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['tcp_keepalive'].message_type = networking_dot_v1alpha3_dot_destination__rule__pb2._CONNECTIONPOOLSETTINGS_TCPSETTINGS_TCPKEEPALIVE
_MESHCONFIG.fields_by_name['ingress_controller_mode'].enum_type = _MESHCONFIG_INGRESSCONTROLLERMODE
_MESHCONFIG.fields_by_name['auth_policy'].enum_type = _MESHCONFIG_AUTHPOLICY
_MESHCONFIG.fields_by_name['rds_refresh_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['access_log_encoding'].enum_type = _MESHCONFIG_ACCESSLOGENCODING
_MESHCONFIG.fields_by_name['default_config'].message_type = mesh_dot_v1alpha1_dot_proxy__pb2._PROXYCONFIG
_MESHCONFIG.fields_by_name['outbound_traffic_policy'].message_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY
_MESHCONFIG.fields_by_name['sds_refresh_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['config_sources'].message_type = _CONFIGSOURCE
_MESHCONFIG.fields_by_name['enable_auto_mtls'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_MESHCONFIG.fields_by_name['locality_lb_setting'].message_type = networking_dot_v1alpha3_dot_destination__rule__pb2._LOCALITYLOADBALANCERSETTING
_MESHCONFIG.fields_by_name['dns_refresh_rate'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['report_batch_max_time'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['h2_upgrade_policy'].enum_type = _MESHCONFIG_H2UPGRADEPOLICY
_MESHCONFIG.fields_by_name['certificates'].message_type = _CERTIFICATE
# Parent links for MeshConfig's nested enums.
_MESHCONFIG_INGRESSCONTROLLERMODE.containing_type = _MESHCONFIG
_MESHCONFIG_AUTHPOLICY.containing_type = _MESHCONFIG
_MESHCONFIG_ACCESSLOGENCODING.containing_type = _MESHCONFIG
_MESHCONFIG_H2UPGRADEPOLICY.containing_type = _MESHCONFIG
# ConfigSource field types.
_CONFIGSOURCE.fields_by_name['tls_settings'].message_type = networking_dot_v1alpha3_dot_destination__rule__pb2._TLSSETTINGS
_CONFIGSOURCE.fields_by_name['subscribed_resources'].enum_type = _RESOURCE
# Expose top-level messages/enums on the file descriptor and register it.
DESCRIPTOR.message_types_by_name['MeshConfig'] = _MESHCONFIG
DESCRIPTOR.message_types_by_name['ConfigSource'] = _CONFIGSOURCE
DESCRIPTOR.message_types_by_name['Certificate'] = _CERTIFICATE
DESCRIPTOR.enum_types_by_name['Resource'] = _RESOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from the descriptors via the
# reflection metaclass, then register them with the symbol database.
# MeshConfig carries its nested OutboundTrafficPolicy class in its class dict.
MeshConfig = _reflection.GeneratedProtocolMessageType('MeshConfig', (_message.Message,), {

  'OutboundTrafficPolicy' : _reflection.GeneratedProtocolMessageType('OutboundTrafficPolicy', (_message.Message,), {
    'DESCRIPTOR' : _MESHCONFIG_OUTBOUNDTRAFFICPOLICY,
    '__module__' : 'mesh.v1alpha1.config_pb2'
    # @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy)
    })
  ,
  'DESCRIPTOR' : _MESHCONFIG,
  '__module__' : 'mesh.v1alpha1.config_pb2'
  # @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshConfig)
  })
_sym_db.RegisterMessage(MeshConfig)
_sym_db.RegisterMessage(MeshConfig.OutboundTrafficPolicy)

ConfigSource = _reflection.GeneratedProtocolMessageType('ConfigSource', (_message.Message,), {
  'DESCRIPTOR' : _CONFIGSOURCE,
  '__module__' : 'mesh.v1alpha1.config_pb2'
  # @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.ConfigSource)
  })
_sym_db.RegisterMessage(ConfigSource)

Certificate = _reflection.GeneratedProtocolMessageType('Certificate', (_message.Message,), {
  'DESCRIPTOR' : _CERTIFICATE,
  '__module__' : 'mesh.v1alpha1.config_pb2'
  # @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Certificate)
  })
_sym_db.RegisterMessage(Certificate)
# Drop the eagerly-built options objects so they are lazily re-parsed from the
# serialized bytes; the listed fields carry serialized options ('\030\001',
# which protoc emits for fields marked deprecated -- see their descriptors).
DESCRIPTOR._options = None
_MESHCONFIG.fields_by_name['auth_policy']._options = None
_MESHCONFIG.fields_by_name['rds_refresh_delay']._options = None
_MESHCONFIG.fields_by_name['mixer_address']._options = None
_MESHCONFIG.fields_by_name['sds_uds_path']._options = None
_MESHCONFIG.fields_by_name['sds_refresh_delay']._options = None
# @@protoc_insertion_point(module_scope)
| 52.57204 | 4,551 | 0.7674 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from mesh.v1alpha1 import proxy_pb2 as mesh_dot_v1alpha1_dot_proxy__pb2
from networking.v1alpha3 import destination_rule_pb2 as networking_dot_v1alpha3_dot_destination__rule__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mesh/v1alpha1/config.proto',
package='istio.mesh.v1alpha1',
syntax='proto3',
serialized_options=_b('Z\032istio.io/api/mesh/v1alpha1'),
serialized_pb=_b('\n\x1amesh/v1alpha1/config.proto\x12\x13istio.mesh.v1alpha1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x19mesh/v1alpha1/proxy.proto\x1a*networking/v1alpha3/destination_rule.proto\"\x8f\x13\n\nMeshConfig\x12\x1a\n\x12mixer_check_server\x18\x01 \x01(\t\x12\x1b\n\x13mixer_report_server\x18\x02 \x01(\t\x12\x1d\n\x15\x64isable_policy_checks\x18\x03 \x01(\x08\x12\"\n\x1a\x64isable_mixer_http_reports\x18\x30 \x01(\x08\x12\x1e\n\x16policy_check_fail_open\x18\x19 \x01(\x08\x12-\n%sidecar_to_telemetry_session_affinity\x18\x1e \x01(\x08\x12\x19\n\x11proxy_listen_port\x18\x04 \x01(\x05\x12\x17\n\x0fproxy_http_port\x18\x05 \x01(\x05\x12\x32\n\x0f\x63onnect_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12=\n\x1aprotocol_detection_timeout\x18* \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x61\n\rtcp_keepalive\x18\x1c \x01(\x0b\x32J.istio.networking.v1alpha3.ConnectionPoolSettings.TCPSettings.TcpKeepalive\x12\x15\n\ringress_class\x18\x07 \x01(\t\x12\x17\n\x0fingress_service\x18\x08 \x01(\t\x12V\n\x17ingress_controller_mode\x18\t \x01(\x0e\x32\x35.istio.mesh.v1alpha1.MeshConfig.IngressControllerMode\x12\x43\n\x0b\x61uth_policy\x18\n \x01(\x0e\x32*.istio.mesh.v1alpha1.MeshConfig.AuthPolicyB\x02\x18\x01\x12\x38\n\x11rds_refresh_delay\x18\x0b \x01(\x0b\x32\x19.google.protobuf.DurationB\x02\x18\x01\x12\x16\n\x0e\x65nable_tracing\x18\x0c \x01(\x08\x12\x17\n\x0f\x61\x63\x63\x65ss_log_file\x18\r \x01(\t\x12\x19\n\x11\x61\x63\x63\x65ss_log_format\x18\x18 \x01(\t\x12N\n\x13\x61\x63\x63\x65ss_log_encoding\x18\x1b \x01(\x0e\x32\x31.istio.mesh.v1alpha1.MeshConfig.AccessLogEncoding\x12\'\n\x1f\x65nable_envoy_access_log_service\x18( \x01(\x08\x12\x38\n\x0e\x64\x65\x66\x61ult_config\x18\x0e \x01(\x0b\x32 .istio.mesh.v1alpha1.ProxyConfig\x12\x19\n\rmixer_address\x18\x10 \x01(\tB\x02\x18\x01\x12V\n\x17outbound_traffic_policy\x18\x11 
\x01(\x0b\x32\x35.istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy\x12\'\n\x1f\x65nable_client_side_policy_check\x18\x13 \x01(\x08\x12\x18\n\x0csds_uds_path\x18\x14 \x01(\tB\x02\x18\x01\x12\x38\n\x11sds_refresh_delay\x18\x15 \x01(\x0b\x32\x19.google.protobuf.DurationB\x02\x18\x01\x12\x39\n\x0e\x63onfig_sources\x18\x16 \x03(\x0b\x32!.istio.mesh.v1alpha1.ConfigSource\x12\x34\n\x10\x65nable_auto_mtls\x18+ \x01(\x0b\x32\x1a.google.protobuf.BoolValue\x12\x1e\n\x16\x65nable_sds_token_mount\x18\x17 \x01(\x08\x12\x1a\n\x12sds_use_k8s_sa_jwt\x18\x1d \x01(\x08\x12\x14\n\x0ctrust_domain\x18\x1a \x01(\t\x12\x1c\n\x14trust_domain_aliases\x18. \x03(\t\x12!\n\x19\x64\x65\x66\x61ult_service_export_to\x18\x1f \x03(\t\x12)\n!default_virtual_service_export_to\x18 \x03(\t\x12*\n\"default_destination_rule_export_to\x18! \x03(\t\x12\x16\n\x0eroot_namespace\x18\" \x01(\t\x12S\n\x13locality_lb_setting\x18# \x01(\x0b\x32\x36.istio.networking.v1alpha3.LocalityLoadBalancerSetting\x12\x33\n\x10\x64ns_refresh_rate\x18$ \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x1c\n\x14\x64isable_report_batch\x18% \x01(\x08\x12 \n\x18report_batch_max_entries\x18& \x01(\r\x12\x38\n\x15report_batch_max_time\x18\' \x01(\x0b\x32\x19.google.protobuf.Duration\x12J\n\x11h2_upgrade_policy\x18) \x01(\x0e\x32/.istio.mesh.v1alpha1.MeshConfig.H2UpgradePolicy\x12!\n\x19inbound_cluster_stat_name\x18, \x01(\t\x12\"\n\x1aoutbound_cluster_stat_name\x18- \x01(\t\x12\x36\n\x0c\x63\x65rtificates\x18/ \x03(\x0b\x32 .istio.mesh.v1alpha1.Certificate\x1a\xa7\x01\n\x15OutboundTrafficPolicy\x12H\n\x04mode\x18\x01 
\x01(\x0e\x32:.istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.Mode\"D\n\x04Mode\x12\x11\n\rREGISTRY_ONLY\x10\x00\x12\r\n\tALLOW_ANY\x10\x01\"\x04\x08\x02\x10\x02*\x14VIRTUAL_SERVICE_ONLY\"9\n\x15IngressControllerMode\x12\x07\n\x03OFF\x10\x00\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x01\x12\n\n\x06STRICT\x10\x02\"&\n\nAuthPolicy\x12\x08\n\x04NONE\x10\x00\x12\x0e\n\nMUTUAL_TLS\x10\x01\"\'\n\x11\x41\x63\x63\x65ssLogEncoding\x12\x08\n\x04TEXT\x10\x00\x12\x08\n\x04JSON\x10\x01\"2\n\x0fH2UpgradePolicy\x12\x12\n\x0e\x44O_NOT_UPGRADE\x10\x00\x12\x0b\n\x07UPGRADE\x10\x01J\x04\x08\x0f\x10\x10J\x04\x08\x12\x10\x13\"\x9a\x01\n\x0c\x43onfigSource\x12\x0f\n\x07\x61\x64\x64ress\x18\x01 \x01(\t\x12<\n\x0ctls_settings\x18\x02 \x01(\x0b\x32&.istio.networking.v1alpha3.TLSSettings\x12;\n\x14subscribed_resources\x18\x03 \x03(\x0e\x32\x1d.istio.mesh.v1alpha1.Resource\"5\n\x0b\x43\x65rtificate\x12\x13\n\x0bsecret_name\x18\x01 \x01(\t\x12\x11\n\tdns_names\x18\x02 \x03(\t* \n\x08Resource\x12\x14\n\x10SERVICE_REGISTRY\x10\x00\x42\x1cZ\x1aistio.io/api/mesh/v1alpha1b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,mesh_dot_v1alpha1_dot_proxy__pb2.DESCRIPTOR,networking_dot_v1alpha3_dot_destination__rule__pb2.DESCRIPTOR,])
_RESOURCE = _descriptor.EnumDescriptor(
name='Resource',
full_name='istio.mesh.v1alpha1.Resource',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SERVICE_REGISTRY', index=0, number=0,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2848,
serialized_end=2880,
)
_sym_db.RegisterEnumDescriptor(_RESOURCE)
Resource = enum_type_wrapper.EnumTypeWrapper(_RESOURCE)
SERVICE_REGISTRY = 0
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE = _descriptor.EnumDescriptor(
name='Mode',
full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.Mode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REGISTRY_ONLY', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALLOW_ANY', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2362,
serialized_end=2430,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE)
_MESHCONFIG_INGRESSCONTROLLERMODE = _descriptor.EnumDescriptor(
name='IngressControllerMode',
full_name='istio.mesh.v1alpha1.MeshConfig.IngressControllerMode',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OFF', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRICT', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2432,
serialized_end=2489,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_INGRESSCONTROLLERMODE)
_MESHCONFIG_AUTHPOLICY = _descriptor.EnumDescriptor(
name='AuthPolicy',
full_name='istio.mesh.v1alpha1.MeshConfig.AuthPolicy',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MUTUAL_TLS', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2491,
serialized_end=2529,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_AUTHPOLICY)
_MESHCONFIG_ACCESSLOGENCODING = _descriptor.EnumDescriptor(
name='AccessLogEncoding',
full_name='istio.mesh.v1alpha1.MeshConfig.AccessLogEncoding',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TEXT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JSON', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2531,
serialized_end=2570,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_ACCESSLOGENCODING)
_MESHCONFIG_H2UPGRADEPOLICY = _descriptor.EnumDescriptor(
name='H2UpgradePolicy',
full_name='istio.mesh.v1alpha1.MeshConfig.H2UpgradePolicy',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DO_NOT_UPGRADE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPGRADE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2572,
serialized_end=2622,
)
_sym_db.RegisterEnumDescriptor(_MESHCONFIG_H2UPGRADEPOLICY)
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY = _descriptor.Descriptor(
name='OutboundTrafficPolicy',
full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mode', full_name='istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy.mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2263,
serialized_end=2430,
)
_MESHCONFIG = _descriptor.Descriptor(
name='MeshConfig',
full_name='istio.mesh.v1alpha1.MeshConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mixer_check_server', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_check_server', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixer_report_server', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_report_server', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_policy_checks', full_name='istio.mesh.v1alpha1.MeshConfig.disable_policy_checks', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_mixer_http_reports', full_name='istio.mesh.v1alpha1.MeshConfig.disable_mixer_http_reports', index=3,
number=48, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='policy_check_fail_open', full_name='istio.mesh.v1alpha1.MeshConfig.policy_check_fail_open', index=4,
number=25, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sidecar_to_telemetry_session_affinity', full_name='istio.mesh.v1alpha1.MeshConfig.sidecar_to_telemetry_session_affinity', index=5,
number=30, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_listen_port', full_name='istio.mesh.v1alpha1.MeshConfig.proxy_listen_port', index=6,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='proxy_http_port', full_name='istio.mesh.v1alpha1.MeshConfig.proxy_http_port', index=7,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connect_timeout', full_name='istio.mesh.v1alpha1.MeshConfig.connect_timeout', index=8,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_detection_timeout', full_name='istio.mesh.v1alpha1.MeshConfig.protocol_detection_timeout', index=9,
number=42, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tcp_keepalive', full_name='istio.mesh.v1alpha1.MeshConfig.tcp_keepalive', index=10,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_class', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_class', index=11,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_service', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_service', index=12,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ingress_controller_mode', full_name='istio.mesh.v1alpha1.MeshConfig.ingress_controller_mode', index=13,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auth_policy', full_name='istio.mesh.v1alpha1.MeshConfig.auth_policy', index=14,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rds_refresh_delay', full_name='istio.mesh.v1alpha1.MeshConfig.rds_refresh_delay', index=15,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_tracing', full_name='istio.mesh.v1alpha1.MeshConfig.enable_tracing', index=16,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_file', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_file', index=17,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_format', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_format', index=18,
number=24, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='access_log_encoding', full_name='istio.mesh.v1alpha1.MeshConfig.access_log_encoding', index=19,
number=27, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_envoy_access_log_service', full_name='istio.mesh.v1alpha1.MeshConfig.enable_envoy_access_log_service', index=20,
number=40, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_config', full_name='istio.mesh.v1alpha1.MeshConfig.default_config', index=21,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mixer_address', full_name='istio.mesh.v1alpha1.MeshConfig.mixer_address', index=22,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outbound_traffic_policy', full_name='istio.mesh.v1alpha1.MeshConfig.outbound_traffic_policy', index=23,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_client_side_policy_check', full_name='istio.mesh.v1alpha1.MeshConfig.enable_client_side_policy_check', index=24,
number=19, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_uds_path', full_name='istio.mesh.v1alpha1.MeshConfig.sds_uds_path', index=25,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_refresh_delay', full_name='istio.mesh.v1alpha1.MeshConfig.sds_refresh_delay', index=26,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\030\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='config_sources', full_name='istio.mesh.v1alpha1.MeshConfig.config_sources', index=27,
number=22, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_auto_mtls', full_name='istio.mesh.v1alpha1.MeshConfig.enable_auto_mtls', index=28,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_sds_token_mount', full_name='istio.mesh.v1alpha1.MeshConfig.enable_sds_token_mount', index=29,
number=23, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sds_use_k8s_sa_jwt', full_name='istio.mesh.v1alpha1.MeshConfig.sds_use_k8s_sa_jwt', index=30,
number=29, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trust_domain', full_name='istio.mesh.v1alpha1.MeshConfig.trust_domain', index=31,
number=26, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trust_domain_aliases', full_name='istio.mesh.v1alpha1.MeshConfig.trust_domain_aliases', index=32,
number=46, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_service_export_to', full_name='istio.mesh.v1alpha1.MeshConfig.default_service_export_to', index=33,
number=31, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_virtual_service_export_to', full_name='istio.mesh.v1alpha1.MeshConfig.default_virtual_service_export_to', index=34,
number=32, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_destination_rule_export_to', full_name='istio.mesh.v1alpha1.MeshConfig.default_destination_rule_export_to', index=35,
number=33, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='root_namespace', full_name='istio.mesh.v1alpha1.MeshConfig.root_namespace', index=36,
number=34, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='locality_lb_setting', full_name='istio.mesh.v1alpha1.MeshConfig.locality_lb_setting', index=37,
number=35, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dns_refresh_rate', full_name='istio.mesh.v1alpha1.MeshConfig.dns_refresh_rate', index=38,
number=36, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='disable_report_batch', full_name='istio.mesh.v1alpha1.MeshConfig.disable_report_batch', index=39,
number=37, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='report_batch_max_entries', full_name='istio.mesh.v1alpha1.MeshConfig.report_batch_max_entries', index=40,
number=38, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='report_batch_max_time', full_name='istio.mesh.v1alpha1.MeshConfig.report_batch_max_time', index=41,
number=39, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='h2_upgrade_policy', full_name='istio.mesh.v1alpha1.MeshConfig.h2_upgrade_policy', index=42,
number=41, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inbound_cluster_stat_name', full_name='istio.mesh.v1alpha1.MeshConfig.inbound_cluster_stat_name', index=43,
number=44, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outbound_cluster_stat_name', full_name='istio.mesh.v1alpha1.MeshConfig.outbound_cluster_stat_name', index=44,
number=45, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='certificates', full_name='istio.mesh.v1alpha1.MeshConfig.certificates', index=45,
number=47, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_MESHCONFIG_OUTBOUNDTRAFFICPOLICY, ],
enum_types=[
_MESHCONFIG_INGRESSCONTROLLERMODE,
_MESHCONFIG_AUTHPOLICY,
_MESHCONFIG_ACCESSLOGENCODING,
_MESHCONFIG_H2UPGRADEPOLICY,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=187,
serialized_end=2634,
)
_CONFIGSOURCE = _descriptor.Descriptor(
name='ConfigSource',
full_name='istio.mesh.v1alpha1.ConfigSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='address', full_name='istio.mesh.v1alpha1.ConfigSource.address', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tls_settings', full_name='istio.mesh.v1alpha1.ConfigSource.tls_settings', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribed_resources', full_name='istio.mesh.v1alpha1.ConfigSource.subscribed_resources', index=2,
number=3, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2637,
serialized_end=2791,
)
_CERTIFICATE = _descriptor.Descriptor(
name='Certificate',
full_name='istio.mesh.v1alpha1.Certificate',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='secret_name', full_name='istio.mesh.v1alpha1.Certificate.secret_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dns_names', full_name='istio.mesh.v1alpha1.Certificate.dns_names', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2793,
serialized_end=2846,
)
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY.fields_by_name['mode'].enum_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY.containing_type = _MESHCONFIG
_MESHCONFIG_OUTBOUNDTRAFFICPOLICY_MODE.containing_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY
_MESHCONFIG.fields_by_name['connect_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['protocol_detection_timeout'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['tcp_keepalive'].message_type = networking_dot_v1alpha3_dot_destination__rule__pb2._CONNECTIONPOOLSETTINGS_TCPSETTINGS_TCPKEEPALIVE
_MESHCONFIG.fields_by_name['ingress_controller_mode'].enum_type = _MESHCONFIG_INGRESSCONTROLLERMODE
_MESHCONFIG.fields_by_name['auth_policy'].enum_type = _MESHCONFIG_AUTHPOLICY
_MESHCONFIG.fields_by_name['rds_refresh_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['access_log_encoding'].enum_type = _MESHCONFIG_ACCESSLOGENCODING
_MESHCONFIG.fields_by_name['default_config'].message_type = mesh_dot_v1alpha1_dot_proxy__pb2._PROXYCONFIG
_MESHCONFIG.fields_by_name['outbound_traffic_policy'].message_type = _MESHCONFIG_OUTBOUNDTRAFFICPOLICY
_MESHCONFIG.fields_by_name['sds_refresh_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['config_sources'].message_type = _CONFIGSOURCE
_MESHCONFIG.fields_by_name['enable_auto_mtls'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_MESHCONFIG.fields_by_name['locality_lb_setting'].message_type = networking_dot_v1alpha3_dot_destination__rule__pb2._LOCALITYLOADBALANCERSETTING
_MESHCONFIG.fields_by_name['dns_refresh_rate'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['report_batch_max_time'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_MESHCONFIG.fields_by_name['h2_upgrade_policy'].enum_type = _MESHCONFIG_H2UPGRADEPOLICY
_MESHCONFIG.fields_by_name['certificates'].message_type = _CERTIFICATE
_MESHCONFIG_INGRESSCONTROLLERMODE.containing_type = _MESHCONFIG
_MESHCONFIG_AUTHPOLICY.containing_type = _MESHCONFIG
_MESHCONFIG_ACCESSLOGENCODING.containing_type = _MESHCONFIG
_MESHCONFIG_H2UPGRADEPOLICY.containing_type = _MESHCONFIG
_CONFIGSOURCE.fields_by_name['tls_settings'].message_type = networking_dot_v1alpha3_dot_destination__rule__pb2._TLSSETTINGS
_CONFIGSOURCE.fields_by_name['subscribed_resources'].enum_type = _RESOURCE
DESCRIPTOR.message_types_by_name['MeshConfig'] = _MESHCONFIG
DESCRIPTOR.message_types_by_name['ConfigSource'] = _CONFIGSOURCE
DESCRIPTOR.message_types_by_name['Certificate'] = _CERTIFICATE
DESCRIPTOR.enum_types_by_name['Resource'] = _RESOURCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MeshConfig = _reflection.GeneratedProtocolMessageType('MeshConfig', (_message.Message,), {
'OutboundTrafficPolicy' : _reflection.GeneratedProtocolMessageType('OutboundTrafficPolicy', (_message.Message,), {
'DESCRIPTOR' : _MESHCONFIG_OUTBOUNDTRAFFICPOLICY,
'__module__' : 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshConfig.OutboundTrafficPolicy)
})
,
'DESCRIPTOR' : _MESHCONFIG,
'__module__' : 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.MeshConfig)
})
_sym_db.RegisterMessage(MeshConfig)
_sym_db.RegisterMessage(MeshConfig.OutboundTrafficPolicy)
ConfigSource = _reflection.GeneratedProtocolMessageType('ConfigSource', (_message.Message,), {
'DESCRIPTOR' : _CONFIGSOURCE,
'__module__' : 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.ConfigSource)
})
_sym_db.RegisterMessage(ConfigSource)
Certificate = _reflection.GeneratedProtocolMessageType('Certificate', (_message.Message,), {
'DESCRIPTOR' : _CERTIFICATE,
'__module__' : 'mesh.v1alpha1.config_pb2'
# @@protoc_insertion_point(class_scope:istio.mesh.v1alpha1.Certificate)
})
_sym_db.RegisterMessage(Certificate)
DESCRIPTOR._options = None
_MESHCONFIG.fields_by_name['auth_policy']._options = None
_MESHCONFIG.fields_by_name['rds_refresh_delay']._options = None
_MESHCONFIG.fields_by_name['mixer_address']._options = None
_MESHCONFIG.fields_by_name['sds_uds_path']._options = None
_MESHCONFIG.fields_by_name['sds_refresh_delay']._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f72640bdf780be7bf1cfaf80fbf1f6bf7bf0616f | 1,758 | py | Python | fairseq/modules/fairseq_dropout.py | zhengzx-nlp/REDER | 7035e089e4d30b8090a2c3caa937b1e0ba27cedc | [
"MIT"
] | 18 | 2021-11-14T06:34:26.000Z | 2022-03-19T07:18:08.000Z | fairseq/modules/fairseq_dropout.py | zhengzx-nlp/REDER | 7035e089e4d30b8090a2c3caa937b1e0ba27cedc | [
"MIT"
] | 1 | 2021-12-03T07:23:36.000Z | 2021-12-10T08:32:36.000Z | fairseq/modules/fairseq_dropout.py | zhengzx-nlp/REDER | 7035e089e4d30b8090a2c3caa937b1e0ba27cedc | [
"MIT"
] | 2 | 2021-12-10T14:20:09.000Z | 2022-01-08T09:39:27.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
    """Dropout whose inference-time behaviour can be toggled per module.

    Acts like standard dropout while training. At inference time it is an
    identity op unless :meth:`make_generation_fast_` explicitly re-enables
    dropout for this module (``apply_during_inference``).
    """

    def __init__(self, p, module_name=None):
        super().__init__()
        self.p = p                            # drop probability
        self.module_name = module_name        # matched against retain_dropout_modules
        self.apply_during_inference = False   # opt-in dropout at inference time

    def forward(self, x, inplace: bool = False):
        """Apply dropout when training (or explicitly enabled); identity otherwise."""
        apply_dropout = self.training or self.apply_during_inference
        if not apply_dropout:
            return x
        # training=True forces F.dropout to actually drop even in eval mode.
        return F.dropout(x, p=self.p, training=True, inplace=inplace)

    def extra_repr(self) -> str:
        # Shown inside the module repr, e.g. "FairseqDropout(p=0.1)".
        return 'p={}'.format(self.p)

    def make_generation_fast_(
        self,
        name: str,
        retain_dropout: bool = False,
        retain_dropout_modules: Optional[List[str]] = None,
        **kwargs
    ):
        """Optionally keep dropout active at inference time for this module."""
        if not retain_dropout:
            logger.info('Disabling dropout for module: {}'.format(name))
            return
        if retain_dropout_modules is not None and self.module_name is None:
            # Cannot match against the retain list without a module name.
            logger.warning(
                'Cannot enable dropout during inference for module {} '
                'because module_name was not set'.format(name)
            )
        elif (
            retain_dropout_modules is None  # if None, apply to all modules
            or self.module_name in retain_dropout_modules
        ):
            logger.info(
                'Enabling dropout during inference for module: {}'.format(name)
            )
            self.apply_during_inference = True
| 31.392857 | 83 | 0.600683 |
import logging
from typing import List, Optional
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
class FairseqDropout(nn.Module):
    """Dropout layer that can optionally stay active at inference time."""
    def __init__(self, p, module_name=None):
        super().__init__()
        # p: drop probability; module_name: key used to match
        # retain_dropout_modules in make_generation_fast_().
        self.p = p
        self.module_name = module_name
        self.apply_during_inference = False
    def forward(self, x, inplace: bool = False):
        # Standard dropout while training; identity at inference unless
        # apply_during_inference was switched on via make_generation_fast_().
        if self.training or self.apply_during_inference:
            return F.dropout(x, p=self.p, training=True, inplace=inplace)
        else:
            return x
    def extra_repr(self) -> str:
        """Return the string appended to this module's repr()."""
        return 'p={}'.format(self.p)
    def make_generation_fast_(
        self,
        name: str,
        retain_dropout: bool = False,
        retain_dropout_modules: Optional[List[str]] = None,
        **kwargs
    ):
        """Optionally retain dropout during inference for selected modules."""
        if retain_dropout:
            if retain_dropout_modules is not None and self.module_name is None:
                # A retain list was given but this module has no name to match.
                logger.warning(
                    'Cannot enable dropout during inference for module {} '
                    'because module_name was not set'.format(name)
                )
            elif (
                retain_dropout_modules is None  # if None, apply to all modules
                or self.module_name in retain_dropout_modules
            ):
                logger.info(
                    'Enabling dropout during inference for module: {}'.format(name)
                )
                self.apply_during_inference = True
        else:
            logger.info('Disabling dropout for module: {}'.format(name))
| true | true |
f726415ccf6a26910901b63bb701354b030b9ab9 | 4,077 | py | Python | qa/rpc-tests/test_script_address2.py | jalcantara1983/atixcoin | d3e941bf1dd911c224bb66a3e82bfecf1a5fefe6 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_script_address2.py | jalcantara1983/atixcoin | d3e941bf1dd911c224bb66a3e82bfecf1a5fefe6 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_script_address2.py | jalcantara1983/atixcoin | d3e941bf1dd911c224bb66a3e82bfecf1a5fefe6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test new Atixcoin multisig prefix functionality.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
    """Functional test for the new 'Q' multisig (P2SH) address prefix.

    Verifies that the new-prefix encoding and the legacy '2...' encoding of
    the same redeem script are interchangeable for receiving and spending.
    """
    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False
    def setup_network(self):
        """Start three nodes; nodes 1 and 2 connect to node 0 (star topology)."""
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, []))
        self.nodes.append(start_node(2, self.options.tmpdir, []))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        """Send to / spend from new- and old-style multisig addresses."""
        cnt = self.nodes[0].getblockcount()
        # Mine some blocks
        self.nodes[1].generate(100)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 100):
            raise AssertionError("Failed to mine 100 blocks")
        addr = self.nodes[0].getnewaddress()
        addr2 = self.nodes[0].getnewaddress()
        multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")
        # New-style multisig addresses must start with the 'Q' prefix.
        assert_equal(multisig_addr[0], 'Q')
        # Send to a new multisig address
        txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
        block = self.nodes[1].generate(3)
        self.sync_all()
        tx = self.nodes[2].getrawtransaction(txid, 1)
        # The send produces two outputs (payment + change); find ours.
        dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
                      tx["vout"][1]['scriptPubKey']['addresses'][0]]
        assert(multisig_addr in dest_addrs)
        # Spend from the new multisig address
        addr3 = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
        block = self.nodes[0].generate(2)
        self.sync_all()
        assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
        assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
        # Send to an old multisig address. The api addmultisigaddress
        # can only generate a new address so we manually compute
        # multisig_addr_old beforehand using an old client.
        priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
                     "cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
        addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
                 "mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
        self.nodes[0].importprivkey(priv_keys[0])
        self.nodes[0].importprivkey(priv_keys[1])
        multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")
        # Fixed keys => deterministic address; pin it.
        assert_equal(multisig_addr_new, "QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe")
        multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
        ## Let's send to the old address. We can then find it in the
        ## new address with the new client. So basically the old
        ## address and the new one are the same thing.
        txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
        block = self.nodes[1].generate(1)
        self.sync_all()
        tx = self.nodes[2].getrawtransaction(txid, 1)
        dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
                      tx["vout"][1]['scriptPubKey']['addresses'][0]]
        assert(multisig_addr_new in dest_addrs)
        assert(multisig_addr_old not in dest_addrs)
        # Spend from the new multisig address
        addr4 = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
        block = self.nodes[0].generate(2)
        self.sync_all()
        assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
        assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
    ScriptAddress2Test().main()
| 40.366337 | 93 | 0.649497 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import decimal
class ScriptAddress2Test(BitcoinTestFramework):
    """Functional test for the new 'Q' multisig (P2SH) address prefix.

    Verifies that the new-prefix encoding and the legacy '2...' encoding of
    the same redeem script are interchangeable for receiving and spending.
    """
    def __init__(self):
        super().__init__()
        self.num_nodes = 3
        self.setup_clean_chain = False
    def setup_network(self):
        """Start three nodes; nodes 1 and 2 connect to node 0 (star topology)."""
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, []))
        self.nodes.append(start_node(1, self.options.tmpdir, []))
        self.nodes.append(start_node(2, self.options.tmpdir, []))
        connect_nodes(self.nodes[1], 0)
        connect_nodes(self.nodes[2], 0)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        """Send to / spend from new- and old-style multisig addresses."""
        cnt = self.nodes[0].getblockcount()
        # Mine 100 blocks so node 1 has spendable coins.
        self.nodes[1].generate(100)
        self.sync_all()
        if (self.nodes[0].getblockcount() != cnt + 100):
            raise AssertionError("Failed to mine 100 blocks")
        addr = self.nodes[0].getnewaddress()
        addr2 = self.nodes[0].getnewaddress()
        multisig_addr = self.nodes[0].addmultisigaddress(2, [addr, addr2], "multisigaccount")
        assert_equal(multisig_addr[0], 'Q')  # new multisig prefix
        # Send to the new multisig address and confirm it appears as a tx output.
        txid = self.nodes[1].sendtoaddress(multisig_addr, 1)
        block = self.nodes[1].generate(3)
        self.sync_all()
        tx = self.nodes[2].getrawtransaction(txid, 1)
        dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
                      tx["vout"][1]['scriptPubKey']['addresses'][0]]
        assert(multisig_addr in dest_addrs)
        # Spend from the new multisig address.
        addr3 = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendfrom("multisigaccount", addr3, 0.8)
        block = self.nodes[0].generate(2)
        self.sync_all()
        assert(self.nodes[0].getbalance("multisigaccount", 1) < 0.2)
        assert(self.nodes[1].listtransactions()[-1]['address'] == addr3)
        # Import fixed keys so the resulting multisig address is deterministic.
        # multisig_addr_old is the legacy '2...' encoding of the same script,
        # computed beforehand with an old client.
        priv_keys = ["cU7eeLPKzXeKMeZvnEJhvZZ3tLqVF3XGeo1BbM8dnbmV7pP3Qg89",
                     "cTw7mRhSvTfzqCt6MFgBoTBqwBpYu2rWugisXcwjv4cAASh3iqPt"]
        addrs = ["mj6gNGRXPXrD69R5ApjcsDerZGrYKSfb6v",
                 "mqET4JA3L7P7FoUjUP3F6m6YsLpCkyzzou"]
        self.nodes[0].importprivkey(priv_keys[0])
        self.nodes[0].importprivkey(priv_keys[1])
        multisig_addr_new = self.nodes[0].addmultisigaddress(2, addrs, "multisigaccount2")
        assert_equal(multisig_addr_new, "QZ974ZrPrmqMmm1PSVp4m8YEgo3bCQZBbe")
        multisig_addr_old = "2N5nLwYz9qfnGdaFLpPn3gS6oYQbmLTWPjq"
        ## Let's send to the old address. We can then find it in the
        ## new address with the new client. So basically the old
        ## address and the new one are the same thing.
        txid = self.nodes[1].sendtoaddress(multisig_addr_old, 1)
        block = self.nodes[1].generate(1)
        self.sync_all()
        tx = self.nodes[2].getrawtransaction(txid, 1)
        dest_addrs = [tx["vout"][0]['scriptPubKey']['addresses'][0],
                      tx["vout"][1]['scriptPubKey']['addresses'][0]]
        assert(multisig_addr_new in dest_addrs)
        assert(multisig_addr_old not in dest_addrs)
        # Spend from the (old == new) multisig address.
        addr4 = self.nodes[1].getnewaddress()
        txid = self.nodes[0].sendfrom("multisigaccount2", addr4, 0.8)
        block = self.nodes[0].generate(2)
        self.sync_all()
        assert(self.nodes[0].getbalance("multisigaccount2", 1) < 0.2)
        assert(self.nodes[1].listtransactions()[-1]['address'] == addr4)
if __name__ == '__main__':
    ScriptAddress2Test().main()
| true | true |
f7264187ecb771c3ebeca5e866aa88ceae28828f | 1,923 | py | Python | src/tfchain/polyfill/encoding/ipaddr.py | GlenDC/threefold-wallet-electron | 440662a793d98781eb3bbf415ba8a482abed0288 | [
"MIT"
] | null | null | null | src/tfchain/polyfill/encoding/ipaddr.py | GlenDC/threefold-wallet-electron | 440662a793d98781eb3bbf415ba8a482abed0288 | [
"MIT"
] | 201 | 2019-05-20T15:06:05.000Z | 2019-07-16T12:48:59.000Z | src/tfchain/polyfill/encoding/ipaddr.py | GlenDC/threefold-wallet-electron | 440662a793d98781eb3bbf415ba8a482abed0288 | [
"MIT"
] | 1 | 2019-12-20T21:45:39.000Z | 2019-12-20T21:45:39.000Z | from tfchain.polyfill.encoding.jsmods.ipaddrjs import api as ipaddrjs
import tfchain.polyfill.array as jsarr
class IPAddress:
    """Wrapper around the JavaScript ``ipaddr.js`` library.

    NOTE(review): the ``__pragma__("js", ...)`` calls are Transcrypt escape
    hatches embedding raw JavaScript; this class is meant to be transpiled
    to JS, not executed by CPython.
    """
    def __init__(self, value):
        """Parse *value* into an ipaddrjs address object.

        Accepts a string, a bytes-like value (bytes/bytearray/Uint8Array),
        or another IPAddress (copies its parsed value).

        Raises:
            ValueError: if the string/byte form is not a valid IP address.
            TypeError: for unsupported input types.
        """
        if isinstance(value, str):
            v = None
            err = None
            __pragma__("js", "{}", """
            try {
                v = ipaddrjs.parse(value);
            } catch(e) {
                err = e;
            }
            """)
            if err != None:
                raise ValueError("invalid str value {}: {}".format(value, err))
            self._value = v
        elif isinstance(value, (bytes, bytearray)) or jsarr.is_uint8_array(value):
            v = None
            err = None
            __pragma__("js", "{}", """
            try {
                v = ipaddrjs.fromByteArray(value);
            } catch(e) {
                err = e;
            }
            """)
            if err != None:
                raise ValueError("invalid raw value {}: {}".format(value, err))
            self._value = v
        elif isinstance(value, IPAddress):
            self._value = value.value
        else:
            raise TypeError("value {} of type {} is not supported as an IPAddress".format(value, type(value)))
    @property
    def value(self):
        """The underlying ipaddrjs address object."""
        return self._value
    def is_ipv4(self):
        """Return True if the wrapped address is IPv4."""
        result = None
        v = self._value
        __pragma__("js", "{}", """
        result = v.constructor === ipaddrjs.IPv4;
        """)
        return result
    def is_ipv6(self):
        """Return True if the wrapped address is IPv6."""
        result = None
        v = self._value
        __pragma__("js", "{}", """
        result = v.constructor === ipaddrjs.IPv6;
        """)
        return result
    def __str__(self):
        return self._value.toString()
    def str(self):
        """Canonical string form of the address (alias for str(self))."""
        return self.__str__()
    def bytes(self):
        """Return the address as a Uint8Array of raw bytes."""
        v = self._value
        __pragma__("js", "{}", """
        v = new Uint8Array(v.toByteArray());
        """)
        return v
| 27.869565 | 110 | 0.481019 | from tfchain.polyfill.encoding.jsmods.ipaddrjs import api as ipaddrjs
import tfchain.polyfill.array as jsarr
class IPAddress:
    """Wrapper around the JavaScript ``ipaddr.js`` library (Transcrypt).

    NOTE(review): ``__pragma__("js", ...)`` embeds raw JavaScript; only
    meaningful after transpilation with Transcrypt.
    """
    def __init__(self, value):
        """Parse a str, bytes-like, Uint8Array, or IPAddress into ipaddrjs form."""
        if isinstance(value, str):
            v = None
            err = None
            __pragma__("js", "{}", """
            try {
                v = ipaddrjs.parse(value);
            } catch(e) {
                err = e;
            }
            """)
            if err != None:
                raise ValueError("invalid str value {}: {}".format(value, err))
            self._value = v
        elif isinstance(value, (bytes, bytearray)) or jsarr.is_uint8_array(value):
            v = None
            err = None
            __pragma__("js", "{}", """
            try {
                v = ipaddrjs.fromByteArray(value);
            } catch(e) {
                err = e;
            }
            """)
            if err != None:
                raise ValueError("invalid raw value {}: {}".format(value, err))
            self._value = v
        elif isinstance(value, IPAddress):
            self._value = value.value
        else:
            raise TypeError("value {} of type {} is not supported as an IPAddress".format(value, type(value)))
    @property
    def value(self):
        """The underlying ipaddrjs address object."""
        return self._value
    def is_ipv4(self):
        """Return True if the wrapped address is IPv4."""
        result = None
        v = self._value
        __pragma__("js", "{}", """
        result = v.constructor === ipaddrjs.IPv4;
        """)
        return result
    def is_ipv6(self):
        """Return True if the wrapped address is IPv6."""
        result = None
        v = self._value
        __pragma__("js", "{}", """
        result = v.constructor === ipaddrjs.IPv6;
        """)
        return result
    def __str__(self):
        return self._value.toString()
    def str(self):
        """Canonical string form of the address (alias for str(self))."""
        return self.__str__()
    def bytes(self):
        """Return the address as a Uint8Array of raw bytes."""
        v = self._value
        __pragma__("js", "{}", """
        v = new Uint8Array(v.toByteArray());
        """)
        return v
| true | true |
f7264223ea3b2e4a8450d6eba91beec89e57b290 | 15,290 | py | Python | tensorflow/network.py | EricPedley/FCRN-DepthPrediction | 93aaed329e9e071c6d5c5a59e77a73a09684b156 | [
"BSD-2-Clause"
] | null | null | null | tensorflow/network.py | EricPedley/FCRN-DepthPrediction | 93aaed329e9e071c6d5c5a59e77a73a09684b156 | [
"BSD-2-Clause"
] | null | null | null | tensorflow/network.py | EricPedley/FCRN-DepthPrediction | 93aaed329e9e071c6d5c5a59e77a73a09684b156 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# ----------------------------------------------------------------------------------
# Commonly used layers and operations based on ethereon's implementation
# https://github.com/ethereon/caffe-tensorflow
# Slight modifications may apply. FCRN-specific operations have also been appended.
# ----------------------------------------------------------------------------------
# Thanks to *Helisa Dhamo* for the model conversion and integration into TensorFlow.
# ----------------------------------------------------------------------------------
DEFAULT_PADDING = 'SAME'
def get_incoming_shape(incoming):
    """Return the static shape of *incoming*.

    Accepts a tf.Tensor (shape via get_shape, returned as a list) or a
    plain Python list/tuple/numpy ndarray (shape via np.shape, a tuple).

    Raises:
        Exception: if *incoming* is of an unsupported type.
    """
    # Plain containers are checked first so no TF graph is required for them.
    # FIX: the original tested `type(incoming) in [np.array, list, tuple]`,
    # but np.array is a *function*, not a type, so real ndarrays were
    # rejected; isinstance against np.ndarray handles them correctly.
    if isinstance(incoming, (list, tuple, np.ndarray)):
        return np.shape(incoming)
    if isinstance(incoming, tf.Tensor):
        return incoming.get_shape().as_list()
    raise Exception("Invalid incoming layer.")
def interleave(tensors, axis):
    """Interleave a list of same-shaped tensors element-wise along *axis*.

    The batch dimension is inferred (-1); the target dimension grows by a
    factor of len(tensors).
    """
    per_tensor_shape = get_incoming_shape(tensors[0])[1:]
    merged_shape = [-1] + per_tensor_shape
    merged_shape[axis] = merged_shape[axis] * len(tensors)
    stacked = tf.stack(tensors, axis + 1)
    return tf.reshape(stacked, merged_shape)
def layer(op):
    """Decorator turning *op* into a chainable network-layer method.

    The wrapper feeds the current terminal node(s) into *op*, registers the
    result in ``self.layers`` under a (possibly auto-generated) name, makes
    it the new terminal, and returns ``self`` for call chaining.
    """
    def layer_decorated(self, *args, **kwargs):
        # Derive a unique name when the caller did not pass one.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        # The pending terminal node(s) become this layer's input.
        if len(self.terminals) == 0:
            raise RuntimeError('No input variables found for layer %s.' % name)
        if len(self.terminals) == 1:
            op_input = self.terminals[0]
        else:
            op_input = list(self.terminals)
        # Run the wrapped op, register its output, and make it the new input.
        op_output = op(self, op_input, *args, **kwargs)
        self.layers[name] = op_output
        self.feed(op_output)
        return self  # enable chained calls
    return layer_decorated
class Network(object):
    """Base class for FCRN-style networks built with TF1 variable scopes.

    Subclasses implement :meth:`setup` and describe the graph with chained
    calls such as ``self.feed('x').conv(...).max_pool(...)``; each
    ``@layer``-decorated method consumes ``self.terminals`` and registers
    its output in ``self.layers``.
    """
    def __init__(self, inputs, batch, keep_prob, is_training, trainable = True):
        # The input nodes for this network
        self.inputs = inputs
        # The current list of terminal nodes
        self.terminals = []
        # Mapping from layer names to layers
        self.layers = dict(inputs)
        # If true, the resulting variables are set as trainable
        self.trainable = trainable
        self.batch_size = batch
        self.keep_prob = keep_prob      # dropout keep probability
        self.is_training = is_training  # selects batch vs. population BN statistics
        self.setup()
    def setup(self):
        '''Construct the network. '''
        raise NotImplementedError('Must be implemented by the subclass.')
    def load(self, data_path, session, ignore_missing=False):
        '''Load network weights.
        data_path: The path to the numpy-serialized network weights
        session: The current TensorFlow session
        ignore_missing: If true, serialized weights for missing layers are ignored.
        '''
        data_dict = np.load(data_path, encoding='latin1').item()
        for op_name in data_dict:
            with tf.variable_scope(op_name, reuse=True):
                for param_name, data in iter(data_dict[op_name].items()):
                    try:
                        var = tf.get_variable(param_name)
                        session.run(var.assign(data))
                    except ValueError:
                        # Variable not present in this graph.
                        if not ignore_missing:
                            raise
    def feed(self, *args):
        '''Set the input(s) for the next operation by replacing the terminal nodes.
        The arguments can be either layer names or the actual layers.
        '''
        assert len(args) != 0
        self.terminals = []
        for fed_layer in args:
            if isinstance(fed_layer, str):
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError('Unknown layer name fed: %s' % fed_layer)
            self.terminals.append(fed_layer)
        return self
    def get_output(self):
        '''Returns the current network output.'''
        return self.terminals[-1]
    def get_layer_output(self, name):
        '''Returns the output tensor registered under *name*.'''
        return self.layers[name]
    def get_unique_name(self, prefix):
        '''Returns an index-suffixed unique name for the given prefix.
        This is used for auto-generating layer names based on the type-prefix.
        '''
        ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
        return '%s_%d' % (prefix, ident)
    def make_var(self, name, shape):
        '''Creates a new TensorFlow variable.'''
        return tf.get_variable(name, shape, dtype = 'float32', trainable=self.trainable)
    def validate_padding(self, padding):
        '''Verifies that the padding is one of the supported ones.'''
        assert padding in ('SAME', 'VALID')
    @layer
    def conv(self,
             input_data,
             k_h,
             k_w,
             c_o,
             s_h,
             s_w,
             name,
             relu=True,
             padding=DEFAULT_PADDING,
             group=1,
             biased=True):
        '''2-D convolution with optional grouping, bias and ReLU.
        SAME padding is applied manually via tf.pad before a VALID conv.'''
        # Verify that the padding is acceptable
        self.validate_padding(padding)
        # Get the number of channels in the input
        c_i = input_data.get_shape()[-1]
        if (padding == 'SAME'):
            input_data = tf.pad(input_data, [[0, 0], [(k_h - 1)//2, (k_h - 1)//2], [(k_w - 1)//2, (k_w - 1)//2], [0, 0]], "CONSTANT")
        # Verify that the grouping parameter is valid
        assert c_i % group == 0
        assert c_o % group == 0
        # Convolution for a given input and kernel
        convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID')
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
            if group == 1:
                # This is the common-case. Convolve the input without any further complications.
                output = convolve(input_data, kernel)
            else:
                # Split the input into groups and then convolve each of them independently.
                # FIX: the original used the pre-TF-1.0 argument order
                # tf.split(axis, num, value) / tf.concat(axis, values), which
                # is invalid under tf.compat.v1 (signature is (value, num, axis)).
                input_groups = tf.split(input_data, group, axis=3)
                kernel_groups = tf.split(kernel, group, axis=3)
                output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                # Concatenate the groups
                output = tf.concat(output_groups, axis=3)
            # Add the biases
            if biased:
                biases = self.make_var('biases', [c_o])
                output = tf.nn.bias_add(output, biases)
            if relu:
                # ReLU non-linearity
                output = tf.nn.relu(output, name=scope.name)
            return output
    @layer
    def relu(self, input_data, name):
        '''Element-wise ReLU.'''
        return tf.nn.relu(input_data, name=name)
    @layer
    def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        '''Max pooling with kernel (k_h, k_w) and stride (s_h, s_w).'''
        self.validate_padding(padding)
        return tf.nn.max_pool(input_data,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)
    @layer
    def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        '''Average pooling with kernel (k_h, k_w) and stride (s_h, s_w).'''
        self.validate_padding(padding)
        return tf.nn.avg_pool(input_data,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding,
                              name=name)
    @layer
    def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):
        '''Local response normalization.'''
        return tf.nn.local_response_normalization(input_data,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias,
                                                  name=name)
    @layer
    def concat(self, inputs, axis, name):
        '''Concatenate the fed inputs along *axis*.'''
        # FIX: the `concat_dim=` keyword predates TF 1.0 and does not exist
        # in tf.compat.v1.concat; the keyword is `axis`.
        return tf.concat(values=inputs, axis=axis, name=name)
    @layer
    def add(self, inputs, name):
        '''Element-wise sum of the fed inputs.'''
        return tf.add_n(inputs, name=name)
    @layer
    def fc(self, input_data, num_out, name, relu=True):
        '''Fully-connected layer with *num_out* units; flattens 4-D input.'''
        with tf.variable_scope(name) as scope:
            input_shape = input_data.get_shape()
            if input_shape.ndims == 4:
                # The input is spatial. Vectorize it first.
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input_data, [-1, dim])
            else:
                feed_in, dim = (input_data, input_shape[-1].value)
            weights = self.make_var('weights', shape=[dim, num_out])
            biases = self.make_var('biases', [num_out])
            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
    @layer
    def softmax(self, input_data, name):
        '''Softmax over the last dimension; squeezes 1x1 spatial dims first.'''
        # FIX: the original used map(), which returns a lazy iterator in
        # Python 3 — the len()/indexing below would raise TypeError. Build
        # a concrete list of dimension values instead.
        input_shape = [v.value for v in input_data.get_shape()]
        if len(input_shape) > 2:
            # For certain models (like NiN), the singleton spatial dimensions
            # need to be explicitly squeezed, since they're not broadcast-able
            # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
            if input_shape[1] == 1 and input_shape[2] == 1:
                input_data = tf.squeeze(input_data, squeeze_dims=[1, 2])
            else:
                raise ValueError('Rank 2 tensor input expected for softmax!')
        return tf.nn.softmax(input_data, name)
    @layer
    def batch_normalization(self, input_data, name, scale_offset=True, relu=False):
        '''Batch normalization with manually tracked population statistics.
        Uses batch moments while training (updating running averages with
        decay 0.999) and the stored population statistics otherwise.'''
        with tf.variable_scope(name) as scope:
            shape = [input_data.get_shape()[-1]]
            pop_mean = tf.get_variable("mean", shape, initializer = tf.constant_initializer(0.0), trainable=False)
            pop_var = tf.get_variable("variance", shape, initializer = tf.constant_initializer(1.0), trainable=False)
            epsilon = 1e-4
            decay = 0.999
            if scale_offset:
                scale = tf.get_variable("scale", shape, initializer = tf.constant_initializer(1.0))
                offset = tf.get_variable("offset", shape, initializer = tf.constant_initializer(0.0))
            else:
                scale, offset = (None, None)
            if self.is_training:
                batch_mean, batch_var = tf.nn.moments(input_data, [0, 1, 2])
                train_mean = tf.assign(pop_mean,
                                       pop_mean * decay + batch_mean * (1 - decay))
                train_var = tf.assign(pop_var,
                                      pop_var * decay + batch_var * (1 - decay))
                # Ensure the running averages are updated before normalizing.
                with tf.control_dependencies([train_mean, train_var]):
                    output = tf.nn.batch_normalization(input_data,
                        batch_mean, batch_var, offset, scale, epsilon, name = name)
            else:
                output = tf.nn.batch_normalization(input_data,
                    pop_mean, pop_var, offset, scale, epsilon, name = name)
            if relu:
                output = tf.nn.relu(output)
            return output
    @layer
    def dropout(self, input_data, keep_prob, name):
        '''Dropout with the given keep probability.'''
        return tf.nn.dropout(input_data, keep_prob, name=name)
    def unpool_as_conv(self, size, input_data, id, stride = 1, ReLU = False, BN = True):
        '''Model an upconvolution (unpooling + conv) as four interleaved
        convolutions (A: 3x3, B: 2x3, C: 3x2, D: 2x2). Building block for
        up-projections.'''
        # Convolution A (3x3)
        # --------------------------------------------------
        layerName = "layer%s_ConvA" % (id)
        self.feed(input_data)
        self.conv( 3, 3, size[3], stride, stride, name = layerName, padding = 'SAME', relu = False)
        outputA = self.get_output()
        # Convolution B (2x3)
        # --------------------------------------------------
        layerName = "layer%s_ConvB" % (id)
        padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], "CONSTANT")
        self.feed(padded_input_B)
        self.conv(2, 3, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
        outputB = self.get_output()
        # Convolution C (3x2)
        # --------------------------------------------------
        layerName = "layer%s_ConvC" % (id)
        padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], "CONSTANT")
        self.feed(padded_input_C)
        self.conv(3, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
        outputC = self.get_output()
        # Convolution D (2x2)
        # --------------------------------------------------
        layerName = "layer%s_ConvD" % (id)
        padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], "CONSTANT")
        self.feed(padded_input_D)
        self.conv(2, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
        outputD = self.get_output()
        # Interleaving elements of the four feature maps
        # --------------------------------------------------
        left = interleave([outputA, outputB], axis=1)  # columns
        right = interleave([outputC, outputD], axis=1)  # columns
        Y = interleave([left, right], axis=2) # rows
        if BN:
            layerName = "layer%s_BN" % (id)
            self.feed(Y)
            self.batch_normalization(name = layerName, scale_offset = True, relu = False)
            Y = self.get_output()
        if ReLU:
            Y = tf.nn.relu(Y, name = layerName)
        return Y
    def up_project(self, size, id, stride = 1, BN = True):
        '''Residual upsampling block (UpProjection): two unpool_as_conv
        branches summed and passed through ReLU.'''
        # Create residual upsampling layer (UpProjection)
        input_data = self.get_output()
        # Branch 1
        id_br1 = "%s_br1" % (id)
        # Interleaving Convs of 1st branch
        out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)
        # Convolution following the upProjection on the 1st branch
        layerName = "layer%s_Conv" % (id)
        self.feed(out)
        self.conv(size[0], size[1], size[3], stride, stride, name = layerName, relu = False)
        if BN:
            layerName = "layer%s_BN" % (id)
            self.batch_normalization(name = layerName, scale_offset=True, relu = False)
        # Output of 1st branch
        branch1_output = self.get_output()
        # Branch 2
        id_br2 = "%s_br2" % (id)
        # Interleaving convolutions and output of 2nd branch
        branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)
        # sum branches
        layerName = "layer%s_Sum" % (id)
        output = tf.add_n([branch1_output, branch2_output], name = layerName)
        # ReLU
        layerName = "layer%s_ReLU" % (id)
        output = tf.nn.relu(output, name=layerName)
        self.feed(output)
        return self
| 39.205128 | 133 | 0.548136 | import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# https://github.com/ethereon/caffe-tensorflow
# Slight modifications may apply. FCRN-specific operations have also been appended.
# ----------------------------------------------------------------------------------
# Thanks to *Helisa Dhamo* for the model conversion and integration into TensorFlow.
# ----------------------------------------------------------------------------------
DEFAULT_PADDING = 'SAME'
def get_incoming_shape(incoming):
    """Return the static shape of *incoming*.

    tf.Tensor -> list of ints (static shape); list/tuple/np.ndarray ->
    tuple from np.shape. Raises Exception for anything else.
    """
    # FIX: `type(incoming) in [np.array, list, tuple]` compared against
    # np.array, which is a function rather than a type, so genuine numpy
    # arrays were rejected; use isinstance with np.ndarray.
    if isinstance(incoming, (list, tuple, np.ndarray)):
        return np.shape(incoming)
    if isinstance(incoming, tf.Tensor):
        return incoming.get_shape().as_list()
    raise Exception("Invalid incoming layer.")
def interleave(tensors, axis):
    """Interleave equal-shaped tensors along *axis*; batch dim inferred as -1."""
    shape_tail = get_incoming_shape(tensors[0])[1:]
    out_shape = [-1] + shape_tail
    out_shape[axis] = out_shape[axis] * len(tensors)
    return tf.reshape(tf.stack(tensors, axis + 1), out_shape)
def layer(op):
    """Decorator that makes *op* a chainable layer method on a Network."""
    def layer_decorated(self, *args, **kwargs):
        # Use the caller-supplied name, or synthesize a unique one.
        name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
        n_terminals = len(self.terminals)
        if n_terminals == 0:
            raise RuntimeError('No input variables found for layer %s.' % name)
        # Single terminal is passed directly; multiple as a list copy.
        op_input = self.terminals[0] if n_terminals == 1 else list(self.terminals)
        # Execute the op, record its output, and chain it as the next input.
        op_output = op(self, op_input, *args, **kwargs)
        self.layers[name] = op_output
        self.feed(op_output)
        return self
    return layer_decorated
class Network(object):
def __init__(self, inputs, batch, keep_prob, is_training, trainable = True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.batch_size = batch
self.keep_prob = keep_prob
self.is_training = is_training
self.setup()
def setup(self):
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
data_dict = np.load(data_path, encoding='latin1').item()
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in iter(data_dict[op_name].items()):
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
return self.terminals[-1]
def get_layer_output(self, name):
return self.layers[name]
def get_unique_name(self, prefix):
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
return tf.get_variable(name, shape, dtype = 'float32', trainable=self.trainable)
def validate_padding(self, padding):
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
input_data,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding=DEFAULT_PADDING,
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = input_data.get_shape()[-1]
if (padding == 'SAME'):
input_data = tf.pad(input_data, [[0, 0], [(k_h - 1)//2, (k_h - 1)//2], [(k_w - 1)//2, (k_w - 1)//2], [0, 0]], "CONSTANT")
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID')
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
if group == 1:
# This is the common-case. Convolve the input without any further complications.
output = convolve(input_data, kernel)
else:
# Split the input into groups and then convolve each of them independently
input_groups = tf.split(3, group, input_data)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
# Concatenate the groups
output = tf.concat(3, output_groups)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def relu(self, input_data, name):
return tf.nn.relu(input_data, name=name)
@layer
def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input_data,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input_data,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):
return tf.nn.local_response_normalization(input_data,
depth_radius=radius,
alpha=alpha,
beta=beta,
bias=bias,
name=name)
@layer
def concat(self, inputs, axis, name):
return tf.concat(concat_dim=axis, values=inputs, name=name)
@layer
def add(self, inputs, name):
return tf.add_n(inputs, name=name)
@layer
def fc(self, input_data, num_out, name, relu=True):
with tf.variable_scope(name) as scope:
input_shape = input_data.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(input_data, [-1, dim])
else:
feed_in, dim = (input_data, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=scope.name)
return fc
@layer
def softmax(self, input_data, name):
input_shape = map(lambda v: v.value, input_data.get_shape())
if len(input_shape) > 2:
# For certain models (like NiN), the singleton spatial dimensions
# need to be explicitly squeezed, since they're not broadcast-able
if input_shape[1] == 1 and input_shape[2] == 1:
input_data = tf.squeeze(input_data, squeeze_dims=[1, 2])
else:
raise ValueError('Rank 2 tensor input expected for softmax!')
return tf.nn.softmax(input_data, name)
@layer
def batch_normalization(self, input_data, name, scale_offset=True, relu=False):
    """Batch normalization with exponential-moving-average population stats.

    During training the batch moments are used and the population mean /
    variance variables are updated as a side effect; at inference the frozen
    population statistics are used instead.
    """
    with tf.variable_scope(name) as scope:
        # One statistic per channel (last dimension).
        shape = [input_data.get_shape()[-1]]
        # Population statistics: non-trainable, updated only via tf.assign below.
        pop_mean = tf.get_variable("mean", shape, initializer = tf.constant_initializer(0.0), trainable=False)
        pop_var = tf.get_variable("variance", shape, initializer = tf.constant_initializer(1.0), trainable=False)
        epsilon = 1e-4
        decay = 0.999  # EMA decay for the population statistics
        if scale_offset:
            scale = tf.get_variable("scale", shape, initializer = tf.constant_initializer(1.0))
            offset = tf.get_variable("offset", shape, initializer = tf.constant_initializer(0.0))
        else:
            scale, offset = (None, None)
        if self.is_training:
            # Batch moments over N, H, W axes.
            batch_mean, batch_var = tf.nn.moments(input_data, [0, 1, 2])
            train_mean = tf.assign(pop_mean,
                                   pop_mean * decay + batch_mean * (1 - decay))
            train_var = tf.assign(pop_var,
                                  pop_var * decay + batch_var * (1 - decay))
            # control_dependencies forces the EMA updates to run whenever the
            # normalized output is evaluated.
            with tf.control_dependencies([train_mean, train_var]):
                output = tf.nn.batch_normalization(input_data,
                        batch_mean, batch_var, offset, scale, epsilon, name = name)
        else:
            output = tf.nn.batch_normalization(input_data,
                    pop_mean, pop_var, offset, scale, epsilon, name = name)
        if relu:
            output = tf.nn.relu(output)
        return output
@layer
def dropout(self, input_data, keep_prob, name):
    """Dropout layer keeping each element with probability ``keep_prob``."""
    dropped = tf.nn.dropout(input_data, keep_prob, name=name)
    return dropped
def unpool_as_conv(self, size, input_data, id, stride = 1, ReLU = False, BN = True):
    """2x upsampling implemented as four interleaved convolutions.

    Four convolutions (3x3, 2x3, 3x2, 2x2) over differently padded copies of
    the input are interleaved so each contributes one position of every 2x2
    output block, doubling height and width.
    NOTE(review): presumably the "up-convolution" decomposition from
    FCRN-style up-projection blocks -- confirm against the source paper.
    """
    # Convolution A (3x3): no padding changes, SAME padding.
    layerName = "layer%s_ConvA" % (id)
    self.feed(input_data)
    self.conv( 3, 3, size[3], stride, stride, name = layerName, padding = 'SAME', relu = False)
    outputA = self.get_output()
    # Convolution B (2x3): pad 1 row on top, 1 column on each side.
    layerName = "layer%s_ConvB" % (id)
    padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], "CONSTANT")
    self.feed(padded_input_B)
    self.conv(2, 3, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
    outputB = self.get_output()
    # Convolution C (3x2): pad 1 row top and bottom, 1 column on the left.
    layerName = "layer%s_ConvC" % (id)
    padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], "CONSTANT")
    self.feed(padded_input_C)
    self.conv(3, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
    outputC = self.get_output()
    # Convolution D (2x2): pad 1 row on top, 1 column on the left.
    layerName = "layer%s_ConvD" % (id)
    padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], "CONSTANT")
    self.feed(padded_input_D)
    self.conv(2, 2, size[3], stride, stride, name = layerName, padding = 'VALID', relu = False)
    outputD = self.get_output()
    # Interleave along height then width to assemble the 2x-upsampled map.
    left = interleave([outputA, outputB], axis=1)
    right = interleave([outputC, outputD], axis=1)
    Y = interleave([left, right], axis=2)
    if BN:
        layerName = "layer%s_BN" % (id)
        self.feed(Y)
        self.batch_normalization(name = layerName, scale_offset = True, relu = False)
        Y = self.get_output()
    if ReLU:
        Y = tf.nn.relu(Y, name = layerName)
    return Y
def up_project(self, size, id, stride = 1, BN = True):
    """Residual up-projection block: two unpool-as-conv branches summed, then ReLU."""
    input_data = self.get_output()
    # Branch 1: upsample with ReLU, then an extra convolution (+ optional BN).
    id_br1 = "%s_br1" % (id)
    out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)
    layerName = "layer%s_Conv" % (id)
    self.feed(out)
    self.conv(size[0], size[1], size[3], stride, stride, name = layerName, relu = False)
    if BN:
        layerName = "layer%s_BN" % (id)
        self.batch_normalization(name = layerName, scale_offset=True, relu = False)
    branch1_output = self.get_output()
    # Branch 2: plain upsampling (projection shortcut), no ReLU.
    id_br2 = "%s_br2" % (id)
    branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)
    # Sum both branches and apply the final activation.
    layerName = "layer%s_Sum" % (id)
    output = tf.add_n([branch1_output, branch2_output], name = layerName)
    layerName = "layer%s_ReLU" % (id)
    output = tf.nn.relu(output, name=layerName)
    self.feed(output)
    return self
| true | true |
f726433b3c15d6223a75c1dacfab5a53d9b7791b | 350 | py | Python | app/__init__.py | ppyvras/flask_tutorial | 2c73d32c33fb80ef59bee8753500220afdd91cee | [
"MIT"
] | null | null | null | app/__init__.py | ppyvras/flask_tutorial | 2c73d32c33fb80ef59bee8753500220afdd91cee | [
"MIT"
] | null | null | null | app/__init__.py | ppyvras/flask_tutorial | 2c73d32c33fb80ef59bee8753500220afdd91cee | [
"MIT"
] | null | null | null | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
from app import routes, models
| 20.588235 | 39 | 0.805714 | from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
login = LoginManager(app)
login.login_view = 'login'
from app import routes, models
| true | true |
f7264465fb955d98f2220ac0a57a91bd63fff024 | 1,331 | py | Python | xlsxwriter/test/comparison/test_cond_format06.py | eddiechapman/XlsxWriter | c636117ab30e64e4b7b824c9105595c42887c2c9 | [
"BSD-2-Clause-FreeBSD"
] | 2,766 | 2015-01-02T17:36:42.000Z | 2022-03-31T09:23:30.000Z | xlsxwriter/test/comparison/test_cond_format06.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | 683 | 2015-01-03T09:55:02.000Z | 2022-03-31T07:18:15.000Z | xlsxwriter/test/comparison/test_cond_format06.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | 636 | 2015-01-05T01:57:08.000Z | 2022-03-25T18:42:41.000Z | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename('cond_format06.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with conditional formatting."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        format1 = workbook.add_format({
            'pattern': 15,
            'fg_color': '#FF0000',
            'bg_color': '#FFFF00'
        })

        # Populate A1:A4 (row/col form is equivalent to the 'A1' form).
        for row, value in enumerate((10, 20, 30, 40)):
            worksheet.write(row, 0, value)

        # Highlight cells whose value exceeds 7.
        worksheet.conditional_format('A1',
                                     {'type': 'cell',
                                      'format': format1,
                                      'criteria': '>',
                                      'value': 7,
                                      })

        workbook.close()
        self.assertExcelEqual()
| 26.62 | 88 | 0.496619 | true | true | |
f7264528198aeeae3d454c3855027f09c988ee7a | 1,793 | py | Python | _unittests/ut_special/test_tsp_kohonen.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | _unittests/ut_special/test_tsp_kohonen.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | _unittests/ut_special/test_tsp_kohonen.py | mohamedelkansouli/Ensae_py | 8bc867bd2081c259c793fadfa8be5dcc7bd1400b | [
"MIT"
] | null | null | null | """
@brief test log(time=10s)
"""
import os
import sys
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.special.tsp_kohonen import pygame_simulation
from src.ensae_teaching_cs.helpers.video_helper import make_video
class TestTspKohonen(unittest.TestCase):

    def test_image_video_kohonen(self):
        """Run the Kohonen TSP pygame simulation and build a video from its frames."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        temp = get_temp_folder(__file__, "temp_image_video_tsp_kohonen")
        if is_travis_or_appveyor() in ("travis",):
            # pygame.error: No available video device
            return
        import pygame
        if is_travis_or_appveyor() == "circleci":
            # os.environ["SDL_VIDEODRIVER"] = "x11"
            flags = pygame.NOFRAME
        else:
            flags = 0
        pygame_simulation(pygame, fLOG=fLOG, folder=temp,
                          nb=200 if __name__ == "__main__" else 20,
                          size=(400, 250), flags=flags)
        files = os.listdir(temp)
        # FIX: use unittest assertions instead of bare asserts -- bare asserts
        # are stripped under ``python -O`` and give no values on failure.
        self.assertGreater(len(files), 9)
        png = [os.path.join(temp, _)
               for _ in files if os.path.splitext(_)[-1] == ".png"]
        self.assertGreater(len(png), 0)
        out = os.path.join(temp, "tsp_kohonen.avi")
        v = make_video(png, out, size=(200, 125), format="XVID", fps=20)
        self.assertIsNotNone(v)
if __name__ == "__main__":
unittest.main()
| 28.015625 | 72 | 0.591746 | import os
import sys
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.ensae_teaching_cs.special.tsp_kohonen import pygame_simulation
from src.ensae_teaching_cs.helpers.video_helper import make_video
class TestTspKohonen(unittest.TestCase):
def test_image_video_kohonen(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_image_video_tsp_kohonen")
if is_travis_or_appveyor() in ("travis",):
return
import pygame
if is_travis_or_appveyor() == "circleci":
flags = pygame.NOFRAME
else:
flags = 0
pygame_simulation(pygame, fLOG=fLOG, folder=temp,
nb=200 if __name__ == "__main__" else 20,
size=(400, 250), flags=flags)
files = os.listdir(temp)
assert len(files) > 9
png = [os.path.join(temp, _)
for _ in files if os.path.splitext(_)[-1] == ".png"]
assert len(png) > 0
out = os.path.join(temp, "tsp_kohonen.avi")
v = make_video(png, out, size=(200, 125), format="XVID", fps=20)
assert v is not None
if __name__ == "__main__":
unittest.main()
| true | true |
f7264580ec84ba5145f46f58d0d1265932dadf9b | 3,505 | py | Python | parler/tests/test_query_count.py | Yiling-J/django-parler | 23b8ae3348c05d4dded729389cc8129cd03d8c5d | [
"Apache-2.0"
] | 1 | 2020-01-25T05:23:00.000Z | 2020-01-25T05:23:00.000Z | parler/tests/test_query_count.py | Yiling-J/django-parler | 23b8ae3348c05d4dded729389cc8129cd03d8c5d | [
"Apache-2.0"
] | 3 | 2019-11-02T05:52:07.000Z | 2020-06-05T21:56:17.000Z | parler/tests/test_query_count.py | Yiling-J/django-parler | 23b8ae3348c05d4dded729389cc8129cd03d8c5d | [
"Apache-2.0"
] | 2 | 2019-06-10T21:45:05.000Z | 2019-07-10T17:16:35.000Z | import datetime as dt
from django.core.cache import cache
from django.utils import translation
from django.utils.timezone import now
from parler import appsettings
from .utils import AppTestCase, override_parler_settings
from .testapp.models import SimpleModel, DateTimeModel
class QueryCountTests(AppTestCase):
    """
    Test model construction
    """
    @classmethod
    def setUpClass(cls):
        # Create ten SimpleModel / DateTimeModel rows, each translated in the
        # configured fallback language, for the query-count assertions below.
        super(QueryCountTests, cls).setUpClass()
        cls.country_list = (
            'Mexico',
            'Monaco',
            'Morocco',
            'Netherlands',
            'Norway',
            'Poland',
            'Portugal',
            'Romania',
            'Russia',
            'South Africa',
        )
        for country in cls.country_list:
            SimpleModel.objects.create(_current_language=cls.conf_fallback, tr_title=country)
            DateTimeModel.objects.create(_current_language=cls.conf_fallback,
                                         tr_title=country, datetime=now())
    #def setUp(self):
    #    cache.clear()
    def assertNumTranslatedQueries(self, num, qs, language_code=None):
        # Assert that iterating ``qs`` and touching each object's translated
        # title issues exactly ``num`` SQL queries.
        # Use default language if available.
        if language_code is None:
            language_code = self.conf_fallback
        # Easier to understand then a oneline lambda
        # Using str(), not unicode() to be python 3 compatible.
        def test_qs():
            for obj in qs:
                str(obj.tr_title)
        # Queryset is not set to a language, the individual models
        # will default to the currently active project language.
        with translation.override(language_code):
            self.assertNumQueries(num, test_qs)
    def test_uncached_queries(self):
        """
        Test that uncached queries work, albeit slowly.
        """
        # Without caching: one query for the objects + one per translation.
        with override_parler_settings(PARLER_ENABLE_CACHING=False):
            self.assertNumTranslatedQueries(1 + len(self.country_list), SimpleModel.objects.all())
    def test_iteration_with_non_qs_methods(self):
        """
        Test QuerySet methods that do not return QuerySets of models.
        """
        # We have at least one object created in setUpClass.
        obj = DateTimeModel.objects.all()[0]
        self.assertEqual(
            obj,
            DateTimeModel.objects.language(self.conf_fallback).all()[0])
        # Test iteration through QuerySet of non-model objects.
        self.assertIsInstance(
            DateTimeModel.objects.language(self.conf_fallback).dates(
                'datetime', 'day')[0],
            dt.date)
    def test_prefetch_queries(self):
        """
        Test that .prefetch_related() works
        """
        # Prefetching collapses the per-object translation queries into one.
        with override_parler_settings(PARLER_ENABLE_CACHING=False):
            self.assertNumTranslatedQueries(2, SimpleModel.objects.prefetch_related('translations'))
    def test_model_cache_queries(self):
        """
        Test that the ``_translations_cache`` works.
        """
        cache.clear()
        with override_parler_settings(PARLER_ENABLE_CACHING=False):
            qs = SimpleModel.objects.all()
            self.assertNumTranslatedQueries(1 + len(self.country_list), qs)
            self.assertNumTranslatedQueries(0, qs)  # All should be cached on the QuerySet and object now.
            qs = SimpleModel.objects.prefetch_related('translations')
            self.assertNumTranslatedQueries(2, qs)
            self.assertNumTranslatedQueries(0, qs)  # All should be cached on the QuerySet and object now.
| 34.029126 | 107 | 0.633666 | import datetime as dt
from django.core.cache import cache
from django.utils import translation
from django.utils.timezone import now
from parler import appsettings
from .utils import AppTestCase, override_parler_settings
from .testapp.models import SimpleModel, DateTimeModel
class QueryCountTests(AppTestCase):
@classmethod
def setUpClass(cls):
super(QueryCountTests, cls).setUpClass()
cls.country_list = (
'Mexico',
'Monaco',
'Morocco',
'Netherlands',
'Norway',
'Poland',
'Portugal',
'Romania',
'Russia',
'South Africa',
)
for country in cls.country_list:
SimpleModel.objects.create(_current_language=cls.conf_fallback, tr_title=country)
DateTimeModel.objects.create(_current_language=cls.conf_fallback,
tr_title=country, datetime=now())
def assertNumTranslatedQueries(self, num, qs, language_code=None):
if language_code is None:
language_code = self.conf_fallback
def test_qs():
for obj in qs:
str(obj.tr_title)
with translation.override(language_code):
self.assertNumQueries(num, test_qs)
def test_uncached_queries(self):
with override_parler_settings(PARLER_ENABLE_CACHING=False):
self.assertNumTranslatedQueries(1 + len(self.country_list), SimpleModel.objects.all())
def test_iteration_with_non_qs_methods(self):
obj = DateTimeModel.objects.all()[0]
self.assertEqual(
obj,
DateTimeModel.objects.language(self.conf_fallback).all()[0])
self.assertIsInstance(
DateTimeModel.objects.language(self.conf_fallback).dates(
'datetime', 'day')[0],
dt.date)
def test_prefetch_queries(self):
with override_parler_settings(PARLER_ENABLE_CACHING=False):
self.assertNumTranslatedQueries(2, SimpleModel.objects.prefetch_related('translations'))
def test_model_cache_queries(self):
cache.clear()
with override_parler_settings(PARLER_ENABLE_CACHING=False):
qs = SimpleModel.objects.all()
self.assertNumTranslatedQueries(1 + len(self.country_list), qs)
self.assertNumTranslatedQueries(0, qs)
qs = SimpleModel.objects.prefetch_related('translations')
self.assertNumTranslatedQueries(2, qs)
self.assertNumTranslatedQueries(0, qs)
| true | true |
f72645db2eb553529b0393f9bc851543b325fd14 | 3,880 | py | Python | webots_ros2_universal_robot/webots_ros2_universal_robot/follow_joint_trajectory_client.py | TaoYibo1866/webots_ros2 | a72c164825663cebbfd27e0649ea51d3abf9bbed | [
"Apache-2.0"
] | null | null | null | webots_ros2_universal_robot/webots_ros2_universal_robot/follow_joint_trajectory_client.py | TaoYibo1866/webots_ros2 | a72c164825663cebbfd27e0649ea51d3abf9bbed | [
"Apache-2.0"
] | 6 | 2019-08-09T08:04:37.000Z | 2019-08-14T15:05:35.000Z | webots_ros2_universal_robot/webots_ros2_universal_robot/follow_joint_trajectory_client.py | omichel/webots_ros2 | 5b59d0b1fbeff4c3f75a447bd152c10853f4691b | [
"Apache-2.0"
] | null | null | null | # Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic client for the FollowJointTrajectory action used for multi-robot demonstration."""
from action_msgs.msg import GoalStatus
from control_msgs.action import FollowJointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
from trajectory_msgs.msg import JointTrajectoryPoint
from builtin_interfaces.msg import Duration
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class FollowJointTrajectoryClient(Node):
    """ROS 2 node that sends a trajectory to a FollowJointTrajectory action
    server and can repeat the same goal a given number of iterations."""

    def __init__(self, name, prefix):
        super().__init__(name)
        self.__client = ActionClient(self, FollowJointTrajectory, prefix + '/follow_joint_trajectory')
        # Keeping a reference preserves the subscription; it also lets
        # send_goal() detect that the controller has started publishing state.
        self.__state_subscriber = self.create_subscription(
            JointTrajectoryControllerState, prefix + '/state', self.__on_state_received, 1
        )
        self.__received_states_counter = 0
        self.__remaining_iteration = 0
        self.__current_trajectory = None
        self.__get_result_future = None
        self.__send_goal_future = None

    def __on_goal_response_callback(self, future):
        # Fired when the action server accepts or rejects the goal request.
        goal_handle = future.result()
        if not goal_handle.accepted:
            self.get_logger().info('Goal rejected by action server.')
            return
        self.get_logger().info('Goal accepted by action server.')
        self.__get_result_future = goal_handle.get_result_async()
        self.__get_result_future.add_done_callback(self.__on_get_result_callback)

    def __on_get_result_callback(self, future):
        # Fired when the action finishes; re-sends the goal while iterations
        # remain, otherwise shuts down the rclpy context.
        status = future.result().status
        if status == GoalStatus.STATUS_SUCCEEDED:
            self.get_logger().info('Goal succeeded.')
        else:
            self.get_logger().info('Goal failed with status: {0}'.format(status))
        if self.__remaining_iteration > 0:
            self.send_goal(self.__current_trajectory, self.__remaining_iteration - 1)
        else:
            rclpy.shutdown()

    def __on_state_received(self, _):
        # Only the fact that a state message arrived matters, not its content.
        self.__received_states_counter += 1

    def send_goal(self, trajectory, iteration=1):
        """Send ``trajectory`` (dict with 'joint_names' and 'points') to the
        action server, repeating it ``iteration`` times in total."""
        self.get_logger().info('Waiting for action server to be ready...')
        self.__client.wait_for_server()
        # WORKAROUND: The `wait_for_server()` method reports the `joint_trajectory_controller` node is ready even though it
        # needs a bit more time to get ready to receive commands.
        while self.__received_states_counter < 1:
            rclpy.spin_once(self)
        self.__current_trajectory = trajectory
        self.__remaining_iteration = iteration - 1
        # Build the goal message from the plain-dict trajectory description.
        goal_message = FollowJointTrajectory.Goal()
        goal_message.trajectory.joint_names = trajectory['joint_names']
        for point in trajectory['points']:
            trajectory_point = JointTrajectoryPoint(
                positions=point['positions'],
                time_from_start=Duration(
                    sec=point['time_from_start']['sec'],
                    nanosec=point['time_from_start']['nanosec']
                )
            )
            goal_message.trajectory.points.append(trajectory_point)
        self.get_logger().info('Sending goal request...')
        self.__send_goal_future = self.__client.send_goal_async(
            goal_message
        )
        self.__send_goal_future.add_done_callback(self.__on_goal_response_callback)
| 40 | 123 | 0.698711 |
from action_msgs.msg import GoalStatus
from control_msgs.action import FollowJointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
from trajectory_msgs.msg import JointTrajectoryPoint
from builtin_interfaces.msg import Duration
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
class FollowJointTrajectoryClient(Node):
def __init__(self, name, prefix):
super().__init__(name)
self.__client = ActionClient(self, FollowJointTrajectory, prefix + '/follow_joint_trajectory')
self.__state_subscriber = self.create_subscription(
JointTrajectoryControllerState, prefix + '/state', self.__on_state_received, 1
)
self.__received_states_counter = 0
self.__remaining_iteration = 0
self.__current_trajectory = None
self.__get_result_future = None
self.__send_goal_future = None
def __on_goal_response_callback(self, future):
goal_handle = future.result()
if not goal_handle.accepted:
self.get_logger().info('Goal rejected by action server.')
return
self.get_logger().info('Goal accepted by action server.')
self.__get_result_future = goal_handle.get_result_async()
self.__get_result_future.add_done_callback(self.__on_get_result_callback)
def __on_get_result_callback(self, future):
status = future.result().status
if status == GoalStatus.STATUS_SUCCEEDED:
self.get_logger().info('Goal succeeded.')
else:
self.get_logger().info('Goal failed with status: {0}'.format(status))
if self.__remaining_iteration > 0:
self.send_goal(self.__current_trajectory, self.__remaining_iteration - 1)
else:
rclpy.shutdown()
def __on_state_received(self, _):
self.__received_states_counter += 1
def send_goal(self, trajectory, iteration=1):
self.get_logger().info('Waiting for action server to be ready...')
self.__client.wait_for_server()
while self.__received_states_counter < 1:
rclpy.spin_once(self)
self.__current_trajectory = trajectory
self.__remaining_iteration = iteration - 1
goal_message = FollowJointTrajectory.Goal()
goal_message.trajectory.joint_names = trajectory['joint_names']
for point in trajectory['points']:
trajectory_point = JointTrajectoryPoint(
positions=point['positions'],
time_from_start=Duration(
sec=point['time_from_start']['sec'],
nanosec=point['time_from_start']['nanosec']
)
)
goal_message.trajectory.points.append(trajectory_point)
self.get_logger().info('Sending goal request...')
self.__send_goal_future = self.__client.send_goal_async(
goal_message
)
self.__send_goal_future.add_done_callback(self.__on_goal_response_callback)
| true | true |
f726466b10a38e592d89b680d7031e520070c599 | 3,697 | py | Python | packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 12 | 2021-09-13T18:31:09.000Z | 2022-03-31T12:10:28.000Z | packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 5 | 2021-12-01T04:34:07.000Z | 2022-01-28T08:28:18.000Z | packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/convert_xlnet_original_tf_checkpoint_to_pytorch.py | lego0901/pytea | 8ede650def2e68f4610ba816451d8b9e28f09f76 | [
"MIT"
] | 3 | 2022-01-18T10:56:05.000Z | 2022-01-28T01:46:43.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
import argparse
import os
import torch
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import logging
# Output-head size per GLUE fine-tuning task (label counts; "sts-b" is a
# regression task, hence a single output).
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch weights + config dump.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the pre-trained XLNet model.
        pytorch_dump_folder_path: folder receiving the PyTorch weights/config.
        finetuning_task: optional task name; a GLUE task selects a sequence
            classification head, a name containing "squad" selects a QA head,
            anything else (or None) yields a plain LM-head model.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print("Building PyTorch XLNetForSequenceClassification model from configuration: {}".format(str(config)))
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model.
    # Robustness fix: create the target folder if it does not exist yet,
    # instead of failing inside torch.save / open.
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point: parse the four conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained XLNet model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
| 32.147826 | 117 | 0.712199 |
import argparse
import os
import torch
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
config = XLNetConfig.from_json_file(bert_config_file)
finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print("Building PyTorch XLNetForSequenceClassification model from configuration: {}".format(str(config)))
config.finetuning_task = finetuning_task
config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
model = XLNetForSequenceClassification(config)
elif "squad" in finetuning_task:
config.finetuning_task = finetuning_task
model = XLNetForQuestionAnswering(config)
else:
model = XLNetLMHeadModel(config)
load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| true | true |
f72646ac93dd6fd9d9c8c4a8152f818f740a9035 | 2,689 | py | Python | tests/test_parsers.py | tos-kamiya/d2vg | 72ce1cb900a219f9d7bc3982e234a4498be52d5a | [
"BSD-2-Clause"
] | 3 | 2021-11-17T08:07:50.000Z | 2021-12-19T04:35:15.000Z | tests/test_parsers.py | tos-kamiya/d2vg | 72ce1cb900a219f9d7bc3982e234a4498be52d5a | [
"BSD-2-Clause"
] | null | null | null | tests/test_parsers.py | tos-kamiya/d2vg | 72ce1cb900a219f9d7bc3982e234a4498be52d5a | [
"BSD-2-Clause"
] | null | null | null | import unittest
from pathlib import Path
import re
import tempfile
import d2vg
class ParserTest(unittest.TestCase):
    """End-to-end checks of d2vg's file parsers on temporary fixture files."""

    def test_text_file(self):
        # Plain text should round-trip unchanged through read_text_file.
        with tempfile.TemporaryDirectory() as tempdir:
            p = Path(tempdir) / "a.txt"
            content = "1st line.\n2nd line.\n"
            p.write_text(content)
            read_content = d2vg.parsers.read_text_file(str(p))
            self.assertEqual(read_content, content)
    def test_html_file(self):
        with tempfile.TemporaryDirectory() as tempdir:
            p = Path(tempdir) / "a.html"
            content = """<!DOCTYPE html>
<html>
<body>
<p>1st paragraph.</p>
<p>2nd paragraph.</p>
</body>
</html>"""
            p.write_text(content)
            read_content = d2vg.parsers.html_parse(str(p))
            # Collapse blank lines before comparing.
            read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
            # NOTE(review): the expected leading "html" token presumably comes
            # from how html_parse extracts text -- verify against the parser.
            self.assertEqual(read_content, "html\n1st paragraph.\n2nd paragraph.")
    def test_pdf_file(self):
        # borb is imported lazily so the other tests run without it installed.
        from borb.pdf.canvas.layout.page_layout.multi_column_layout import (
            SingleColumnLayout,
        )
        from borb.pdf.canvas.layout.text.paragraph import Paragraph
        from borb.pdf.document import Document
        from borb.pdf.page.page import Page
        from borb.pdf.pdf import PDF
        with tempfile.TemporaryDirectory() as tempdir:
            p = Path(tempdir) / "a.pdf"
            # Build a one-page PDF with two paragraphs, then parse it back.
            pdf = Document()
            page = Page()
            pdf.append_page(page)
            layout = SingleColumnLayout(page)
            layout.add(Paragraph("1st paragraph."))
            layout.add(Paragraph("2nd paragraph."))
            with open(p, "wb") as pdf_file_handle:
                PDF.dumps(pdf_file_handle, pdf)
            read_content = d2vg.parsers.pdf_parse(str(p))
            read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
            self.assertEqual(read_content, "1st paragraph.\n2nd paragraph.")
    # !! not working !! ref: https://stackoverflow.com/questions/58186869/how-to-fix-the-bug-modulenotfounderror-no-module-named-exceptions-when-impo
    # def test_docx_file(self):
    #     from docx import Document
    #     with tempfile.TemporaryDirectory() as tempdir:
    #         p = Path(tempdir) / 'a.docx'
    #         document = Document()
    #         document.add_paragraph("1st paragraph.")
    #         document.add_paragraph("1st paragraph.")
    #         document.save(str(p))
    #         read_content = d2vg.parsers.docx_parse(str(p))
    #         read_content = re.sub(r'\n+', r'\n', read_content).rstrip()
    #         self.assertEqual(read_content, '1st paragraph.\n2nd paragraph.')
if __name__ == "__main__":
unittest.main()
| 34.474359 | 149 | 0.61138 | import unittest
from pathlib import Path
import re
import tempfile
import d2vg
class ParserTest(unittest.TestCase):
def test_text_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.txt"
content = "1st line.\n2nd line.\n"
p.write_text(content)
read_content = d2vg.parsers.read_text_file(str(p))
self.assertEqual(read_content, content)
def test_html_file(self):
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.html"
content = """<!DOCTYPE html>
<html>
<body>
<p>1st paragraph.</p>
<p>2nd paragraph.</p>
</body>
</html>"""
p.write_text(content)
read_content = d2vg.parsers.html_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "html\n1st paragraph.\n2nd paragraph.")
def test_pdf_file(self):
from borb.pdf.canvas.layout.page_layout.multi_column_layout import (
SingleColumnLayout,
)
from borb.pdf.canvas.layout.text.paragraph import Paragraph
from borb.pdf.document import Document
from borb.pdf.page.page import Page
from borb.pdf.pdf import PDF
with tempfile.TemporaryDirectory() as tempdir:
p = Path(tempdir) / "a.pdf"
pdf = Document()
page = Page()
pdf.append_page(page)
layout = SingleColumnLayout(page)
layout.add(Paragraph("1st paragraph."))
layout.add(Paragraph("2nd paragraph."))
with open(p, "wb") as pdf_file_handle:
PDF.dumps(pdf_file_handle, pdf)
read_content = d2vg.parsers.pdf_parse(str(p))
read_content = re.sub(r"\n+", r"\n", read_content).rstrip()
self.assertEqual(read_content, "1st paragraph.\n2nd paragraph.")
if __name__ == "__main__":
unittest.main()
| true | true |
f72646f0022b7bc1e1b506d20a786c3a402e9a98 | 422 | py | Python | blog/migrations/0008_post_snippet.py | cs130-w21/15 | 3e0bfd3662e930e5b67416939a976029ddad6436 | [
"Apache-2.0"
] | null | null | null | blog/migrations/0008_post_snippet.py | cs130-w21/15 | 3e0bfd3662e930e5b67416939a976029ddad6436 | [
"Apache-2.0"
] | 13 | 2021-01-14T06:09:55.000Z | 2021-03-08T08:56:36.000Z | blog/migrations/0008_post_snippet.py | cs130-w21/15 | 3e0bfd3662e930e5b67416939a976029ddad6436 | [
"Apache-2.0"
] | 1 | 2021-04-07T18:20:21.000Z | 2021-04-07T18:20:21.000Z | # Generated by Django 3.1.5 on 2021-03-05 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20210305_0539'),
]
operations = [
migrations.AddField(
model_name='post',
name='snippet',
field=models.CharField(default='Click link below to see more.', max_length=200),
),
]
| 22.210526 | 92 | 0.606635 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20210305_0539'),
]
operations = [
migrations.AddField(
model_name='post',
name='snippet',
field=models.CharField(default='Click link below to see more.', max_length=200),
),
]
| true | true |
f72646f6d0c53d7d935da1ea9870115892eb14ad | 4,209 | py | Python | ALGOs/LearningRate_Decay.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | 1 | 2016-11-29T04:28:09.000Z | 2016-11-29T04:28:09.000Z | ALGOs/LearningRate_Decay.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | null | null | null | ALGOs/LearningRate_Decay.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | null | null | null | #1.
class LR_LinearDecay():
'''
Function : -Learning rate decay linearly(a constant factor) after each epoch
-Eg. LR= 5, 5.8, 5.6, 5.4, ........
'''
def __init__(self, min_lr=1e-5, max_lr=1e-2, epochs=None):
super().__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.total_iterations = epochs
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
self.iteration = epoch_i
x = self.iteration / self.total_iterations
return self.max_lr - (self.max_lr-self.min_lr) * x
#2.
class LR_StepDecay():
'''
Function : -Learning rate decay stepwise(a varing factor) after every few epochs
- Eg. LR= 5, 5, 5, 2.5, 2.5, 2.5, 1.25, 1.25, 1.25, ......
'''
def __init__(self, max_lr=1e-2, step_size=3, decay_factor=2):
super().__init__()
self.max_lr = max_lr
self.step_size = step_size # meaning: update happens after every `step_size` iterations
self.decay_factor = decay_factor
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
self.iteration = epoch_i
x = self.iteration / self.step_size
return self.max_lr / (self.decay_factor ** int(x) )
#3.
class LR_ExponentialDecay():
'''
Function : Learning rate decay exponentially( exp(k*t) ) after each epoch
'''
def __init__(self, max_lr=1e-2, decay_factor=0.1):
super().__init__()
self.max_lr = max_lr
self.decay_factor = decay_factor
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
return self.max_lr / math.exp(self.decay_factor*epoch_i )
#4.
class LR_Cyclical():
'''
Function - This implements 2 techniques: 1.Linear annealing(to better converge at minima)
2.Learning rate linear restart(to escape local minima)
'''
def __init__(self, min_lr=1e-5, max_lr=1e-2, step_size=10, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'):
super(CyclicLR, self).__init__()
import math
self.min_lr = min_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
if scale_fn == None:
if(self.mode == 'triangular'):
self.scale_fn = lambda x: 1.
elif(self.mode == 'triangular2'):
self.scale_fn = lambda x: 1/(2.**(x-1))
elif(self.mode == 'exp_range'):
self.scale_fn = lambda x: gamma**(x)
else:
self.scale_fn = scale_fn
def get_lr(self, epoch_i):
cycle = math.floor(1 + epoch_i/(2*self.step_size))
x = math.abs (epoch_i/self.step_size - 2*cycle + 1)
return self.base_lr + (self.max_lr-self.min_lr) * (1-x) * self.scale_fn(cycle)
#5.
class LR_StochasticGradientDescentWithWarmRestarts():
'''
Function - This implements 2 techniques: 1.Cosine annealing(to better converge at minima)
2.Learning rate sharp restart(to escape local minima)
'''
def __init__(self, min_lr, max_lr, epoch_steps=10):
self.min_lr = min_lr
self.max_lr = max_lr
self.epoch_steps = epoch_steps # restarts after every `epoch_steps` no. of epochs
self.batch_since_restart = 0
def get_lr(self, epoch_i):
'''Calculate the learning rate.'''
self.batch_since_restart = epoch_i % epoch_steps
fraction_to_restart = self.batch_since_restart / (epoch_steps)
return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
'''
Example.
>> epoch_n = 50
>> lr = LR_LinearDecay(epochs = epoch_n)
>> for epoch_i in range(1,epoch_n+1):
learning_rate = lr.get_lr(epoch_i = epoch_i )
'''
| 30.280576 | 128 | 0.549062 |
class LR_LinearDecay():
'''
Function : -Learning rate decay linearly(a constant factor) after each epoch
-Eg. LR= 5, 5.8, 5.6, 5.4, ........
'''
def __init__(self, min_lr=1e-5, max_lr=1e-2, epochs=None):
super().__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.total_iterations = epochs
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
self.iteration = epoch_i
x = self.iteration / self.total_iterations
return self.max_lr - (self.max_lr-self.min_lr) * x
class LR_StepDecay():
'''
Function : -Learning rate decay stepwise(a varing factor) after every few epochs
- Eg. LR= 5, 5, 5, 2.5, 2.5, 2.5, 1.25, 1.25, 1.25, ......
'''
def __init__(self, max_lr=1e-2, step_size=3, decay_factor=2):
super().__init__()
self.max_lr = max_lr
self.step_size = step_size
self.decay_factor = decay_factor
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
self.iteration = epoch_i
x = self.iteration / self.step_size
return self.max_lr / (self.decay_factor ** int(x) )
class LR_ExponentialDecay():
'''
Function : Learning rate decay exponentially( exp(k*t) ) after each epoch
'''
def __init__(self, max_lr=1e-2, decay_factor=0.1):
super().__init__()
self.max_lr = max_lr
self.decay_factor = decay_factor
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
return self.max_lr / math.exp(self.decay_factor*epoch_i )
class LR_Cyclical():
'''
Function - This implements 2 techniques: 1.Linear annealing(to better converge at minima)
2.Learning rate linear restart(to escape local minima)
'''
def __init__(self, min_lr=1e-5, max_lr=1e-2, step_size=10, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'):
super(CyclicLR, self).__init__()
import math
self.min_lr = min_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
if scale_fn == None:
if(self.mode == 'triangular'):
self.scale_fn = lambda x: 1.
elif(self.mode == 'triangular2'):
self.scale_fn = lambda x: 1/(2.**(x-1))
elif(self.mode == 'exp_range'):
self.scale_fn = lambda x: gamma**(x)
else:
self.scale_fn = scale_fn
def get_lr(self, epoch_i):
cycle = math.floor(1 + epoch_i/(2*self.step_size))
x = math.abs (epoch_i/self.step_size - 2*cycle + 1)
return self.base_lr + (self.max_lr-self.min_lr) * (1-x) * self.scale_fn(cycle)
class LR_StochasticGradientDescentWithWarmRestarts():
'''
Function - This implements 2 techniques: 1.Cosine annealing(to better converge at minima)
2.Learning rate sharp restart(to escape local minima)
'''
def __init__(self, min_lr, max_lr, epoch_steps=10):
self.min_lr = min_lr
self.max_lr = max_lr
self.epoch_steps = epoch_steps
self.batch_since_restart = 0
def get_lr(self, epoch_i):
'''Calculate the learning rate.'''
self.batch_since_restart = epoch_i % epoch_steps
fraction_to_restart = self.batch_since_restart / (epoch_steps)
return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
'''
Example.
>> epoch_n = 50
>> lr = LR_LinearDecay(epochs = epoch_n)
>> for epoch_i in range(1,epoch_n+1):
learning_rate = lr.get_lr(epoch_i = epoch_i )
'''
| false | true |
f7264824f603075011c4ae3509f47ec148f2cec0 | 12,224 | py | Python | tests/test_dates.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | tests/test_dates.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | tests/test_dates.py | robot2051/dto-digitalmarketplace-utils | e581be6396c12473697398b0ec9d253c564a324b | [
"MIT"
] | null | null | null | # coding: utf-8
import pytest
import mock
import workdays
import datetime
import dmutils.dates as dates_package
class TestPublishingDates():
def test_get_publishing_dates_formats_time(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
assert dates_package.datetime.utcnow() == datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
assert dates_package.get_publishing_dates(brief)['closing_time'] == '11:59 pm'
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_monday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 6, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_tuesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 7, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_wednesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_thursday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_friday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
'lotSlug': 'digital-specialists'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_saturday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 16, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_sunday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 17, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_monday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 18, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_tuesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 18, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 19, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_wednesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 19, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 20, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_thursday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 20, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 21, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_friday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 21, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_saturday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 23, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_sunday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 24, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_no_requirementLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_1_week_requirementsLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
'requirementsLength': '1 week'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 6, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 8, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_2_week_requirementsLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
'requirementsLength': '2 weeks'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_published_at_key_is_a_date_object(self):
brief = {
'publishedAt': datetime.datetime(2016, 1, 4, 12, 0, 0),
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
| 49.489879 | 115 | 0.641525 |
import pytest
import mock
import workdays
import datetime
import dmutils.dates as dates_package
class TestPublishingDates():
def test_get_publishing_dates_formats_time(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
assert dates_package.datetime.utcnow() == datetime.datetime(2015, 5, 22, 20, 39, 39, 417900)
assert dates_package.get_publishing_dates(brief)['closing_time'] == '11:59 pm'
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_monday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 6, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_tuesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 7, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_wednesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 8, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_thursday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_friday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
'lotSlug': 'digital-specialists'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_saturday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 16, 23, 59, 59)
def test_get_publishing_dates_for_one_week_briefs_are_correct_if_published_on_a_sunday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900)
brief = {
'requirementsLength': '1 week',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 17, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_monday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 4, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 18, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_tuesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 5, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 12, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 18, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 19, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_wednesday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 6, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 13, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 19, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 20, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_thursday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 7, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 14, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 20, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 21, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_friday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 8, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 21, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_saturday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 9, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 23, 23, 59, 59)
def test_get_publishing_dates_for_two_week_briefs_are_correct_if_published_on_a_sunday(self):
with mock.patch('dmutils.dates.datetime') as mock_date:
mock_date.utcnow.return_value = datetime.datetime(2016, 7, 10, 15, 21, 39, 417900)
brief = {
'requirementsLength': '2 weeks',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 7, 15, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 7, 22, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 7, 24, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_no_requirementLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_1_week_requirementsLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
'requirementsLength': '1 week'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 6, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 8, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_brief_is_published_with_2_week_requirementsLength(self):
brief = {
'publishedAt': u'2016-01-04T12:00:00.00000Z',
'requirementsLength': '2 weeks'
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
def test_get_publishing_dates_returns_correct_dates_if_published_at_key_is_a_date_object(self):
brief = {
'publishedAt': datetime.datetime(2016, 1, 4, 12, 0, 0),
}
dates = dates_package.get_publishing_dates(brief)
assert dates['questions_close'] == datetime.datetime(2016, 1, 11, 23, 59, 59)
assert dates['answers_close'] == datetime.datetime(2016, 1, 15, 23, 59, 59)
assert dates['closing_date'] == datetime.datetime(2016, 1, 18, 23, 59, 59)
| true | true |
f726488d8cbb6bc2a5748013e73cd7f6e42b06b9 | 20,570 | py | Python | sagemaker-debugger/model_specific_realtime_analysis/bert_attention_head_view/entry_point/data.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 2,327 | 2020-03-01T09:47:34.000Z | 2021-11-25T12:38:42.000Z | sagemaker-debugger/model_specific_realtime_analysis/bert_attention_head_view/entry_point/data.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 209 | 2020-03-01T17:14:12.000Z | 2021-11-08T20:35:42.000Z | sagemaker-debugger/model_specific_realtime_analysis/bert_attention_head_view/entry_point/data.py | jpmarques19/tensorflwo-test | 0ff8b06e0415075c7269820d080284a42595bb2e | [
"Apache-2.0"
] | 686 | 2020-03-03T17:24:51.000Z | 2021-11-25T23:39:12.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and DMLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT for QA datasets."""
import collections
import multiprocessing as mp
import time
from functools import partial
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.utils import whitespace_splitter
import numpy as np
# Public API of this module.
# Bug fix: the second entry was written as '\rocess_dataset' -- the leading
# "\r" is a carriage-return escape, so the exported name was garbage and
# `from ... import *` could never re-export `preprocess_dataset`.
__all__ = ['SQuADTransform', 'preprocess_dataset']
class SquadExample:
    """Container for one SQuAD training/test example.

    Parameters
    ----------
    qas_id : hashable
        Unique question/answer identifier from the dataset.
    question_text : str
        The raw question string.
    doc_tokens : list of str
        Whitespace-split tokens of the context document.
    example_id : int
        Sequential id assigned to this example.
    orig_answer_text : str, optional
        The original answer text (None for test examples).
    start_position : int, optional
        Token index where the answer starts (-1 / None when no answer).
    end_position : int, optional
        Token index where the answer ends (-1 / None when no answer).
    is_impossible : bool, default False
        Whether the question is unanswerable (SQuAD 2.0 style).
    """

    def __init__(self,
                 qas_id,
                 question_text,
                 doc_tokens,
                 example_id,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None,
                 is_impossible=False):
        # Identification fields.
        self.qas_id = qas_id
        self.example_id = example_id
        # Question and context text.
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        # Answer span information (absent for unanswerable/test examples).
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible
def _worker_fn(example, transform):
"""Function for processing data in worker process."""
feature = transform(example)
return feature
def preprocess_dataset(dataset, transform, num_workers=8):
    """Use multiprocessing to perform transform for dataset.

    Parameters
    ----------
    dataset: dataset-like object
        Source dataset.
    transform: callable
        Transformer function; must be picklable, and returns a list of
        feature tuples (or a falsy value for skipped examples) whose last
        element is the feature length.
    num_workers: int, default 8
        The number of multiprocessing workers to use for data preprocessing.

    Returns
    -------
    (SimpleDataset, list of int)
        The transformed dataset (first six fields of every feature) and the
        per-feature lengths.
    """
    worker_fn = partial(_worker_fn, transform=transform)
    start = time.time()
    dataset_transform = []
    dataset_len = []
    # Context manager guarantees the pool is terminated even when a worker
    # raises; the original close()-only code leaked worker processes on
    # error and never joined them.
    with mp.Pool(num_workers) as pool:
        for data in pool.map(worker_fn, dataset):
            if data:
                for _data in data:
                    # Last element is the feature length; keep it separate.
                    dataset_transform.append(_data[:-1])
                    dataset_len.append(_data[-1])
    dataset = SimpleDataset(dataset_transform).transform(
        lambda x: (x[0], x[1], x[2], x[3], x[4], x[5]))
    end = time.time()
    print('Done! Transform dataset costs %.2f seconds.' % (end - start))
    return dataset, dataset_len
class SQuADFeature:
    """One model-ready feature derived from a single SQuAD example.

    A long document is split into sliding-window spans, so one example can
    yield several features; doc_span_index identifies the window.
    """

    # Attribute names in constructor order; every argument is stored
    # verbatim on the instance.
    _FIELDS = ('example_id', 'qas_id', 'doc_tokens', 'doc_span_index',
               'tokens', 'token_to_orig_map', 'token_is_max_context',
               'input_ids', 'valid_length', 'segment_ids',
               'start_position', 'end_position', 'is_impossible')

    def __init__(self,
                 example_id,
                 qas_id,
                 doc_tokens,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 valid_length,
                 segment_ids,
                 start_position,
                 end_position,
                 is_impossible):
        values = locals()
        for name in self._FIELDS:
            setattr(self, name, values[name])
class SQuADTransform:
    """Dataset transformation for BERT-style question answering (SQuAD).

    Processing steps:
    - Convert a gluonnlp.data.SQuAD record into a SquadExample.
    - Tokenize the question text (truncated to max_query_length).
    - For documents longer than the remaining budget, use a sliding window
      to split them into multiple features, recording for every token
      whether the current window gives it its "maximum context".
    - Tokenize the document chunks and join them with the question tokens,
      inserting [CLS] and [SEP] markers.
    - Compute the token-level answer start/end positions and the valid
      (unpadded) sequence length.

    Example::

        question_text: 'When did BBC Japan begin broadcasting?'
        doc_tokens: ['BBC', 'Japan', 'was', ..., 'folded.']
        start_position: 10, end_position: 11
        orig_answer_text: 'December 2004'

    becomes::

        tokens: ['[CLS]', 'when', 'did', 'bbc', ..., '[SEP]']
        segment_ids: 0 for the question part, 1 for the document part
        start_position: 20, end_position: 21, valid_length: 36

    Because of the sliding-window approach a token can appear in several
    windows. We keep, per token, the window with "maximum context", defined
    as the largest *minimum* of its left and right context (the *sum* of
    left and right context is constant). See _check_is_max_context.

    Parameters
    ----------
    tokenizer : BERTTokenizer
        Tokenizer for the sentences.
    max_seq_length : int, default 384
        Maximum sequence length of question + document + special tokens.
    doc_stride : int, default 128
        When splitting up a long document into chunks, how much stride to
        take between chunks.
    max_query_length : int, default 64
        The maximum length of the query tokens.
    is_pad : bool, default True
        Whether to pad the sequences to max_seq_length.
    is_training : bool, default True
        Whether to compute answer positions (training mode).
    do_lookup : bool, default True
        Whether to convert tokens to vocabulary indices.
    """

    def __init__(self,
                 tokenizer,
                 max_seq_length=384,
                 doc_stride=128,
                 max_query_length=64,
                 is_pad=True,
                 is_training=True,
                 do_lookup=True):
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length
        self.max_query_length = max_query_length
        self.doc_stride = doc_stride
        self.is_pad = is_pad
        self.is_training = is_training
        self.do_lookup = do_lookup

    def _is_whitespace(self, c):
        # 0x202F is NARROW NO-BREAK SPACE, which str.isspace() conventions
        # in the original BERT code also treated as whitespace.
        if c == ' ' or c == '\t' or c == '\r' or c == '\n' or ord(
                c) == 0x202F:
            return True
        return False

    def _toSquadExample(self, record):
        """Convert a raw SQuAD record (tuple) into a SquadExample.

        Returns None when, in training mode, the annotated answer text
        cannot be recovered from the document (rare Unicode artifacts).
        """
        example_id = record[0]
        qas_id = record[1]
        question_text = record[2]
        paragraph_text = record[3]
        # record[4]/record[5] are lists of answers/offsets; only the first
        # annotation is used. NOTE(review): '' as the fallback offset only
        # works because it is never indexed unless training with an answer.
        orig_answer_text = record[4][0] if record[4] else ''
        answer_offset = record[5][0] if record[5] else ''
        is_impossible = record[6] if len(record) == 7 else False

        # Whitespace-tokenize the paragraph while remembering, for every
        # character, which word it belongs to (for offset conversion).
        doc_tokens = []
        char_to_word_offset = []
        prev_is_whitespace = True
        for c in paragraph_text:
            if self._is_whitespace(c):
                prev_is_whitespace = True
            else:
                if prev_is_whitespace:
                    doc_tokens.append(c)
                else:
                    doc_tokens[-1] += c
                prev_is_whitespace = False
            char_to_word_offset.append(len(doc_tokens) - 1)

        start_position = -1
        end_position = -1
        if self.is_training:
            if not is_impossible:
                answer_length = len(orig_answer_text)
                # Map character offsets to word offsets.
                start_position = char_to_word_offset[answer_offset]
                end_position = char_to_word_offset[
                    answer_offset + answer_length - 1]
                # Only add answers where the text can be exactly recovered from the
                # document. If this CAN'T happen it's likely due to weird Unicode
                # stuff so we will just skip the example.
                #
                # Note that this means for training mode, every example is NOT
                # guaranteed to be preserved.
                actual_text = ' '.join(
                    doc_tokens[start_position:(end_position + 1)])
                cleaned_answer_text = ' '.join(
                    whitespace_splitter(orig_answer_text.strip()))
                if actual_text.find(cleaned_answer_text) == -1:
                    print('Could not find answer: %s vs. %s' %
                          (actual_text, cleaned_answer_text))
                    return None
            else:
                start_position = -1
                end_position = -1
                orig_answer_text = ''

        example = SquadExample(
            qas_id=qas_id,
            question_text=question_text,
            doc_tokens=doc_tokens,
            example_id=example_id,
            orig_answer_text=orig_answer_text,
            start_position=start_position,
            end_position=end_position,
            is_impossible=is_impossible)
        return example

    def _transform(self, *record):
        """Turn a raw record into a list of SQuADFeature (one per window).

        Returns None when the record could not be converted to an example.
        """
        example = self._toSquadExample(record)
        if not example:
            return None

        padding = self.tokenizer.vocab.padding_token
        if self.do_lookup:
            padding = self.tokenizer.vocab[padding]
        features = []
        query_tokens = self.tokenizer(example.question_text)
        if len(query_tokens) > self.max_query_length:
            query_tokens = query_tokens[0:self.max_query_length]

        # Wordpiece-tokenize the document, keeping a bidirectional map
        # between word indices and sub-token indices.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = self.tokenizer(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)

        # Convert word-level answer positions to sub-token positions, then
        # tighten the span to exactly match the annotated answer text.
        tok_start_position = None
        tok_end_position = None
        if self.is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if self.is_training and not example.is_impossible:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position +
                                                     1] - 1
            else:
                tok_end_position = len(all_doc_tokens) - 1
            (tok_start_position, tok_end_position) = _improve_answer_span(
                all_doc_tokens, tok_start_position, tok_end_position,
                self.tokenizer, example.orig_answer_text)

        # The -3 accounts for [CLS], [SEP] and [SEP]
        max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3

        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            'DocSpan', ['start', 'length'])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, self.doc_stride)

        # Build one feature per document window.
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            # Segment 0: [CLS] + question + [SEP].
            tokens.append(self.tokenizer.vocab.cls_token)
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append(self.tokenizer.vocab.sep_token)
            segment_ids.append(0)
            # Segment 1: document window + [SEP].
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(
                    tokens)] = tok_to_orig_index[split_token_index]

                is_max_context = _check_is_max_context(
                    doc_spans, doc_span_index, split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append(self.tokenizer.vocab.sep_token)
            segment_ids.append(1)

            if self.do_lookup:
                input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            else:
                input_ids = tokens

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            valid_length = len(input_ids)

            # Zero-pad up to the sequence length. Note that segment_ids is
            # padded with the same padding id as input_ids.
            if self.is_pad:
                while len(input_ids) < self.max_seq_length:
                    input_ids.append(padding)
                    segment_ids.append(padding)

                assert len(input_ids) == self.max_seq_length
                assert len(segment_ids) == self.max_seq_length

            start_position = 0
            end_position = 0
            if self.is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                out_of_span = False
                if not (tok_start_position >= doc_start
                        and tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                else:
                    # +2 for [CLS] and the first [SEP].
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if self.is_training and example.is_impossible:
                start_position = 0
                end_position = 0
            features.append(SQuADFeature(example_id=example.example_id,
                                         qas_id=example.qas_id,
                                         doc_tokens=example.doc_tokens,
                                         doc_span_index=doc_span_index,
                                         tokens=tokens,
                                         token_to_orig_map=token_to_orig_map,
                                         token_is_max_context=token_is_max_context,
                                         input_ids=input_ids,
                                         valid_length=valid_length,
                                         segment_ids=segment_ids,
                                         start_position=start_position,
                                         end_position=end_position,
                                         is_impossible=example.is_impossible))
        return features

    def __call__(self, record):
        """Transform one raw record into a list of flat feature lists.

        Each inner list holds: example_id, input_ids, segment_ids,
        valid_length, start_position, end_position, and finally the padded
        sequence length (consumed and stripped by preprocess_dataset).
        """
        examples = self._transform(*record)
        if not examples:
            return None
        features = []
        for _example in examples:
            feature = []
            feature.append(_example.example_id)
            feature.append(_example.input_ids)
            feature.append(_example.segment_ids)
            feature.append(_example.valid_length)
            feature.append(_example.start_position)
            feature.append(_example.end_position)
            feature.append(len(_example.input_ids))
            features.append(feature)
        return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = ' '.join(tokenizer(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = ' '.join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
| 38.958333 | 84 | 0.597083 |
import collections
import multiprocessing as mp
import time
from functools import partial
from mxnet.gluon.data import SimpleDataset
from gluonnlp.data.utils import whitespace_splitter
import numpy as np
__all__ = ['SQuADTransform', '\rocess_dataset']
class SquadExample:
def __init__(self,
qas_id,
question_text,
doc_tokens,
example_id,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
self.example_id = example_id
def _worker_fn(example, transform):
feature = transform(example)
return feature
def preprocess_dataset(dataset, transform, num_workers=8):
worker_fn = partial(_worker_fn, transform=transform)
start = time.time()
pool = mp.Pool(num_workers)
dataset_transform = []
dataset_len = []
for data in pool.map(worker_fn, dataset):
if data:
for _data in data:
dataset_transform.append(_data[:-1])
dataset_len.append(_data[-1])
dataset = SimpleDataset(dataset_transform).transform(
lambda x: (x[0], x[1], x[2], x[3], x[4], x[5]))
end = time.time()
pool.close()
print('Done! Transform dataset costs %.2f seconds.' % (end-start))
return dataset, dataset_len
class SQuADFeature:
def __init__(self,
example_id,
qas_id,
doc_tokens,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
valid_length,
segment_ids,
start_position,
end_position,
is_impossible):
self.example_id = example_id
self.qas_id = qas_id
self.doc_tokens = doc_tokens
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.valid_length = valid_length
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class SQuADTransform:
def __init__(self,
tokenizer,
max_seq_length=384,
doc_stride=128,
max_query_length=64,
is_pad=True,
is_training=True,
do_lookup=True):
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.max_query_length = max_query_length
self.doc_stride = doc_stride
self.is_pad = is_pad
self.is_training = is_training
self.do_lookup = do_lookup
def _is_whitespace(self, c):
if c == ' ' or c == '\t' or c == '\r' or c == '\n' or ord(
c) == 0x202F:
return True
return False
def _toSquadExample(self, record):
example_id = record[0]
qas_id = record[1]
question_text = record[2]
paragraph_text = record[3]
orig_answer_text = record[4][0] if record[4] else ''
answer_offset = record[5][0] if record[5] else ''
is_impossible = record[6] if len(record) == 7 else False
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if self._is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
start_position = -1
end_position = -1
if self.is_training:
if not is_impossible:
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[
answer_offset + answer_length - 1]
actual_text = ' '.join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = ' '.join(
whitespace_splitter(orig_answer_text.strip()))
if actual_text.find(cleaned_answer_text) == -1:
print('Could not find answer: %s vs. %s' %
(actual_text, cleaned_answer_text))
return None
else:
start_position = -1
end_position = -1
orig_answer_text = ''
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
example_id=example_id,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
return example
def _transform(self, *record):
example = self._toSquadExample(record)
if not example:
return None
padding = self.tokenizer.vocab.padding_token
if self.do_lookup:
padding = self.tokenizer.vocab[padding]
features = []
query_tokens = self.tokenizer(example.question_text)
if len(query_tokens) > self.max_query_length:
query_tokens = query_tokens[0:self.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = self.tokenizer(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if self.is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if self.is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position +
1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position,
self.tokenizer, example.orig_answer_text)
max_tokens_for_doc = self.max_seq_length - len(query_tokens) - 3
_DocSpan = collections.namedtuple(
'DocSpan', ['start', 'length'])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, self.doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append(self.tokenizer.vocab.cls_token)
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append(self.tokenizer.vocab.sep_token)
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(
tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(
doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append(self.tokenizer.vocab.sep_token)
segment_ids.append(1)
if self.do_lookup:
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
else:
input_ids = tokens
valid_length = len(input_ids)
if self.is_pad:
while len(input_ids) < self.max_seq_length:
input_ids.append(padding)
segment_ids.append(padding)
assert len(input_ids) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
start_position = 0
end_position = 0
if self.is_training and not example.is_impossible:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start
and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if self.is_training and example.is_impossible:
start_position = 0
end_position = 0
features.append(SQuADFeature(example_id=example.example_id,
qas_id=example.qas_id,
doc_tokens=example.doc_tokens,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
valid_length=valid_length,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
return features
def __call__(self, record):
examples = self._transform(*record)
if not examples:
return None
features = []
for _example in examples:
feature = []
feature.append(_example.example_id)
feature.append(_example.input_ids)
feature.append(_example.segment_ids)
feature.append(_example.valid_length)
feature.append(_example.start_position)
feature.append(_example.end_position)
feature.append(len(_example.input_ids))
features.append(feature)
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
tok_answer_text = ' '.join(tokenizer(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = ' '.join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + \
0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
| true | true |
f7264918230895dea85690103d818973bdb62a3f | 242 | py | Python | prefect/aircraftlib/__init__.py | andersy005/brouillons-quotidien | 468ebcc3327f96a6eb3a9a26460790c4f34fdc85 | [
"MIT"
] | null | null | null | prefect/aircraftlib/__init__.py | andersy005/brouillons-quotidien | 468ebcc3327f96a6eb3a9a26460790c4f34fdc85 | [
"MIT"
] | null | null | null | prefect/aircraftlib/__init__.py | andersy005/brouillons-quotidien | 468ebcc3327f96a6eb3a9a26460790c4f34fdc85 | [
"MIT"
] | null | null | null | # flake8: noqa
from .analysis import add_airline_info, clean_vector
from .database import Database
from .openflights import fetch_reference_data
from .opensky import fetch_live_aircraft_data
from .position import Area, Position, bounding_box
| 34.571429 | 52 | 0.85124 |
from .analysis import add_airline_info, clean_vector
from .database import Database
from .openflights import fetch_reference_data
from .opensky import fetch_live_aircraft_data
from .position import Area, Position, bounding_box
| true | true |
f726495538536b17d16dcc758c9c8febfb1dc64a | 13,874 | py | Python | official/vision/beta/configs/retinanet.py | melG81/models | d9ed5232648228ad58b9d50e29d8fe3bb6aa7c4a | [
"Apache-2.0"
] | 1 | 2021-05-12T08:34:32.000Z | 2021-05-12T08:34:32.000Z | official/vision/beta/configs/retinanet.py | melG81/models | d9ed5232648228ad58b9d50e29d8fe3bb6aa7c4a | [
"Apache-2.0"
] | null | null | null | official/vision/beta/configs/retinanet.py | melG81/models | d9ed5232648228ad58b9d50e29d8fe3bb6aa7c4a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RetinaNet configuration definition."""
import os
from typing import List, Optional
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders
# pylint: disable=missing-class-docstring
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
  """Config for the default tf.Example decoder."""
  # Forwarded to the decoder implementation; presumably regenerates the
  # example source_id field when True — confirm against the decoder.
  regenerate_source_id: bool = False
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
  """Config for a tf.Example decoder that uses an external label map."""
  regenerate_source_id: bool = False
  # Presumably a path to the label map file — confirm against the decoder.
  label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
  """One-of config selecting which tf.Example decoder to use."""
  # Selector: 'simple_decoder' or 'label_map_decoder'.
  type: Optional[str] = 'simple_decoder'
  simple_decoder: TfExampleDecoder = TfExampleDecoder()
  label_map_decoder: TfExampleDecoderLabelMap = TfExampleDecoderLabelMap()
@dataclasses.dataclass
class Parser(hyperparams.Config):
  """Config for parsing decoded examples into model inputs."""
  # Number of image channels.
  num_channels: int = 3
  # IoU thresholds for anchor matching (assumed — confirm against the
  # anchor labeler implementation).
  match_threshold: float = 0.5
  unmatched_threshold: float = 0.5
  # Random horizontal flip augmentation during training.
  aug_rand_hflip: bool = False
  # Scale-jitter range applied when resizing images.
  aug_scale_min: float = 1.0
  aug_scale_max: float = 1.0
  skip_crowd_during_training: bool = True
  # Maximum number of ground-truth instances kept per image.
  max_num_instances: int = 100
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
  """Input config for training or evaluation data."""
  # File pattern of the input TFRecords.
  input_path: str = ''
  # Global (all-replica) batch size; 0 must be overridden.
  global_batch_size: int = 0
  is_training: bool = False
  dtype: str = 'bfloat16'
  decoder: DataDecoder = DataDecoder()
  parser: Parser = Parser()
  shuffle_buffer_size: int = 10000
  file_type: str = 'tfrecord'
@dataclasses.dataclass
class Anchor(hyperparams.Config):
  """Anchor generation config."""
  # Number of scales per pyramid level.
  num_scales: int = 3
  # Anchor aspect ratios.
  aspect_ratios: List[float] = dataclasses.field(
      default_factory=lambda: [0.5, 1.0, 2.0])
  # Base anchor size; presumably a multiple of the level stride — confirm
  # against the anchor generator.
  anchor_size: float = 4.0
@dataclasses.dataclass
class Losses(hyperparams.Config):
  """Loss configuration for the RetinaNet task."""
  # Focal loss parameters for classification.
  focal_loss_alpha: float = 0.25
  focal_loss_gamma: float = 1.5
  # Huber (smooth-L1) delta for box regression.
  huber_loss_delta: float = 0.1
  # Weight of the box loss relative to the classification loss.
  box_loss_weight: int = 50
  # L2 regularization strength; 0 disables it.
  l2_weight_decay: float = 0.0
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
  """Config for one auxiliary attribute prediction head."""
  # Name identifying this attribute head.
  name: str = ''
  # Head type; only 'regression' appears as a default here.
  type: str = 'regression'
  # Output dimensionality of the attribute prediction.
  size: int = 1
@dataclasses.dataclass
class RetinaNetHead(hyperparams.Config):
  """Config for the RetinaNet classification/box head."""
  # Number of convolution layers before the prediction layer.
  num_convs: int = 4
  num_filters: int = 256
  use_separable_conv: bool = False
  # Optional extra attribute heads; None means no attribute predictions.
  attribute_heads: Optional[List[AttributeHead]] = None
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
  """Post-processing config for generating final detections."""
  # Number of top-scoring boxes kept before NMS.
  pre_nms_top_k: int = 5000
  # Score threshold applied before NMS.
  pre_nms_score_threshold: float = 0.05
  nms_iou_threshold: float = 0.5
  # Maximum detections returned per image.
  max_num_detections: int = 100
  # Whether to use a batched NMS op — confirm semantics against the
  # detection generator implementation.
  use_batched_nms: bool = False
@dataclasses.dataclass
class RetinaNet(hyperparams.Config):
  """Top-level RetinaNet model config."""
  # Number of classes; the default 0 must be overridden.
  num_classes: int = 0
  # Input image size as [height, width, channels].
  input_size: List[int] = dataclasses.field(default_factory=list)
  # Feature pyramid levels used for prediction.
  min_level: int = 3
  max_level: int = 7
  anchor: Anchor = Anchor()
  backbone: backbones.Backbone = backbones.Backbone(
      type='resnet', resnet=backbones.ResNet())
  decoder: decoders.Decoder = decoders.Decoder(
      type='fpn', fpn=decoders.FPN())
  head: RetinaNetHead = RetinaNetHead()
  detection_generator: DetectionGenerator = DetectionGenerator()
  norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class RetinaNetTask(cfg.TaskConfig):
  """Task config wiring model, data, losses and checkpoint loading."""
  model: RetinaNet = RetinaNet()
  train_data: DataConfig = DataConfig(is_training=True)
  validation_data: DataConfig = DataConfig(is_training=False)
  losses: Losses = Losses()
  # Optional checkpoint path to initialize from.
  init_checkpoint: Optional[str] = None
  init_checkpoint_modules: str = 'all'  # all or backbone
  # COCO-format annotation file used for evaluation metrics.
  annotation_file: Optional[str] = None
  # Also report per-category metrics, not only the aggregate.
  per_category_metrics: bool = False
@exp_factory.register_config_factory('retinanet')
def retinanet() -> cfg.ExperimentConfig:
  """RetinaNet general config.

  Returns a bare ExperimentConfig with default task settings. The
  restrictions force callers to set is_training explicitly on both the
  train and validation data configs.
  """
  return cfg.ExperimentConfig(
      task=RetinaNetTask(),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
# Base directory for COCO TFRecords and annotation files.
COCO_INPUT_PATH_BASE = 'coco'
# COCO 2017 split sizes, used to derive steps-per-epoch and eval steps.
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('retinanet_resnetfpn_coco')
def retinanet_resnetfpn_coco() -> cfg.ExperimentConfig:
  """COCO object detection with RetinaNet (ResNet-50 + FPN backbone).

  72-epoch schedule at 640x640 with scale jitter [0.5, 2.0], bfloat16
  mixed precision, ImageNet-pretrained backbone initialization, and a
  stepwise LR decay at epochs 57 and 67 after a 500-step linear warmup.
  """
  train_batch_size = 256
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
      task=RetinaNetTask(
          # Backbone-only warm start from an ImageNet ResNet-50 checkpoint.
          init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
          init_checkpoint_modules='backbone',
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=RetinaNet(
              num_classes=91,
              input_size=[640, 640, 3],
              min_level=3,
              max_level=7),
          losses=Losses(l2_weight_decay=1e-4),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          train_steps=72 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      # Decay at epochs 57 and 67.
                      'boundaries': [
                          57 * steps_per_epoch, 67 * steps_per_epoch
                      ],
                      # Linear-scaling rule: base LR 0.32 scaled by
                      # train_batch_size / 256.
                      'values': [
                          0.32 * train_batch_size / 256.0,
                          0.032 * train_batch_size / 256.0,
                          0.0032 * train_batch_size / 256.0
                      ],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 500,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
@exp_factory.register_config_factory('retinanet_spinenet_coco')
def retinanet_spinenet_coco() -> cfg.ExperimentConfig:
  """COCO object detection with RetinaNet using SpineNet backbone."""
  train_batch_size = 256
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
  input_size = 640
  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
      task=RetinaNetTask(
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=RetinaNet(
              # SpineNet-49 already produces multi-scale features, so the
              # decoder is the identity instead of an FPN.
              backbone=backbones.Backbone(
                  type='spinenet',
                  spinenet=backbones.SpineNet(
                      model_id='49', stochastic_depth_drop_rate=0.2)),
              decoder=decoders.Decoder(
                  type='identity', identity=decoders.Identity()),
              anchor=Anchor(anchor_size=3),
              norm_activation=common.NormActivation(
                  use_sync_bn=True, activation='swish'),
              num_classes=91,
              input_size=[input_size, input_size, 3],
              min_level=3,
              max_level=7),
          losses=Losses(l2_weight_decay=4e-5),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          # No init_checkpoint: trained from scratch, hence the long
          # 500-epoch schedule.
          train_steps=500 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      # 10x decays at epochs 475 and 490.
                      'boundaries': [
                          475 * steps_per_epoch, 490 * steps_per_epoch
                      ],
                      'values': [
                          0.32 * train_batch_size / 256.0,
                          0.032 * train_batch_size / 256.0,
                          0.0032 * train_batch_size / 256.0
                      ],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 2000,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
@exp_factory.register_config_factory('retinanet_spinenet_mobile_coco')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
  """COCO object detection with RetinaNet using Mobile SpineNet backbone."""
  train_batch_size = 256
  eval_batch_size = 8
  steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
  # Smaller input than the full SpineNet config (640) for mobile inference.
  input_size = 384
  config = cfg.ExperimentConfig(
      runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
      task=RetinaNetTask(
          annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
                                       'instances_val2017.json'),
          model=RetinaNet(
              backbone=backbones.Backbone(
                  type='spinenet_mobile',
                  spinenet_mobile=backbones.SpineNetMobile(
                      model_id='49', stochastic_depth_drop_rate=0.2)),
              decoder=decoders.Decoder(
                  type='identity', identity=decoders.Identity()),
              # Lightweight head: fewer filters and separable convolutions.
              head=RetinaNetHead(num_filters=48, use_separable_conv=True),
              anchor=Anchor(anchor_size=3),
              norm_activation=common.NormActivation(
                  use_sync_bn=True, activation='swish'),
              num_classes=91,
              input_size=[input_size, input_size, 3],
              min_level=3,
              max_level=7),
          losses=Losses(l2_weight_decay=3e-5),
          train_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
              is_training=True,
              global_batch_size=train_batch_size,
              parser=Parser(
                  aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
          validation_data=DataConfig(
              input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
              is_training=False,
              global_batch_size=eval_batch_size)),
      trainer=cfg.TrainerConfig(
          # From-scratch 600-epoch schedule.
          train_steps=600 * steps_per_epoch,
          validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
          validation_interval=steps_per_epoch,
          steps_per_loop=steps_per_epoch,
          summary_interval=steps_per_epoch,
          checkpoint_interval=steps_per_epoch,
          optimizer_config=optimization.OptimizationConfig({
              'optimizer': {
                  'type': 'sgd',
                  'sgd': {
                      'momentum': 0.9
                  }
              },
              'learning_rate': {
                  'type': 'stepwise',
                  'stepwise': {
                      # 10x decays at epochs 575 and 590.
                      'boundaries': [
                          575 * steps_per_epoch, 590 * steps_per_epoch
                      ],
                      'values': [
                          0.32 * train_batch_size / 256.0,
                          0.032 * train_batch_size / 256.0,
                          0.0032 * train_batch_size / 256.0
                      ],
                  }
              },
              'warmup': {
                  'type': 'linear',
                  'linear': {
                      'warmup_steps': 2000,
                      'warmup_learning_rate': 0.0067
                  }
              }
          })),
      restrictions=[
          'task.train_data.is_training != None',
          'task.validation_data.is_training != None'
      ])
  return config
| 35.302799 | 95 | 0.605305 |
import os
from typing import List, Optional
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import backbones
from official.vision.beta.configs import common
from official.vision.beta.configs import decoders
@dataclasses.dataclass
class TfExampleDecoder(hyperparams.Config):
regenerate_source_id: bool = False
@dataclasses.dataclass
class TfExampleDecoderLabelMap(hyperparams.Config):
regenerate_source_id: bool = False
label_map: str = ''
@dataclasses.dataclass
class DataDecoder(hyperparams.OneOfConfig):
type: Optional[str] = 'simple_decoder'
simple_decoder: TfExampleDecoder = TfExampleDecoder()
label_map_decoder: TfExampleDecoderLabelMap = TfExampleDecoderLabelMap()
@dataclasses.dataclass
class Parser(hyperparams.Config):
num_channels: int = 3
match_threshold: float = 0.5
unmatched_threshold: float = 0.5
aug_rand_hflip: bool = False
aug_scale_min: float = 1.0
aug_scale_max: float = 1.0
skip_crowd_during_training: bool = True
max_num_instances: int = 100
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
input_path: str = ''
global_batch_size: int = 0
is_training: bool = False
dtype: str = 'bfloat16'
decoder: DataDecoder = DataDecoder()
parser: Parser = Parser()
shuffle_buffer_size: int = 10000
file_type: str = 'tfrecord'
@dataclasses.dataclass
class Anchor(hyperparams.Config):
num_scales: int = 3
aspect_ratios: List[float] = dataclasses.field(
default_factory=lambda: [0.5, 1.0, 2.0])
anchor_size: float = 4.0
@dataclasses.dataclass
class Losses(hyperparams.Config):
focal_loss_alpha: float = 0.25
focal_loss_gamma: float = 1.5
huber_loss_delta: float = 0.1
box_loss_weight: int = 50
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class AttributeHead(hyperparams.Config):
name: str = ''
type: str = 'regression'
size: int = 1
@dataclasses.dataclass
class RetinaNetHead(hyperparams.Config):
num_convs: int = 4
num_filters: int = 256
use_separable_conv: bool = False
attribute_heads: Optional[List[AttributeHead]] = None
@dataclasses.dataclass
class DetectionGenerator(hyperparams.Config):
pre_nms_top_k: int = 5000
pre_nms_score_threshold: float = 0.05
nms_iou_threshold: float = 0.5
max_num_detections: int = 100
use_batched_nms: bool = False
@dataclasses.dataclass
class RetinaNet(hyperparams.Config):
num_classes: int = 0
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 7
anchor: Anchor = Anchor()
backbone: backbones.Backbone = backbones.Backbone(
type='resnet', resnet=backbones.ResNet())
decoder: decoders.Decoder = decoders.Decoder(
type='fpn', fpn=decoders.FPN())
head: RetinaNetHead = RetinaNetHead()
detection_generator: DetectionGenerator = DetectionGenerator()
norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class RetinaNetTask(cfg.TaskConfig):
model: RetinaNet = RetinaNet()
train_data: DataConfig = DataConfig(is_training=True)
validation_data: DataConfig = DataConfig(is_training=False)
losses: Losses = Losses()
init_checkpoint: Optional[str] = None
init_checkpoint_modules: str = 'all'
annotation_file: Optional[str] = None
per_category_metrics: bool = False
@exp_factory.register_config_factory('retinanet')
def retinanet() -> cfg.ExperimentConfig:
return cfg.ExperimentConfig(
task=RetinaNetTask(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
COCO_INPUT_PATH_BASE = 'coco'
COCO_TRAIN_EXAMPLES = 118287
COCO_VAL_EXAMPLES = 5000
@exp_factory.register_config_factory('retinanet_resnetfpn_coco')
def retinanet_resnetfpn_coco() -> cfg.ExperimentConfig:
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=RetinaNetTask(
init_checkpoint='gs://cloud-tpu-checkpoints/vision-2.0/resnet50_imagenet/ckpt-28080',
init_checkpoint_modules='backbone',
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
num_classes=91,
input_size=[640, 640, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=1e-4),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.5, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=72 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
57 * steps_per_epoch, 67 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 500,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('retinanet_spinenet_coco')
def retinanet_spinenet_coco() -> cfg.ExperimentConfig:
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 640
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet',
spinenet=backbones.SpineNet(
model_id='49', stochastic_depth_drop_rate=0.2)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=4e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=500 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
475 * steps_per_epoch, 490 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
@exp_factory.register_config_factory('retinanet_spinenet_mobile_coco')
def retinanet_spinenet_mobile_coco() -> cfg.ExperimentConfig:
train_batch_size = 256
eval_batch_size = 8
steps_per_epoch = COCO_TRAIN_EXAMPLES // train_batch_size
input_size = 384
config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='float32'),
task=RetinaNetTask(
annotation_file=os.path.join(COCO_INPUT_PATH_BASE,
'instances_val2017.json'),
model=RetinaNet(
backbone=backbones.Backbone(
type='spinenet_mobile',
spinenet_mobile=backbones.SpineNetMobile(
model_id='49', stochastic_depth_drop_rate=0.2)),
decoder=decoders.Decoder(
type='identity', identity=decoders.Identity()),
head=RetinaNetHead(num_filters=48, use_separable_conv=True),
anchor=Anchor(anchor_size=3),
norm_activation=common.NormActivation(
use_sync_bn=True, activation='swish'),
num_classes=91,
input_size=[input_size, input_size, 3],
min_level=3,
max_level=7),
losses=Losses(l2_weight_decay=3e-5),
train_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'train*'),
is_training=True,
global_batch_size=train_batch_size,
parser=Parser(
aug_rand_hflip=True, aug_scale_min=0.1, aug_scale_max=2.0)),
validation_data=DataConfig(
input_path=os.path.join(COCO_INPUT_PATH_BASE, 'val*'),
is_training=False,
global_batch_size=eval_batch_size)),
trainer=cfg.TrainerConfig(
train_steps=600 * steps_per_epoch,
validation_steps=COCO_VAL_EXAMPLES // eval_batch_size,
validation_interval=steps_per_epoch,
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
'sgd': {
'momentum': 0.9
}
},
'learning_rate': {
'type': 'stepwise',
'stepwise': {
'boundaries': [
575 * steps_per_epoch, 590 * steps_per_epoch
],
'values': [
0.32 * train_batch_size / 256.0,
0.032 * train_batch_size / 256.0,
0.0032 * train_batch_size / 256.0
],
}
},
'warmup': {
'type': 'linear',
'linear': {
'warmup_steps': 2000,
'warmup_learning_rate': 0.0067
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
| true | true |
f726499b8d6913a5329f1b82ef19ff7b2b6b251b | 1,827 | py | Python | Arbitrage_Spot/dquant/entrypoint.py | ronaldzgithub/CryptoArbitrage | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | [
"MIT"
] | 1 | 2021-11-03T06:16:16.000Z | 2021-11-03T06:16:16.000Z | Arbitrage_Spot/dquant/entrypoint.py | benno0810/CryptoArbitrage | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | [
"MIT"
] | null | null | null | Arbitrage_Spot/dquant/entrypoint.py | benno0810/CryptoArbitrage | b4b7a12b7b11f3dcf950f9d2039dad4f1388530b | [
"MIT"
] | 2 | 2021-05-07T09:11:54.000Z | 2021-11-27T16:29:10.000Z | import argparse
import logging
from logging.handlers import RotatingFileHandler
from dquant.datafeed import Datafeed
class EntryPoint:
    """Command-line entry point: parses arguments, configures logging and
    dispatches to the requested sub-command (currently only ``feed``)."""

    # Datafeed instance; created lazily by exec_command for the "feed" command.
    datafeed = None

    def exec_command(self, args ):
        """Dispatch on the parsed command line.

        Only "feed" is implemented: build a Datafeed, optionally restrict it
        to a comma-separated market list, then enter its blocking run loop.
        """
        logging.debug('exec_command:%s' % args)
        if "feed" in args.command:
            self.datafeed = Datafeed()
            if args.markets:
                self.datafeed.init_markets(args.markets.split(","))
            # Blocking: runs the feed loop until interrupted.
            self.datafeed._run_loop()
            return

    def init_logger(self, args):
        """Set up console logging plus a rotating file handler
        (DEBUG level when -d/--debug is given, INFO otherwise)."""
        level = logging.INFO
        if args.debug:
            level = logging.DEBUG
        logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                            level=level)
        # 100 MB per log file, keeping 10 rotated backups.
        Rthandler = RotatingFileHandler('../logs/dquant.log', maxBytes=100 * 1024 * 1024, backupCount=10)
        Rthandler.setLevel(level)
        formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
        Rthandler.setFormatter(formatter)
        logging.getLogger('').addHandler(Rthandler)

    def main(self):
        """Parse arguments, initialise logging and execute the command."""
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", "--debug", help="debug verbose mode",
                            action="store_true")
        parser.add_argument("-m", "--markets", type=str,
                            help="markets, example: -mokexusd")
        parser.add_argument("-s","--strategy",type=str,
                            help="strategy, example:-smaker")
        parser.add_argument("command", nargs='*', default="watch",
                            help='verb: "feed|exec|rexec"')
        args = parser.parse_args()
        self.init_logger(args)
        self.exec_command(args)
        print('main end')
        # NOTE(review): exits non-zero even on a clean run -- presumably so a
        # supervisor restarts the process; confirm before changing.
        exit(-1)
def main():
    """Console entry point: build an EntryPoint and run it."""
    EntryPoint().main()


if __name__ == "__main__":
    main()
| 31.5 | 105 | 0.579639 | import argparse
import logging
from logging.handlers import RotatingFileHandler
from dquant.datafeed import Datafeed
class EntryPoint:
datafeed = None
def exec_command(self, args ):
logging.debug('exec_command:%s' % args)
if "feed" in args.command:
self.datafeed = Datafeed()
if args.markets:
self.datafeed.init_markets(args.markets.split(","))
self.datafeed._run_loop()
return
def init_logger(self, args):
level = logging.INFO
if args.debug:
level = logging.DEBUG
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=level)
Rthandler = RotatingFileHandler('../logs/dquant.log', maxBytes=100 * 1024 * 1024, backupCount=10)
Rthandler.setLevel(level)
formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
Rthandler.setFormatter(formatter)
logging.getLogger('').addHandler(Rthandler)
def main(self):
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", help="debug verbose mode",
action="store_true")
parser.add_argument("-m", "--markets", type=str,
help="markets, example: -mokexusd")
parser.add_argument("-s","--strategy",type=str,
help="strategy, example:-smaker")
parser.add_argument("command", nargs='*', default="watch",
help='verb: "feed|exec|rexec"')
args = parser.parse_args()
self.init_logger(args)
self.exec_command(args)
print('main end')
exit(-1)
def main():
entrypoint = EntryPoint()
entrypoint.main()
if __name__ == "__main__":
main()
| true | true |
f7264a51c8db2fd5cc7ad9b2720dd04cb72172fd | 5,693 | py | Python | noval/python/parser/utils.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | noval/python/parser/utils.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | noval/python/parser/utils.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import functools
DATABASE_FILE = "version"
def MakeDirs(dirname):
    """Create *dirname* and any missing parent directories.

    Idempotent: an already-existing directory is not an error.

    The previous implementation split the path on separators and called
    exists()/mkdir() level by level, which is racy (another process could
    create a level between the check and the mkdir).  os.makedirs with
    exist_ok=True performs the same walk atomically per level; abspath
    normalises separators on Windows, which the old manual
    backslash-to-slash replacement was emulating.
    """
    dirname = os.path.abspath(dirname)
    os.makedirs(dirname, exist_ok=True)
def get_relative_name(module_path,path_list = sys.path):
    """Derive the dotted module name of *module_path* relative to the nearest
    ancestor directory found in *path_list* (default: sys.path).

    Returns a ``(relative_module_name, is_package)`` tuple; *is_package* is
    True when the file is a package ``__init__``.
    """
    path = os.path.dirname(module_path)
    recent_path = ''
    while True:
        # Walk upwards until we hit a directory on path_list or the
        # filesystem root (e.g. "/" or "C:\\", where dirname(path) == path).
        if PathsContainPath(path_list,path) or os.path.dirname(path) == path:
            recent_path = path
            break
        path = os.path.dirname(path)
    # Strip the located prefix and the file extension, e.g.
    # "/root/pkg/mod.py" -> "pkg/mod".
    # NOTE(review): split('.')[0] assumes no dots in directory names -- a
    # dotted path component would be truncated here; confirm inputs.
    path_name = module_path.replace(recent_path + os.sep,'').split('.')[0]
    if os.name == 'nt':
        path_name = path_name.replace(os.sep,'/')
    parts = path_name.split('/')
    if parts[-1] == "__init__":
        relative_module_name = '.'.join(parts[0:-1])
        is_package = True
    else:
        relative_module_name = '.'.join(parts)
        is_package = False
    return relative_module_name,is_package
def strcmp(str1,str2):
    """Three-way string comparison (1, -1 or 0, like C strcmp) in which
    '_' deliberately ranks after every other character, so that
    underscore-prefixed (private) names sink to the end of sorted lists."""
    i = 0
    while i<len(str1) and i<len(str2):
        if str1[i] != str2[i]:
            # Underscore always wins the "greater" comparison.
            if str1[i] == '_':
                return 1
            elif str2[i] == '_':
                return -1
            outcome = py_cmp(str1[i],str2[i])
            return outcome
        i += 1
    # One string is a prefix of the other: the shorter one sorts first.
    return py_cmp(len(str1),len(str2))
def CmpMember(x,y):
    """Case-insensitive member comparator built on strcmp; never returns 0."""
    return 1 if strcmp(x.lower(), y.lower()) == 1 else -1
def CmpMember2(x,y):
    """Comparator that sorts underscore-prefixed names after public ones,
    then falls back to a case-insensitive comparison; never returns 0."""
    x_private = x.startswith("_")
    y_private = y.startswith("_")
    if x_private != y_private:
        # Exactly one side is private: it sorts last.
        return 1 if x_private else -1
    return 1 if x.lower() > y.lower() else -1
def CompareDatabaseVersion_(new_version,old_version):
    """Return 1 when *new_version* is strictly newer than *old_version*
    (dotted numeric versions), otherwise 0.

    Bug fix: the previous implementation only checked "greater" per
    component and kept scanning on "smaller", so e.g. comparing "1.5.9"
    against "1.6.0" fell through to the third component (9 > 0) and wrongly
    reported the older version as newer.  The scan now stops with 0 as soon
    as a component of *new_version* is smaller.
    """
    new_parts = [int(part) for part in new_version.split(".")]
    old_parts = [int(part) for part in old_version.split(".")]
    for i, value in enumerate(new_parts):
        if i >= len(old_parts):
            # Same prefix but more components, e.g. "1.6.1" vs "1.6".
            return 1
        if value > old_parts[i]:
            return 1
        if value < old_parts[i]:
            return 0
    return 0
def IsNoneOrEmpty(value):
    """Return True when *value* is None or the empty string."""
    return value is None or value == ""
def IsPython3():
    """Return True when running under Python 3 or newer."""
    return sys.version_info[0] >= 3
def IsPython2():
    """Return True when running under Python 2."""
    return sys.version_info[0] == 2
def ComparePath(path1,path2):
    """Return True when the two paths denote the same location.

    On Windows the comparison normalises forward slashes to the native
    separator and ignores case; elsewhere it only ignores a trailing
    separator.
    """
    if os.name != 'nt':
        return path1.rstrip(os.sep) == path2.rstrip(os.sep)
    norm1 = path1.replace("/", os.sep).rstrip(os.sep)
    norm2 = path2.replace("/", os.sep).rstrip(os.sep)
    return norm1.lower() == norm2.lower()
def PathsContainPath(path_list,path):
    """Return True when *path* occurs in *path_list*, using ComparePath
    semantics on Windows and exact membership elsewhere."""
    if os.name != 'nt':
        return path in path_list
    return any(ComparePath(candidate, path) for candidate in path_list)
def CalcVersionValue(ver_str="0.0.0"):
    """Convert a dotted version string into a comparable float.

    Encoding: major * 1000 + minor + micro / 1000, where a one- or
    two-digit micro component is right-padded with a single '0', so
    "0.0.85" -> 0.850, "0.1.85" -> 1.850 and "1.1.85" -> 1001.850.
    Non-digit, non-dot characters are stripped first; strings with fewer
    than three components yield 0.
    """
    cleaned = "".join(ch for ch in ver_str if ch.isdigit() or ch == ".")
    parts = cleaned.split(u".")
    if len(parts) < 3:
        return 0
    micro_str = parts[2]
    if len(micro_str) <= 2:
        micro_str += u"0"
    return int(parts[0]) * 1000.0 + int(parts[1]) + float(micro_str) / 1000
def CompareCommonVersion(new_version,old_version):
    '''
    Compare two dotted version strings; return 1 when new_version is
    greater than old_version (i.e. an update is needed), otherwise 0.
    '''
    def format_version(version_str):
        '''
        Normalize a version string to at least three dot-separated
        components, e.g. "x.x" becomes "x.x.0".
        '''
        if len(version_str.split('.')) == 2:
            version_str += ".0"
        return version_str
    new_version = format_version(new_version)
    old_version = format_version(old_version)
    if CalcVersionValue(new_version) <= CalcVersionValue(old_version):
        return 0
    return 1
def py_sorted(iter_obj,cmp_func):
    """Sort *iter_obj* with an old-style comparison function, bridging the
    Python 2 ``cmp=`` and Python 3 ``key=functools.cmp_to_key`` APIs.

    NOTE(review): if neither interpreter branch matched, sort_obj would be
    unbound -- unreachable in practice since one of the two is always true.
    """
    if IsPython2():
        sort_obj = sorted(iter_obj, cmp=cmp_func)
    elif IsPython3():
        sort_obj = sorted(iter_obj, key=functools.cmp_to_key(cmp_func))
    return sort_obj
def py3_cmp(l,r):
    """cmp()-style three-way comparison: -1 when l < r, 1 when l > r,
    else 0."""
    if l < r:
        return -1
    if r < l:
        return 1
    return 0
# Python 3 removed the builtin cmp(); alias py_cmp to the builtin on
# Python 2 and to the py3_cmp shim on Python 3.
if IsPython2():
    py_cmp = cmp
elif IsPython3():
    py_cmp = py3_cmp
def LoadDatabaseVersion(database_location):
    """Return the version string stored in the database directory."""
    version_path = os.path.join(database_location, DATABASE_FILE)
    with open(version_path) as version_file:
        return version_file.read()
def SaveDatabaseVersion(database_location,new_database_version):
    """Write *new_database_version* to the database's version file,
    overwriting any previous content."""
    version_path = os.path.join(database_location, DATABASE_FILE)
    with open(version_path, "w") as version_file:
        version_file.write(new_database_version)
def NeedRenewDatabase(database_location,new_database_version):
    """Return True when the database must be regenerated: either no version
    file exists yet, or the stored version is older than
    *new_database_version*."""
    if not os.path.exists(os.path.join(database_location,DATABASE_FILE)):
        return True
    old_database_version = LoadDatabaseVersion(database_location)
    # CompareCommonVersion returns 0 when no update is needed.
    if 0 == CompareCommonVersion(new_database_version,old_database_version):
        return False
return True | 30.121693 | 79 | 0.585456 |
import os
import sys
import functools
DATABASE_FILE = "version"
def MakeDirs(dirname):
dirname = os.path.abspath(dirname)
dirname = dirname.replace("\\","/")
dirnames = dirname.split("/")
destdir = ""
destdir = os.path.join(dirnames[0] + "/",dirnames[1])
if not os.path.exists(destdir):
os.mkdir(destdir)
for name in dirnames[2:]:
destdir=os.path.join(destdir,name)
if not os.path.exists(destdir):
os.mkdir(destdir)
def get_relative_name(module_path,path_list = sys.path):
path = os.path.dirname(module_path)
recent_path = ''
while True:
if PathsContainPath(path_list,path) or os.path.dirname(path) == path:
recent_path = path
break
path = os.path.dirname(path)
path_name = module_path.replace(recent_path + os.sep,'').split('.')[0]
if os.name == 'nt':
path_name = path_name.replace(os.sep,'/')
parts = path_name.split('/')
if parts[-1] == "__init__":
relative_module_name = '.'.join(parts[0:-1])
is_package = True
else:
relative_module_name = '.'.join(parts)
is_package = False
return relative_module_name,is_package
def strcmp(str1,str2):
i = 0
while i<len(str1) and i<len(str2):
if str1[i] != str2[i]:
if str1[i] == '_':
return 1
elif str2[i] == '_':
return -1
outcome = py_cmp(str1[i],str2[i])
return outcome
i += 1
return py_cmp(len(str1),len(str2))
def CmpMember(x,y):
if strcmp(x.lower() , y.lower()) == 1:
return 1
return -1
def CmpMember2(x,y):
if x.startswith("_") and not y.startswith("_"):
return 1
elif y.startswith("_") and not x.startswith("_"):
return -1
if x.lower() > y.lower():
return 1
return -1
def CompareDatabaseVersion_(new_version,old_version):
new_verions = new_version.split(".")
old_versions = old_version.split(".")
for i,v in enumerate(new_verions):
if i >= len(old_versions):
return 1
if int(v) > int(old_versions[i]):
return 1
return 0
def IsNoneOrEmpty(value):
if value is None:
return True
elif value == "":
return True
return False
def IsPython3():
if sys.version_info[0] >= 3:
return True
return False
def IsPython2():
if sys.version_info[0] == 2:
return True
return False
def ComparePath(path1,path2):
if os.name == 'nt':
path1 = path1.replace("/",os.sep).rstrip(os.sep)
path2 = path2.replace("/",os.sep).rstrip(os.sep)
return path1.lower() == path2.lower()
return path1.rstrip(os.sep) == path2.rstrip(os.sep)
def PathsContainPath(path_list,path):
if os.name == 'nt':
for p in path_list:
if ComparePath(p,path):
return True
return False
return path in path_list
def CalcVersionValue(ver_str="0.0.0"):
ver_str = ''.join([char for char in ver_str
if char.isdigit() or char == '.'])
ver_lvl = ver_str.split(u".")
if len(ver_lvl) < 3:
return 0
major = int(ver_lvl[0]) * 1000
minor = int(ver_lvl[1])
if len(ver_lvl[2]) <= 2:
ver_lvl[2] += u'0'
micro = float(ver_lvl[2]) / 1000
return float(major) + float(minor) + micro
def CompareCommonVersion(new_version,old_version):
def format_version(version_str):
if len(version_str.split('.')) == 2:
version_str += ".0"
return version_str
new_version = format_version(new_version)
old_version = format_version(old_version)
if CalcVersionValue(new_version) <= CalcVersionValue(old_version):
return 0
return 1
def py_sorted(iter_obj,cmp_func):
if IsPython2():
sort_obj = sorted(iter_obj, cmp=cmp_func)
elif IsPython3():
sort_obj = sorted(iter_obj, key=functools.cmp_to_key(cmp_func))
return sort_obj
def py3_cmp(l,r):
if r < l:
return 1
if l < r:
return -1
return 0
if IsPython2():
py_cmp = cmp
elif IsPython3():
py_cmp = py3_cmp
def LoadDatabaseVersion(database_location):
with open(os.path.join(database_location,DATABASE_FILE)) as f:
return f.read()
def SaveDatabaseVersion(database_location,new_database_version):
with open(os.path.join(database_location,DATABASE_FILE),"w") as f:
f.write(new_database_version)
def NeedRenewDatabase(database_location,new_database_version):
if not os.path.exists(os.path.join(database_location,DATABASE_FILE)):
return True
old_database_version = LoadDatabaseVersion(database_location)
if 0 == CompareCommonVersion(new_database_version,old_database_version):
return False
return True | true | true |
f7264b00ab45f44826da46ff3c5c64fce9f84f82 | 219 | py | Python | nothing/nothing/doctype/customer_status/test_customer_status.py | libracore/nothing | e334c5a534eb3ec11ad8c77a467fae05f5383af5 | [
"MIT"
] | 1 | 2022-01-12T11:20:22.000Z | 2022-01-12T11:20:22.000Z | nothing/nothing/doctype/customer_status/test_customer_status.py | libracore/nothing | e334c5a534eb3ec11ad8c77a467fae05f5383af5 | [
"MIT"
] | null | null | null | nothing/nothing/doctype/customer_status/test_customer_status.py | libracore/nothing | e334c5a534eb3ec11ad8c77a467fae05f5383af5 | [
"MIT"
] | 2 | 2021-05-07T08:01:13.000Z | 2021-08-14T22:24:33.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCustomerStatus(unittest.TestCase):
    # Auto-generated test stub for the Customer Status doctype; no tests yet.
    pass
| 19.909091 | 51 | 0.767123 |
from __future__ import unicode_literals
import unittest
class TestCustomerStatus(unittest.TestCase):
pass
| true | true |
f7264b4fcfd7aafc1c81e31c2b3afdfb0672a9ba | 1,144 | py | Python | code/nn.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 17 | 2019-03-29T18:30:36.000Z | 2021-10-17T15:38:22.000Z | code/nn.py | arjunchandra/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 1 | 2019-04-22T22:40:30.000Z | 2019-04-24T21:45:07.000Z | code/nn.py | ctallec/continuous-rl | 8f3c655c6a4b2e9d15a6b052e5466c0a75191a08 | [
"MIT"
] | 5 | 2019-04-29T16:26:18.000Z | 2020-01-23T07:17:49.000Z | """Some nn utilities."""
import torch
from abstract import ParametricFunction
def copy_buffer(net: ParametricFunction, target_net: ParametricFunction):
    """Copy every registered buffer of net into target_net, in place."""
    with torch.no_grad():
        for dst_buf, src_buf in zip(target_net.buffers(), net.buffers()):  # type: ignore
            dst_buf.copy_(src_buf)
def soft_update(net: ParametricFunction, target_net: ParametricFunction, tau: float):
    """Soft update of the parameters of target_net with those of net.

    Precisely
    theta_targetnet <- tau * theta_targetnet + (1 - tau) * theta_net
    Buffers are copied over verbatim (no interpolation).
    """
    copy_buffer(net, target_net)
    with torch.no_grad():
        for target_param, param in zip(target_net.parameters(), net.parameters()):
            # Polyak averaging, equivalent to the formula above.  Uses the
            # add_(tensor, alpha=...) signature: the positional
            # add_(scalar, tensor) overload is deprecated and removed in
            # recent PyTorch releases.
            target_param.add_(param - target_param, alpha=1 - tau)
def hard_update(net: ParametricFunction, target_net: ParametricFunction):
    """Hard update (i.e. copy) of the parameters of target_net with those of net."""
    copy_buffer(net, target_net)
    with torch.no_grad():
        for dst_param, src_param in zip(target_net.parameters(), net.parameters()):
            dst_param.copy_(src_param)
| 40.857143 | 87 | 0.701049 | import torch
from abstract import ParametricFunction
def copy_buffer(net: ParametricFunction, target_net: ParametricFunction):
with torch.no_grad():
for target_buf, buf in zip(target_net.buffers(), net.buffers()):
target_buf.copy_(buf)
def soft_update(net: ParametricFunction, target_net: ParametricFunction, tau: float):
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.add_(1 - tau, param - target_param)
def hard_update(net: ParametricFunction, target_net: ParametricFunction):
copy_buffer(net, target_net)
with torch.no_grad():
for target_param, param in zip(target_net.parameters(), net.parameters()):
target_param.copy_(param)
| true | true |
f7264b724b3836bde921699ae915f09f3081112e | 598 | py | Python | driver/forms.py | Mariga123/carpool | f7330634ace2718c2347694b207b9dd49ef6538f | [
"MIT"
] | null | null | null | driver/forms.py | Mariga123/carpool | f7330634ace2718c2347694b207b9dd49ef6538f | [
"MIT"
] | null | null | null | driver/forms.py | Mariga123/carpool | f7330634ace2718c2347694b207b9dd49ef6538f | [
"MIT"
] | null | null | null | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
class RegisterForm(UserCreationForm):
    # Sign-up form: Django's stock UserCreationForm plus a required email.
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
    # Lets an authenticated user change only the email on their account.
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['email']
class UpdateForm(forms.ModelForm):
    # Driver profile editing form (name, bio, avatar, contact info, vehicle).
    class Meta:
        model = Driver
fields = ['name', 'bio', 'avatar','contact_info','vehicle'] | 26 | 68 | 0.667224 | from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import *
class RegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
class UserUpdateForm(forms.ModelForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['email']
class UpdateForm(forms.ModelForm):
class Meta:
model = Driver
fields = ['name', 'bio', 'avatar','contact_info','vehicle'] | true | true |
f7264d89247663cd466f161ae8db1fc4b69e2f6a | 946 | py | Python | cms/admin/dialog/views.py | emiquelito/django-cms-2.0 | 721d6aa91925ff46aa0de9f8ea967ca93e73741b | [
"BSD-3-Clause"
] | 1 | 2015-09-28T10:08:14.000Z | 2015-09-28T10:08:14.000Z | cms/admin/dialog/views.py | gmurewa/django-cms-2.0 | 6fab9d93ddcea301a844996f5f0db7edc4883953 | [
"BSD-3-Clause"
] | 1 | 2019-11-08T02:38:49.000Z | 2019-11-08T02:38:49.000Z | cms/admin/dialog/views.py | gmurewa/django-cms-2.0 | 6fab9d93ddcea301a844996f5f0db7edc4883953 | [
"BSD-3-Clause"
] | null | null | null | from cms.admin.dialog.forms import get_copy_dialog_form
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404, HttpResponse
from django.conf import settings
from cms.models import Page
@staff_member_required
def get_copy_dialog(request, page_id):
    """Render the page-copy confirmation dialog for the CMS admin.

    Returns an empty response when permission/moderator support is disabled;
    raises Http404 when the user lacks change permission on the source page
    or add permission on the target page.
    """
    if not settings.CMS_PERMISSION or not settings.CMS_MODERATOR:
        return HttpResponse('')
    page = get_object_or_404(Page, pk=page_id)
    # NOTE(review): request.REQUEST (merged GET/POST) was removed in
    # Django 1.9 -- presumably this project pins an older Django; confirm.
    target = get_object_or_404(Page, pk=request.REQUEST['target'])
    # Copying requires rights on both ends: change on source, add on target.
    if not page.has_change_permission(request) or \
        not target.has_add_permission(request):
        raise Http404
    context = {
        'dialog_id': 'dialog-copy',
        'form': get_copy_dialog_form(request)(),
        # JS callback name echoed back into the template, taken from the request.
        'callback': request.REQUEST['callback'],
    }
    return render_to_response("admin/cms/page/dialog/copy.html", context)
| 36.384615 | 73 | 0.738901 | from cms.admin.dialog.forms import get_copy_dialog_form
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.http import Http404, HttpResponse
from django.conf import settings
from cms.models import Page
@staff_member_required
def get_copy_dialog(request, page_id):
if not settings.CMS_PERMISSION or not settings.CMS_MODERATOR:
return HttpResponse('')
page = get_object_or_404(Page, pk=page_id)
target = get_object_or_404(Page, pk=request.REQUEST['target'])
if not page.has_change_permission(request) or \
not target.has_add_permission(request):
raise Http404
context = {
'dialog_id': 'dialog-copy',
'form': get_copy_dialog_form(request)(),
'callback': request.REQUEST['callback'],
}
return render_to_response("admin/cms/page/dialog/copy.html", context)
| true | true |
f7264ec3c56d15f017cede4fca8175d52c1cacc4 | 2,219 | py | Python | tests/test_user_storage.py | Cerzon/gb_chat | b4f8a6bf62b0971a135fbb2083456193f7a816cb | [
"Apache-2.0"
] | null | null | null | tests/test_user_storage.py | Cerzon/gb_chat | b4f8a6bf62b0971a135fbb2083456193f7a816cb | [
"Apache-2.0"
] | null | null | null | tests/test_user_storage.py | Cerzon/gb_chat | b4f8a6bf62b0971a135fbb2083456193f7a816cb | [
"Apache-2.0"
] | null | null | null | from unittest.mock import MagicMock
import pytest
from gb_chat.db.user_history_storage import UserHistoryStorage
from gb_chat.db.user_storage import (InvalidName, InvalidPassword, UserExists,
UserNotFound, UserStorage)
from conftest import VALID_PASSWORD, VALID_USERNAME
@pytest.fixture
def user_history_storage():
    # Strict mock: spec_set makes the mock reject calls to (or assignment of)
    # any attribute the real UserHistoryStorage class does not define.
    return MagicMock(spec_set=UserHistoryStorage)
@pytest.fixture
def sut(session, user_history_storage):
return UserStorage(session, user_history_storage)
@pytest.mark.parametrize("username", ["", " ", "user 1", "usr"])
def test_registers_user_raises_when_username_invalid(username, sut):
with pytest.raises(InvalidName):
sut.register_user(username, VALID_PASSWORD)
@pytest.mark.parametrize("password", ["", "qwerty", "password", "passw0rd", "Passw0rd"])
def test_registers_user_raises_when_password_invalid(password, sut):
with pytest.raises(InvalidPassword):
sut.register_user(VALID_USERNAME, password)
def test_registers_user_adds_register_record(sut, user_history_storage):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
user_history_storage.add_register_record.assert_called_once()
call = user_history_storage.add_register_record.mock_calls[0]
user = call.args[0]
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
@pytest.fixture
def sut_with_user(sut):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
return sut
def test_registers_user_raises_when_same_name(sut_with_user):
with pytest.raises(UserExists):
sut_with_user.register_user(VALID_USERNAME, "P@ssw0rd111")
@pytest.mark.parametrize(
"username,password", [(VALID_USERNAME, "pass"), ("user1", VALID_PASSWORD)]
)
def test_credentials_invalid(username, password, sut_with_user):
assert not sut_with_user.credentials_valid(username, password)
def test_get_user_raises_when_no_user_found(sut):
with pytest.raises(UserNotFound):
sut.get_user_by_name("aaaa")
def test_get_user_returns_registered_user(sut_with_user):
    """get_user_by_name returns the previously registered user's credentials.

    Renamed: this function previously reused the name of the test directly
    above it, so the UserNotFound test was silently shadowed and never ran.
    """
    user = sut_with_user.get_user_by_name(VALID_USERNAME)
    assert user.username == VALID_USERNAME
    assert user.password == VALID_PASSWORD
| 32.15942 | 88 | 0.775124 | from unittest.mock import MagicMock
import pytest
from gb_chat.db.user_history_storage import UserHistoryStorage
from gb_chat.db.user_storage import (InvalidName, InvalidPassword, UserExists,
UserNotFound, UserStorage)
from conftest import VALID_PASSWORD, VALID_USERNAME
@pytest.fixture
def user_history_storage():
return MagicMock(spec_set=UserHistoryStorage)
@pytest.fixture
def sut(session, user_history_storage):
return UserStorage(session, user_history_storage)
@pytest.mark.parametrize("username", ["", " ", "user 1", "usr"])
def test_registers_user_raises_when_username_invalid(username, sut):
with pytest.raises(InvalidName):
sut.register_user(username, VALID_PASSWORD)
@pytest.mark.parametrize("password", ["", "qwerty", "password", "passw0rd", "Passw0rd"])
def test_registers_user_raises_when_password_invalid(password, sut):
with pytest.raises(InvalidPassword):
sut.register_user(VALID_USERNAME, password)
def test_registers_user_adds_register_record(sut, user_history_storage):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
user_history_storage.add_register_record.assert_called_once()
call = user_history_storage.add_register_record.mock_calls[0]
user = call.args[0]
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
@pytest.fixture
def sut_with_user(sut):
sut.register_user(VALID_USERNAME, VALID_PASSWORD)
return sut
def test_registers_user_raises_when_same_name(sut_with_user):
with pytest.raises(UserExists):
sut_with_user.register_user(VALID_USERNAME, "P@ssw0rd111")
@pytest.mark.parametrize(
"username,password", [(VALID_USERNAME, "pass"), ("user1", VALID_PASSWORD)]
)
def test_credentials_invalid(username, password, sut_with_user):
assert not sut_with_user.credentials_valid(username, password)
def test_get_user_raises_when_no_user_found(sut):
with pytest.raises(UserNotFound):
sut.get_user_by_name("aaaa")
def test_get_user_raises_when_no_user_found(sut_with_user):
user = sut_with_user.get_user_by_name(VALID_USERNAME)
assert user.username == VALID_USERNAME
assert user.password == VALID_PASSWORD
| true | true |
f7264fe3b301f10852827f18dc032e373e7bf3a4 | 25,966 | py | Python | tests/test_commandline.py | marcelm/cutadapt | c63043e0f43970619bb7f8c1242912c236d60545 | [
"MIT"
] | 375 | 2015-01-16T14:04:50.000Z | 2022-03-16T02:19:43.000Z | tests/test_commandline.py | marcelm/cutadapt | c63043e0f43970619bb7f8c1242912c236d60545 | [
"MIT"
] | 589 | 2015-03-05T20:06:03.000Z | 2022-03-29T22:49:56.000Z | tests/test_commandline.py | marcelm/cutadapt | c63043e0f43970619bb7f8c1242912c236d60545 | [
"MIT"
] | 150 | 2015-02-10T12:19:40.000Z | 2022-03-25T05:06:50.000Z | import subprocess
import sys
import os
from io import StringIO, BytesIO
import dnaio
import pytest
from cutadapt.__main__ import main
from utils import assert_files_equal, datapath, cutpath
# pytest.mark.timeout will not fail even if pytest-timeout is not installed
try:
import pytest_timeout as _unused
except ImportError: # pragma: no cover
raise ImportError("pytest_timeout needs to be installed")
del _unused
def test_does_not_close_stdout():
main([datapath("small.fastq")])
assert not sys.stdout.closed
def test_help():
with pytest.raises(SystemExit) as e:
main(["--help"])
assert e.value.args[0] == 0
def test_unknown_file_format(tmp_path):
path = tmp_path / "unknown_format.txt"
path.write_text("raw text")
with pytest.raises(SystemExit):
main([str(path)])
def test_cores_negative():
with pytest.raises(SystemExit) as e:
main(["--cores=-1", datapath("simple.fasta")])
assert e.value.args[0] == 2
# "cannot be negative"
def test_quiet_and_report():
with pytest.raises(SystemExit) as e:
main(["--quiet", "--report=minimal", datapath("simple.fasta")])
assert e.value.args[0] == 2
# "Options --quiet and --report cannot be used at the same time"
@pytest.mark.parametrize("args", [
("--discard-trimmed", "--discard-untrimmed"),
("--discard-trimmed", "--untrimmed-output", os.devnull),
("--discard-untrimmed", "--untrimmed-output", os.devnull),
])
def test_only_one_of_discard_trimmed_discard_untrimmed_untrimmed_output(args):
with pytest.raises(SystemExit) as e:
main(["-o", os.devnull, *args, datapath("small.fastq")])
assert e.value.args[0] == 2
def test_debug():
main(["--debug", "--", datapath("small.fastq")])
def test_debug_trace():
main(["--debug", "--debug", "-a", "ACGT", datapath("small.fastq")])
def test_example(run):
run('-N -b ADAPTER', 'example.fa', 'example.fa')
def test_compressed_fasta(run):
run("", "simple.fasta", "simple.fasta.gz")
def test_small(run):
run('-a TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq')
def test_empty(run, cores):
"""empty input"""
run("--cores {} -a TTAGACATATCTCCGTCG".format(cores), "empty.fastq", "empty.fastq")
def test_newlines(run):
"""DOS/Windows newlines"""
run('-e 0.12 -a TTAGACATATCTCCGTCG', 'dos.fastq', 'dos.fastq')
def test_lowercase(run):
"""lowercase adapter"""
run('-a ttagacatatctccgtcg', 'lowercase.fastq', 'small.fastq')
def test_rest(run, tmp_path, cores):
"""-r/--rest-file"""
rest = tmp_path / "rest.tmp"
run(['--cores', str(cores), '-b', 'ADAPTER', '-N', '-r', rest], "rest.fa", "rest.fa")
assert_files_equal(datapath('rest.txt'), rest)
def test_restfront(run, tmp_path):
path = tmp_path / "rest.txt"
run(['-g', 'ADAPTER', '-N', '-r', path], "restfront.fa", "rest.fa")
assert_files_equal(datapath('restfront.txt'), path)
def test_discard(run):
"""--discard"""
run("-b TTAGACATATCTCCGTCG --discard", "discard.fastq", "small.fastq")
def test_discard_untrimmed(run):
"""--discard-untrimmed"""
run('-b CAAGAT --discard-untrimmed', 'discard-untrimmed.fastq', 'small.fastq')
def test_extensiontxtgz(run):
"""automatic recognition of "_sequence.txt.gz" extension"""
run("-b TTAGACATATCTCCGTCG", "s_1_sequence.txt", "s_1_sequence.txt.gz")
def test_minimum_length(run):
"""-m/--minimum-length"""
stats = run("-m 5 -a TTAGACATATCTCCGTCG", "minlen.fa", "lengths.fa")
assert stats.written_bp[0] == 45
assert stats.written == 6
def test_too_short(run, tmp_path, cores):
too_short_path = tmp_path / 'tooshort.fa'
stats = run([
"--cores", str(cores),
"-m", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-short-output", too_short_path
], "minlen.fa", "lengths.fa")
assert_files_equal(datapath('tooshort.fa'), too_short_path)
assert stats.filtered["too_short"] == 5
@pytest.mark.parametrize("redirect", (False, True))
def test_too_short_statistics(redirect):
args = ["-a", "TTAGACATATCTCCGTCG", "-m", "24", "-o", os.devnull, datapath("small.fastq")]
if redirect:
args[:0] = ["--too-short-output", os.devnull]
stats = main(args)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 58
assert stats.filtered["too_short"] == 1
def test_maximum_length(run):
"""-M/--maximum-length"""
run("-M 5 -a TTAGACATATCTCCGTCG", "maxlen.fa", "lengths.fa")
def test_too_long(run, tmp_path, cores):
"""--too-long-output"""
too_long_path = tmp_path / 'toolong.fa'
stats = run([
"--cores", str(cores),
"-M", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-long-output", too_long_path
], "maxlen.fa", "lengths.fa")
assert_files_equal(datapath('toolong.fa'), too_long_path)
assert stats.filtered["too_long"] == 5
def test_length_tag(run):
"""454 data; -n and --length-tag"""
run("-n 3 -e 0.1 --length-tag length= "
"-b TGAGACACGCAACAGGGGAAAGGCAAGGCACACAGGGGATAGG "
"-b TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA", '454.fa', '454.fa')
@pytest.mark.parametrize("length", list(range(3, 11)))
def test_overlap_a(tmp_path, length):
    """-O/--overlap with -a: trim only if at least 7 adapter bases overlap."""
    adapter = "catatctccg"
    untrimmed = ">read\nGAGACCATTCCAATG\n"
    record = ">read\nGAGACCATTCCAATG" + adapter[:length] + "\n"
    in_path = tmp_path / "overlap.fasta"
    in_path.write_text(record)
    out_path = tmp_path / "overlap-trimmed.fasta"
    main(["-O", "7", "-e", "0", "-a", adapter, "-o", str(out_path), str(in_path)])
    # Below the overlap threshold the read must pass through unchanged.
    expected = untrimmed if length >= 7 else record
    assert expected == out_path.read_text()
def test_overlap_b(run):
"""-O/--overlap with -b"""
run("-O 10 -b TTAGACATATCTCCGTCG", "overlapb.fa", "overlapb.fa")
def test_trim_n(run):
run("--trim-n", "trim-n.fasta", "trim-n.fasta")
def test_qualtrim(run):
"""-q with low qualities"""
run("-q 10 -a XXXXXX", "lowqual.fastq", "lowqual.fastq")
def test_qualbase(run):
"""-q with low qualities, using ascii(quality+64) encoding"""
run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq", "illumina64.fastq")
def test_quality_trim_only(run):
"""only trim qualities, do not remove adapters"""
run("-q 10 --quality-base 64", "illumina64.fastq", "illumina64.fastq")
def test_twoadapters(run):
"""two adapters"""
run("-a AATTTCAGGAATT -a GTTCTCTAGTTCT", "twoadapters.fasta", "twoadapters.fasta")
def test_polya(run):
"""poly-A tails"""
run("-m 24 -O 10 -a AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "polya.fasta", "polya.fasta")
def test_polya_brace_notation(run):
"""poly-A tails"""
run("-m 24 -O 10 -a A{35}", "polya.fasta", "polya.fasta")
# the same as --action=none
def test_no_trim(run):
run("--no-trim --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_action_none(run):
run("--action=none --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
# the same as --action=mask
def test_mask_adapter(run):
"""mask adapter with N (reads maintain the same length)"""
run("-b CAAG -n 3 --mask-adapter", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_mask(run):
"""mask adapter with N (reads maintain the same length)"""
run("-b CAAG -n 3 --action=mask", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_lowercase(run):
run("-b CAAG -n 3 --action=lowercase", "action_lowercase.fasta", "action_lowercase.fasta")
def test_action_retain(run):
run("-g GGTTAACC -a CAAG --action=retain", "action_retain.fasta", "action_retain.fasta")
def test_action_retain_times():
with pytest.raises(SystemExit):
main(["-a", "ACGT", "--times=2", "--action=retain", datapath("small.fastq")])
def test_gz_multiblock(run):
"""compressed gz file with multiple blocks (created by concatenating two .gz files)"""
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.gz")
def test_read_wildcard(run):
"""test wildcards in reads"""
run("--match-read-wildcards -b ACGTACGT", "wildcard.fa", "wildcard.fa")
@pytest.mark.parametrize("adapter_type,expected", [
("-a", "wildcard_adapter.fa"),
("-b", "wildcard_adapter_anywhere.fa"),
])
def test_adapter_wildcard(adapter_type, expected, run, tmp_path, cores):
"""wildcards in adapter"""
wildcard_path = tmp_path / "wildcards.txt"
run([
"--cores", str(cores),
"--wildcard-file", wildcard_path,
adapter_type, "ACGTNNNACGT"
], expected, "wildcard_adapter.fa")
with open(wildcard_path) as wct:
lines = wct.readlines()
lines = [line.strip() for line in lines]
assert lines == ["AAA 1", "GGG 2", "CCC 3b", "TTT 4b"]
def test_wildcard_N(run):
"""test 'N' wildcard matching with no allowed errors"""
run("-e 0 -a GGGGGGG --match-read-wildcards", "wildcardN.fa", "wildcardN.fa")
def test_illumina_adapter_wildcard(run):
run("-a VCCGAMCYUCKHRKDCUBBCNUWNSGHCGU", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_front(run):
"""test adapter in front"""
run("--front ADAPTER -N", "examplefront.fa", "example.fa")
def test_literal_N(run):
"""test matching literal 'N's"""
run("-N -e 0.2 -a NNNNNNNNNNNNNN", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2(run):
run("-N -O 1 -g NNNNNNNNNNNNNN", "trimN5.fasta", "trimN5.fasta")
def test_literal_N_brace_notation(run):
"""test matching literal 'N's"""
run("-N -e 0.2 -a N{14}", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2_brace_notation(run):
run("-N -O 1 -g N{14}", "trimN5.fasta", "trimN5.fasta")
def test_anchored_front(run):
run("-g ^FRONTADAPT -N", "anchored.fasta", "anchored.fasta")
def test_anchored_front_ellipsis_notation(run):
run("-a ^FRONTADAPT... -N", "anchored.fasta", "anchored.fasta")
def test_anchored_back(run):
run("-a BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_ellipsis_notation(run):
run("-a ...BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_no_indels(run):
run("-a BACKADAPTER$ -N --no-indels", "anchored-back.fasta", "anchored-back.fasta")
def test_no_indels(run):
run('-a TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_ellipsis_notation(run):
run('-a ...TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_issue_46(run, tmp_path):
"""issue 46 - IndexError with --wildcard-file"""
run("--anywhere=AACGTN --wildcard-file={}".format(
tmp_path / "wildcards.txt"), "issue46.fasta", "issue46.fasta")
def test_strip_suffix(run):
run("--strip-suffix _sequence -a XXXXXXX", "stripped.fasta", "simple.fasta")
def test_info_file(run, tmp_path, cores):
# The true adapter sequence in the illumina.fastq.gz data set is
# GCCTAACTTCTTAGACTGCCTTAAGGACGT (fourth base is different from the sequence shown here)
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "-a", "adapt=GCCGAACTTCTTAGACTGCCTTAAGGACGT"],
"illumina.fastq", "illumina.fastq.gz")
assert_files_equal(cutpath("illumina.info.txt"), info_path, ignore_trailing_space=True)
def test_info_file_times(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "--times", "2", "-a", "adapt=GCCGAACTTCTTA",
"-a", "adapt2=GACTGCCTTAAGGACGT"], "illumina5.fastq", "illumina5.fastq")
assert_files_equal(cutpath('illumina5.info.txt'), info_path, ignore_trailing_space=True)
def test_info_file_fasta(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
# Just make sure that it runs
run(["--cores", str(cores), "--info-file", info_path, "-a", "TTAGACATAT", "-g", "GAGATTGCCA", "--no-indels"],
"no_indels.fasta", "no_indels.fasta")
def test_info_file_revcomp(run, tmp_path):
info_path = tmp_path / "info-rc.txt"
main([
"--info-file", str(info_path),
"-a", "adapt=GAGTCG",
"--revcomp",
"--rename={header}",
"-o", str(tmp_path / "out.fasta"),
datapath("info-rc.fasta")
])
assert_files_equal(cutpath("info-rc.txt"), info_path)
def test_named_adapter(run):
run("-a MY_ADAPTER=GCCGAACTTCTTAGACTGCCTTAAGGACGT", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_with_u(run):
run("-a GCCGAACUUCUUAGACUGCCUUAAGGACGU", "illumina.fastq", "illumina.fastq.gz")
def test_bzip2_input(run, cores):
run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG"], "small.fastq", "small.fastq.bz2")
@pytest.mark.parametrize("extension", ["bz2", "xz", "gz"])
def test_compressed_output(tmp_path, cores, extension):
out_path = str(tmp_path / ("small.fastq." + extension))
params = [
"--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "-o", out_path, datapath("small.fastq")]
main(params)
def test_bzip2_multiblock(run):
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.bz2")
def test_xz(run):
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq.xz')
def test_no_args():
with pytest.raises(SystemExit):
main([])
def test_two_fastqs():
with pytest.raises(SystemExit):
main([datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_anchored_no_indels(run):
"""anchored 5' adapter, mismatches only (no indels)"""
run('-g ^TTAGACATAT --no-indels -e 0.1', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_read(run):
"""anchored 5' adapter, mismatches only (no indels), but wildcards in the read count as matches"""
run('-g ^TTAGACATAT --match-read-wildcards --no-indels -e 0.1',
'anchored_no_indels_wildcard.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_adapt(run):
"""anchored 5' adapter, mismatches only (no indels), but wildcards in the adapter count as matches"""
run('-g ^TTAGACANAT --no-indels -e 0.12', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_non_iupac_characters(run):
with pytest.raises(SystemExit):
main(['-a', 'ZACGT', datapath('small.fastq')])
def test_unconditional_cut_front(run):
run('-u 5', 'unconditional-front.fastq', 'small.fastq')
def test_unconditional_cut_back(run):
run('-u -5', 'unconditional-back.fastq', 'small.fastq')
def test_unconditional_cut_both(run):
run('-u -5 -u 5', 'unconditional-both.fastq', 'small.fastq')
def test_unconditional_cut_too_many_commas():
with pytest.raises(SystemExit):
main(["-u", "5,7,8", datapath("small.fastq")])
def test_unconditional_cut_invalid_number():
with pytest.raises(SystemExit):
main(["-u", "a,b", datapath("small.fastq")])
def test_untrimmed_output(run, cores, tmp_path):
path = tmp_path / "untrimmed.fastq"
stats = run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "--untrimmed-output", path],
"small.trimmed.fastq", "small.fastq")
assert_files_equal(cutpath("small.untrimmed.fastq"), path)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 46
def test_adapter_file(run):
run('-a file:' + datapath('adapter.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
def test_adapter_file_5p_anchored(run):
run('-N -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored(run):
run('-N -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_5p_anchored_no_indels(run):
run('-N --no-indels -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored_no_indels(run):
run('-N --no-indels -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_empty_name(run):
run('-N -a file:' + datapath('adapter-empty-name.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
@pytest.mark.parametrize("ext", ["", ".gz"])
def test_demultiplex(cores, tmp_path, ext):
    """Demultiplex by adapter name via the {name} output template,
    both uncompressed and gzip-compressed."""
    multiout = str(tmp_path / 'tmp-demulti.{name}.fasta') + ext
    params = [
        '--cores', str(cores),
        '-a', 'first=AATTTCAGGAATT',
        '-a', 'second=GTTCTCTAGTTCT',
        '-o', multiout,
        datapath('twoadapters.fasta'),
    ]
    main(params)
    # "unknown" collects reads that matched neither named adapter.
    for name in ("first", "second", "unknown"):
        actual = multiout.format(name=name)
        if ext == ".gz":
            # Decompress with the external gzip tool so the plain-text
            # comparison below works for both parametrizations.
            subprocess.run(["gzip", "-d", actual], check=True)
            actual = actual[:-3]
        expected = cutpath("twoadapters.{name}.fasta".format(name=name))
        assert_files_equal(expected, actual)
def test_multiple_fake_anchored_adapters(run):
run("-g ^CGTCCGAAGTAGC -g ^ATTGCCCTAG "
"-a TTCCATGCAGCATT$ -a CCAGTCCCCCC$ "
"-a GCCGAACTTCTTAGACTGCCTTAAGGACGT",
"illumina.fastq",
"illumina.fastq.gz")
def test_multiple_prefix_adapters(run):
run("-g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_prefix_adapters_noindels(run):
run("--no-indels -g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_suffix_adapters_noindels(run):
run("--no-indels -a CGTGATTATCTTGC$ -a CCTATTAGTGGTTGAAC$", "multisuffix.fasta", "multi.fasta")
def test_max_n(run):
assert run('--max-n 0', 'maxn0.fasta', 'maxn.fasta').filtered["too_many_n"] == 4
assert run('--max-n 1', 'maxn1.fasta', 'maxn.fasta').filtered["too_many_n"] == 2
assert run('--max-n 2', 'maxn2.fasta', 'maxn.fasta').filtered["too_many_n"] == 1
assert run('--max-n 0.2', 'maxn0.2.fasta', 'maxn.fasta').filtered["too_many_n"] == 3
assert run('--max-n 0.4', 'maxn0.4.fasta', 'maxn.fasta').filtered["too_many_n"] == 2
def test_quiet_is_quiet():
    """--quiet must produce no output on stdout or stderr (text or bytes)."""
    captured_standard_output = StringIO()
    captured_standard_error = StringIO()
    # cutadapt may write to the binary buffer layer, so give both fake
    # streams a BytesIO `buffer` attribute as real text streams have.
    setattr(captured_standard_output, "buffer", BytesIO())
    setattr(captured_standard_error, "buffer", BytesIO())
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    try:
        sys.stdout = captured_standard_output
        sys.stderr = captured_standard_error
        main(['-o', os.devnull, '--quiet', datapath('small.fastq')])
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
    assert captured_standard_output.getvalue() == ''
    assert captured_standard_error.getvalue() == ''
    assert getattr(captured_standard_output, "buffer").getvalue() == b''
    # Bug fix: the original asserted stdout's buffer twice and never
    # checked stderr's buffer at all.
    assert getattr(captured_standard_error, "buffer").getvalue() == b''
def test_x_brace_notation():
main(['-o', os.devnull, '--quiet', '-a', 'X{5}', datapath('small.fastq')])
def test_nextseq(run):
run('--nextseq-trim 22', 'nextseq.fastq', 'nextseq.fastq')
def test_linked_explicitly_anchored(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT', 'linked.fasta', 'linked.fasta')
def test_linked_multiple(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT -a ^AAAAAAAAAA...GCGCGCGCGC', 'linked.fasta', 'linked.fasta')
def test_linked_both_anchored(run):
run('-a ^AAAAAAAAAA...TTTTT$', 'linked-anchored.fasta', 'linked.fasta')
def test_linked_5p_not_anchored(run):
run('-g AAAAAAAAAA...TTTTTTTTTT', 'linked-not-anchored.fasta', 'linked.fasta')
def test_linked_discard_untrimmed(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard.fasta', 'linked.fasta')
def test_linked_discard_untrimmed_g(run):
run('-g AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard-g.fasta', 'linked.fasta')
def test_linked_lowercase(run):
run('-a ^AACCGGTTTT...GGGGGGG$ -a ^AAAA...TTTT$ --times=2 --action=lowercase',
'linked-lowercase.fasta', 'linked.fasta')
def test_linked_info_file(tmp_path):
info_path = tmp_path / 'info.txt'
main(['-a linkedadapter=^AAAAAAAAAA...TTTTTTTTTT', '--info-file', str(info_path),
'-o', str(tmp_path / 'out.fasta'), datapath('linked.fasta')])
assert_files_equal(cutpath('linked-info.txt'), info_path, ignore_trailing_space=True)
def test_linked_anywhere():
with pytest.raises(SystemExit):
main(['-b', 'AAA...TTT', datapath('linked.fasta')])
def test_anywhere_anchored_5p():
with pytest.raises(SystemExit):
main(['-b', '^AAA', datapath('small.fastq')])
def test_anywhere_anchored_3p():
with pytest.raises(SystemExit):
main(['-b', 'TTT$', datapath('small.fastq')])
def test_fasta(run):
run('-a TTAGACATATCTCCGTCG', 'small.fasta', 'small.fastq')
def test_fasta_no_trim(run):
run([], 'small-no-trim.fasta', 'small.fastq')
def test_length(run):
run('--length 5', 'shortened.fastq', 'small.fastq')
def test_negative_length(run):
run('--length -5', 'shortened-negative.fastq', 'small.fastq')
@pytest.mark.timeout(0.5)
def test_issue_296(tmp_path):
# Hang when using both --no-trim and --info-file together
info_path = tmp_path / 'info.txt'
reads_path = tmp_path / 'reads.fasta'
out_path = tmp_path / 'out.fasta'
reads_path.write_text(">read\nCACAAA\n")
main([
"--info-file", str(info_path),
"--no-trim",
"-g", "TTTCAC",
"-o", str(out_path),
str(reads_path),
])
# Output should be unchanged because of --no-trim
assert_files_equal(reads_path, out_path)
def test_xadapter(run):
run('-g XTCCGAATAGA', 'xadapter.fasta', 'xadapterx.fasta')
def test_adapterx(run):
run('-a TCCGAATAGAX', 'adapterx.fasta', 'xadapterx.fasta')
def test_discard_casava(run):
stats = run('--discard-casava', 'casava.fastq', 'casava.fastq')
assert stats.filtered["casava_filtered"] == 1
def test_underscore(run):
"""File name ending in _fastq.gz (issue #275)"""
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_cores_autodetect(run):
# Just make sure that it runs; functionality is not tested
run('--cores 0 -b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_write_compressed_fastq(cores, tmp_path):
main(['--cores', str(cores), '-o', str(tmp_path / 'out.fastq.gz'), datapath('small.fastq')])
def test_minimal_report(run):
run('-b TTAGACATATCTCCGTCG --report=minimal', 'small.fastq', 'small.fastq')
def test_paired_separate(run):
"""test separate trimming of paired-end reads"""
run("-a TTAGACATAT", "paired-separate.1.fastq", "paired.1.fastq")
run("-a CAGTGGAGTA", "paired-separate.2.fastq", "paired.2.fastq")
def test_empty_read_with_wildcard_in_adapter(run):
run("-g CWC", "empty.fastq", "empty.fastq")
def test_print_progress_to_tty(tmp_path, mocker):
mocker.patch("cutadapt.utils.sys.stderr").isatty.return_value = True
main(["-o", str(tmp_path / "out.fastq"), datapath("small.fastq")])
def test_adapter_order(run):
run("-g ^AAACC -a CCGGG", "adapterorder-ga.fasta", "adapterorder.fasta")
run("-a CCGGG -g ^AAACC", "adapterorder-ag.fasta", "adapterorder.fasta")
def test_reverse_complement_no_rc_suffix(run, tmp_path):
out_path = tmp_path / "out.fastq"
main([
"-o", str(out_path),
"--revcomp",
"--no-index",
"--rename", "{header}",
"-g", "^TTATTTGTCT",
"-g", "^TCCGCACTGG",
datapath("revcomp.1.fastq")
])
with dnaio.open(out_path) as f:
reads = list(f)
assert len(reads) == 6
assert reads[1].name == "read2/1"
assert reads[1].sequence == "ACCATCCGATATGTCTAATGTGGCCTGTTG"
def test_reverse_complement_normalized(run):
stats = run(
"--revcomp --no-index -g ^TTATTTGTCT -g ^TCCGCACTGG",
"revcomp-single-normalize.fastq",
"revcomp.1.fastq",
)
assert stats.n == 6
assert stats.reverse_complemented == 2
def test_reverse_complement_and_info_file(run, tmp_path, cores):
info_path = str(tmp_path / "info.txt")
run(
[
"--revcomp",
"--no-index",
"-g",
"^TTATTTGTCT",
"-g",
"^TCCGCACTGG",
"--info-file",
info_path,
],
"revcomp-single-normalize.fastq",
"revcomp.1.fastq",
)
with open(info_path) as f:
lines = f.readlines()
assert len(lines) == 6
assert lines[0].split("\t")[0] == "read1/1"
assert lines[1].split("\t")[0] == "read2/1 rc"
def test_max_expected_errors(run, cores):
stats = run("--max-ee=0.9", "maxee.fastq", "maxee.fastq")
assert stats.filtered["too_many_expected_errors"] == 2
def test_max_expected_errors_fasta(tmp_path):
path = tmp_path / "input.fasta"
path.write_text(">read\nACGTACGT\n")
main(["--max-ee=0.001", "-o", os.devnull, str(path)])
def test_warn_if_en_dashes_used():
with pytest.raises(SystemExit):
main(["–q", "25", "-o", os.devnull, "in.fastq"])
@pytest.mark.parametrize("opt", ["-y", "--suffix"])
def test_suffix(opt, run):
"""-y/--suffix parameter"""
run([opt, ' {name}', '-e', '0', '-a', 'OnlyT=TTTTTTTT', '-a', 'OnlyG=GGGGGGGG'], "suffix.fastq", "suffix.fastq")
@pytest.mark.parametrize("opt", ["--prefix", "--suffix"])
def test_rename_cannot_be_combined_with_other_renaming_options(opt):
with pytest.raises(SystemExit):
main([opt, "something", "--rename='{id} {comment} extrainfo'", "-o", os.devnull, datapath("empty.fastq")])
def test_rename(run):
run([
"--rename={id}_{cut_suffix} {header} {adapter_name}",
"--cut=-4",
"-a", "OnlyT=TTTTTT",
"-a", "OnlyG=GGGGGG",
], "rename.fastq", "suffix.fastq")
@pytest.mark.skip("This has not been fixed")
def test_terminates_correctly_on_error_in_subprocess(tmp_path):
params = [
"-j", "2",
"-o", str(tmp_path / "out.fastq.gz"),
datapath("format-error.fastq"),
]
with pytest.raises(SystemExit):
main(params)
| 31.359903 | 116 | 0.655973 | import subprocess
import sys
import os
from io import StringIO, BytesIO
import dnaio
import pytest
from cutadapt.__main__ import main
from utils import assert_files_equal, datapath, cutpath
try:
import pytest_timeout as _unused
except ImportError:
raise ImportError("pytest_timeout needs to be installed")
del _unused
def test_does_not_close_stdout():
main([datapath("small.fastq")])
assert not sys.stdout.closed
def test_help():
with pytest.raises(SystemExit) as e:
main(["--help"])
assert e.value.args[0] == 0
def test_unknown_file_format(tmp_path):
path = tmp_path / "unknown_format.txt"
path.write_text("raw text")
with pytest.raises(SystemExit):
main([str(path)])
def test_cores_negative():
with pytest.raises(SystemExit) as e:
main(["--cores=-1", datapath("simple.fasta")])
assert e.value.args[0] == 2
def test_quiet_and_report():
with pytest.raises(SystemExit) as e:
main(["--quiet", "--report=minimal", datapath("simple.fasta")])
assert e.value.args[0] == 2
@pytest.mark.parametrize("args", [
("--discard-trimmed", "--discard-untrimmed"),
("--discard-trimmed", "--untrimmed-output", os.devnull),
("--discard-untrimmed", "--untrimmed-output", os.devnull),
])
def test_only_one_of_discard_trimmed_discard_untrimmed_untrimmed_output(args):
with pytest.raises(SystemExit) as e:
main(["-o", os.devnull, *args, datapath("small.fastq")])
assert e.value.args[0] == 2
def test_debug():
main(["--debug", "--", datapath("small.fastq")])
def test_debug_trace():
main(["--debug", "--debug", "-a", "ACGT", datapath("small.fastq")])
def test_example(run):
run('-N -b ADAPTER', 'example.fa', 'example.fa')
def test_compressed_fasta(run):
run("", "simple.fasta", "simple.fasta.gz")
def test_small(run):
run('-a TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq')
def test_empty(run, cores):
run("--cores {} -a TTAGACATATCTCCGTCG".format(cores), "empty.fastq", "empty.fastq")
def test_newlines(run):
run('-e 0.12 -a TTAGACATATCTCCGTCG', 'dos.fastq', 'dos.fastq')
def test_lowercase(run):
run('-a ttagacatatctccgtcg', 'lowercase.fastq', 'small.fastq')
def test_rest(run, tmp_path, cores):
rest = tmp_path / "rest.tmp"
run(['--cores', str(cores), '-b', 'ADAPTER', '-N', '-r', rest], "rest.fa", "rest.fa")
assert_files_equal(datapath('rest.txt'), rest)
def test_restfront(run, tmp_path):
path = tmp_path / "rest.txt"
run(['-g', 'ADAPTER', '-N', '-r', path], "restfront.fa", "rest.fa")
assert_files_equal(datapath('restfront.txt'), path)
def test_discard(run):
run("-b TTAGACATATCTCCGTCG --discard", "discard.fastq", "small.fastq")
def test_discard_untrimmed(run):
run('-b CAAGAT --discard-untrimmed', 'discard-untrimmed.fastq', 'small.fastq')
def test_extensiontxtgz(run):
run("-b TTAGACATATCTCCGTCG", "s_1_sequence.txt", "s_1_sequence.txt.gz")
def test_minimum_length(run):
stats = run("-m 5 -a TTAGACATATCTCCGTCG", "minlen.fa", "lengths.fa")
assert stats.written_bp[0] == 45
assert stats.written == 6
def test_too_short(run, tmp_path, cores):
too_short_path = tmp_path / 'tooshort.fa'
stats = run([
"--cores", str(cores),
"-m", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-short-output", too_short_path
], "minlen.fa", "lengths.fa")
assert_files_equal(datapath('tooshort.fa'), too_short_path)
assert stats.filtered["too_short"] == 5
@pytest.mark.parametrize("redirect", (False, True))
def test_too_short_statistics(redirect):
args = ["-a", "TTAGACATATCTCCGTCG", "-m", "24", "-o", os.devnull, datapath("small.fastq")]
if redirect:
args[:0] = ["--too-short-output", os.devnull]
stats = main(args)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 58
assert stats.filtered["too_short"] == 1
def test_maximum_length(run):
run("-M 5 -a TTAGACATATCTCCGTCG", "maxlen.fa", "lengths.fa")
def test_too_long(run, tmp_path, cores):
too_long_path = tmp_path / 'toolong.fa'
stats = run([
"--cores", str(cores),
"-M", "5",
"-a", "TTAGACATATCTCCGTCG",
"--too-long-output", too_long_path
], "maxlen.fa", "lengths.fa")
assert_files_equal(datapath('toolong.fa'), too_long_path)
assert stats.filtered["too_long"] == 5
def test_length_tag(run):
run("-n 3 -e 0.1 --length-tag length= "
"-b TGAGACACGCAACAGGGGAAAGGCAAGGCACACAGGGGATAGG "
"-b TCCATCTCATCCCTGCGTGTCCCATCTGTTCCCTCCCTGTCTCA", '454.fa', '454.fa')
@pytest.mark.parametrize("length", list(range(3, 11)))
def test_overlap_a(tmp_path, length):
adapter = "catatctccg"
record = ">read\nGAGACCATTCCAATG" + adapter[:length] + '\n'
input = tmp_path / "overlap.fasta"
input.write_text(record)
if length < 7:
expected = record
else:
expected = '>read\nGAGACCATTCCAATG\n'
output = tmp_path / "overlap-trimmed.fasta"
main(["-O", "7", "-e", "0", "-a", adapter, "-o", str(output), str(input)])
assert expected == output.read_text()
def test_overlap_b(run):
run("-O 10 -b TTAGACATATCTCCGTCG", "overlapb.fa", "overlapb.fa")
def test_trim_n(run):
run("--trim-n", "trim-n.fasta", "trim-n.fasta")
def test_qualtrim(run):
run("-q 10 -a XXXXXX", "lowqual.fastq", "lowqual.fastq")
def test_qualbase(run):
run("-q 10 --quality-base 64 -a XXXXXX", "illumina64.fastq", "illumina64.fastq")
def test_quality_trim_only(run):
run("-q 10 --quality-base 64", "illumina64.fastq", "illumina64.fastq")
def test_twoadapters(run):
run("-a AATTTCAGGAATT -a GTTCTCTAGTTCT", "twoadapters.fasta", "twoadapters.fasta")
def test_polya(run):
run("-m 24 -O 10 -a AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "polya.fasta", "polya.fasta")
def test_polya_brace_notation(run):
run("-m 24 -O 10 -a A{35}", "polya.fasta", "polya.fasta")
def test_no_trim(run):
run("--no-trim --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_action_none(run):
run("--action=none --discard-untrimmed -a CCCTAGTTAAAC", 'no-trim.fastq', 'small.fastq')
def test_mask_adapter(run):
run("-b CAAG -n 3 --mask-adapter", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_mask(run):
run("-b CAAG -n 3 --action=mask", "anywhere_repeat.fastq", "anywhere_repeat.fastq")
def test_action_lowercase(run):
run("-b CAAG -n 3 --action=lowercase", "action_lowercase.fasta", "action_lowercase.fasta")
def test_action_retain(run):
run("-g GGTTAACC -a CAAG --action=retain", "action_retain.fasta", "action_retain.fasta")
def test_action_retain_times():
with pytest.raises(SystemExit):
main(["-a", "ACGT", "--times=2", "--action=retain", datapath("small.fastq")])
def test_gz_multiblock(run):
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.gz")
def test_read_wildcard(run):
run("--match-read-wildcards -b ACGTACGT", "wildcard.fa", "wildcard.fa")
@pytest.mark.parametrize("adapter_type,expected", [
("-a", "wildcard_adapter.fa"),
("-b", "wildcard_adapter_anywhere.fa"),
])
def test_adapter_wildcard(adapter_type, expected, run, tmp_path, cores):
wildcard_path = tmp_path / "wildcards.txt"
run([
"--cores", str(cores),
"--wildcard-file", wildcard_path,
adapter_type, "ACGTNNNACGT"
], expected, "wildcard_adapter.fa")
with open(wildcard_path) as wct:
lines = wct.readlines()
lines = [line.strip() for line in lines]
assert lines == ["AAA 1", "GGG 2", "CCC 3b", "TTT 4b"]
def test_wildcard_N(run):
run("-e 0 -a GGGGGGG --match-read-wildcards", "wildcardN.fa", "wildcardN.fa")
def test_illumina_adapter_wildcard(run):
run("-a VCCGAMCYUCKHRKDCUBBCNUWNSGHCGU", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_front(run):
run("--front ADAPTER -N", "examplefront.fa", "example.fa")
def test_literal_N(run):
run("-N -e 0.2 -a NNNNNNNNNNNNNN", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2(run):
run("-N -O 1 -g NNNNNNNNNNNNNN", "trimN5.fasta", "trimN5.fasta")
def test_literal_N_brace_notation(run):
run("-N -e 0.2 -a N{14}", "trimN3.fasta", "trimN3.fasta")
def test_literal_N2_brace_notation(run):
run("-N -O 1 -g N{14}", "trimN5.fasta", "trimN5.fasta")
def test_anchored_front(run):
run("-g ^FRONTADAPT -N", "anchored.fasta", "anchored.fasta")
def test_anchored_front_ellipsis_notation(run):
run("-a ^FRONTADAPT... -N", "anchored.fasta", "anchored.fasta")
def test_anchored_back(run):
run("-a BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_ellipsis_notation(run):
run("-a ...BACKADAPTER$ -N", "anchored-back.fasta", "anchored-back.fasta")
def test_anchored_back_no_indels(run):
run("-a BACKADAPTER$ -N --no-indels", "anchored-back.fasta", "anchored-back.fasta")
def test_no_indels(run):
run('-a TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_ellipsis_notation(run):
run('-a ...TTAGACATAT -g GAGATTGCCA --no-indels', 'no_indels.fasta', 'no_indels.fasta')
def test_issue_46(run, tmp_path):
run("--anywhere=AACGTN --wildcard-file={}".format(
tmp_path / "wildcards.txt"), "issue46.fasta", "issue46.fasta")
def test_strip_suffix(run):
run("--strip-suffix _sequence -a XXXXXXX", "stripped.fasta", "simple.fasta")
def test_info_file(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "-a", "adapt=GCCGAACTTCTTAGACTGCCTTAAGGACGT"],
"illumina.fastq", "illumina.fastq.gz")
assert_files_equal(cutpath("illumina.info.txt"), info_path, ignore_trailing_space=True)
def test_info_file_times(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "--times", "2", "-a", "adapt=GCCGAACTTCTTA",
"-a", "adapt2=GACTGCCTTAAGGACGT"], "illumina5.fastq", "illumina5.fastq")
assert_files_equal(cutpath('illumina5.info.txt'), info_path, ignore_trailing_space=True)
def test_info_file_fasta(run, tmp_path, cores):
info_path = tmp_path / "info.txt"
run(["--cores", str(cores), "--info-file", info_path, "-a", "TTAGACATAT", "-g", "GAGATTGCCA", "--no-indels"],
"no_indels.fasta", "no_indels.fasta")
def test_info_file_revcomp(run, tmp_path):
info_path = tmp_path / "info-rc.txt"
main([
"--info-file", str(info_path),
"-a", "adapt=GAGTCG",
"--revcomp",
"--rename={header}",
"-o", str(tmp_path / "out.fasta"),
datapath("info-rc.fasta")
])
assert_files_equal(cutpath("info-rc.txt"), info_path)
def test_named_adapter(run):
run("-a MY_ADAPTER=GCCGAACTTCTTAGACTGCCTTAAGGACGT", "illumina.fastq", "illumina.fastq.gz")
def test_adapter_with_u(run):
run("-a GCCGAACUUCUUAGACUGCCUUAAGGACGU", "illumina.fastq", "illumina.fastq.gz")
def test_bzip2_input(run, cores):
run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG"], "small.fastq", "small.fastq.bz2")
@pytest.mark.parametrize("extension", ["bz2", "xz", "gz"])
def test_compressed_output(tmp_path, cores, extension):
out_path = str(tmp_path / ("small.fastq." + extension))
params = [
"--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "-o", out_path, datapath("small.fastq")]
main(params)
def test_bzip2_multiblock(run):
run("-b TTAGACATATCTCCGTCG", "small.fastq", "multiblock.fastq.bz2")
def test_xz(run):
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'small.fastq.xz')
def test_no_args():
with pytest.raises(SystemExit):
main([])
def test_two_fastqs():
with pytest.raises(SystemExit):
main([datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_anchored_no_indels(run):
run('-g ^TTAGACATAT --no-indels -e 0.1', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_read(run):
run('-g ^TTAGACATAT --match-read-wildcards --no-indels -e 0.1',
'anchored_no_indels_wildcard.fasta', 'anchored_no_indels.fasta')
def test_anchored_no_indels_wildcard_adapt(run):
run('-g ^TTAGACANAT --no-indels -e 0.12', 'anchored_no_indels.fasta', 'anchored_no_indels.fasta')
def test_non_iupac_characters(run):
with pytest.raises(SystemExit):
main(['-a', 'ZACGT', datapath('small.fastq')])
def test_unconditional_cut_front(run):
run('-u 5', 'unconditional-front.fastq', 'small.fastq')
def test_unconditional_cut_back(run):
run('-u -5', 'unconditional-back.fastq', 'small.fastq')
def test_unconditional_cut_both(run):
run('-u -5 -u 5', 'unconditional-both.fastq', 'small.fastq')
def test_unconditional_cut_too_many_commas():
with pytest.raises(SystemExit):
main(["-u", "5,7,8", datapath("small.fastq")])
def test_unconditional_cut_invalid_number():
with pytest.raises(SystemExit):
main(["-u", "a,b", datapath("small.fastq")])
def test_untrimmed_output(run, cores, tmp_path):
path = tmp_path / "untrimmed.fastq"
stats = run(["--cores", str(cores), "-a", "TTAGACATATCTCCGTCG", "--untrimmed-output", path],
"small.trimmed.fastq", "small.fastq")
assert_files_equal(cutpath("small.untrimmed.fastq"), path)
assert stats.with_adapters[0] == 2
assert stats.written == 2
assert stats.written_bp[0] == 46
def test_adapter_file(run):
run('-a file:' + datapath('adapter.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
def test_adapter_file_5p_anchored(run):
run('-N -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored(run):
run('-N -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_5p_anchored_no_indels(run):
run('-N --no-indels -g file:' + datapath('prefix-adapter.fasta'), 'anchored.fasta', 'anchored.fasta')
def test_adapter_file_3p_anchored_no_indels(run):
run('-N --no-indels -a file:' + datapath('suffix-adapter.fasta'), 'anchored-back.fasta', 'anchored-back.fasta')
def test_adapter_file_empty_name(run):
run('-N -a file:' + datapath('adapter-empty-name.fasta'), 'illumina.fastq', 'illumina.fastq.gz')
@pytest.mark.parametrize("ext", ["", ".gz"])
def test_demultiplex(cores, tmp_path, ext):
multiout = str(tmp_path / 'tmp-demulti.{name}.fasta') + ext
params = [
'--cores', str(cores),
'-a', 'first=AATTTCAGGAATT',
'-a', 'second=GTTCTCTAGTTCT',
'-o', multiout,
datapath('twoadapters.fasta'),
]
main(params)
for name in ("first", "second", "unknown"):
actual = multiout.format(name=name)
if ext == ".gz":
subprocess.run(["gzip", "-d", actual], check=True)
actual = actual[:-3]
expected = cutpath("twoadapters.{name}.fasta".format(name=name))
assert_files_equal(expected, actual)
def test_multiple_fake_anchored_adapters(run):
run("-g ^CGTCCGAAGTAGC -g ^ATTGCCCTAG "
"-a TTCCATGCAGCATT$ -a CCAGTCCCCCC$ "
"-a GCCGAACTTCTTAGACTGCCTTAAGGACGT",
"illumina.fastq",
"illumina.fastq.gz")
def test_multiple_prefix_adapters(run):
run("-g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_prefix_adapters_noindels(run):
run("--no-indels -g ^GTACGGATTGTTCAGTA -g ^TATTAAGCTCATTC", "multiprefix.fasta", "multi.fasta")
def test_multiple_suffix_adapters_noindels(run):
run("--no-indels -a CGTGATTATCTTGC$ -a CCTATTAGTGGTTGAAC$", "multisuffix.fasta", "multi.fasta")
def test_max_n(run):
assert run('--max-n 0', 'maxn0.fasta', 'maxn.fasta').filtered["too_many_n"] == 4
assert run('--max-n 1', 'maxn1.fasta', 'maxn.fasta').filtered["too_many_n"] == 2
assert run('--max-n 2', 'maxn2.fasta', 'maxn.fasta').filtered["too_many_n"] == 1
assert run('--max-n 0.2', 'maxn0.2.fasta', 'maxn.fasta').filtered["too_many_n"] == 3
assert run('--max-n 0.4', 'maxn0.4.fasta', 'maxn.fasta').filtered["too_many_n"] == 2
def test_quiet_is_quiet():
    """--quiet must suppress all output, text and binary, on stdout and stderr."""
    captured_standard_output = StringIO()
    captured_standard_error = StringIO()
    # cutadapt may write binary data through the stream's .buffer attribute,
    # so the fake streams need one as well.
    captured_standard_output.buffer = BytesIO()
    captured_standard_error.buffer = BytesIO()
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    try:
        sys.stdout = captured_standard_output
        sys.stderr = captured_standard_error
        main(['-o', os.devnull, '--quiet', datapath('small.fastq')])
    finally:
        # Always restore the real streams, even if main() raises.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
    assert captured_standard_output.getvalue() == ''
    assert captured_standard_error.getvalue() == ''
    assert captured_standard_output.buffer.getvalue() == b''
    # Bug fix: this assertion previously re-checked stdout's buffer a second
    # time, leaving stderr's binary buffer unverified.
    assert captured_standard_error.buffer.getvalue() == b''
def test_x_brace_notation():
main(['-o', os.devnull, '--quiet', '-a', 'X{5}', datapath('small.fastq')])
def test_nextseq(run):
run('--nextseq-trim 22', 'nextseq.fastq', 'nextseq.fastq')
def test_linked_explicitly_anchored(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT', 'linked.fasta', 'linked.fasta')
def test_linked_multiple(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT -a ^AAAAAAAAAA...GCGCGCGCGC', 'linked.fasta', 'linked.fasta')
def test_linked_both_anchored(run):
run('-a ^AAAAAAAAAA...TTTTT$', 'linked-anchored.fasta', 'linked.fasta')
def test_linked_5p_not_anchored(run):
run('-g AAAAAAAAAA...TTTTTTTTTT', 'linked-not-anchored.fasta', 'linked.fasta')
def test_linked_discard_untrimmed(run):
run('-a ^AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard.fasta', 'linked.fasta')
def test_linked_discard_untrimmed_g(run):
run('-g AAAAAAAAAA...TTTTTTTTTT --discard-untrimmed', 'linked-discard-g.fasta', 'linked.fasta')
def test_linked_lowercase(run):
run('-a ^AACCGGTTTT...GGGGGGG$ -a ^AAAA...TTTT$ --times=2 --action=lowercase',
'linked-lowercase.fasta', 'linked.fasta')
def test_linked_info_file(tmp_path):
info_path = tmp_path / 'info.txt'
main(['-a linkedadapter=^AAAAAAAAAA...TTTTTTTTTT', '--info-file', str(info_path),
'-o', str(tmp_path / 'out.fasta'), datapath('linked.fasta')])
assert_files_equal(cutpath('linked-info.txt'), info_path, ignore_trailing_space=True)
def test_linked_anywhere():
with pytest.raises(SystemExit):
main(['-b', 'AAA...TTT', datapath('linked.fasta')])
def test_anywhere_anchored_5p():
with pytest.raises(SystemExit):
main(['-b', '^AAA', datapath('small.fastq')])
def test_anywhere_anchored_3p():
with pytest.raises(SystemExit):
main(['-b', 'TTT$', datapath('small.fastq')])
def test_fasta(run):
run('-a TTAGACATATCTCCGTCG', 'small.fasta', 'small.fastq')
def test_fasta_no_trim(run):
run([], 'small-no-trim.fasta', 'small.fastq')
def test_length(run):
run('--length 5', 'shortened.fastq', 'small.fastq')
def test_negative_length(run):
run('--length -5', 'shortened-negative.fastq', 'small.fastq')
@pytest.mark.timeout(0.5)
def test_issue_296(tmp_path):
info_path = tmp_path / 'info.txt'
reads_path = tmp_path / 'reads.fasta'
out_path = tmp_path / 'out.fasta'
reads_path.write_text(">read\nCACAAA\n")
main([
"--info-file", str(info_path),
"--no-trim",
"-g", "TTTCAC",
"-o", str(out_path),
str(reads_path),
])
assert_files_equal(reads_path, out_path)
def test_xadapter(run):
run('-g XTCCGAATAGA', 'xadapter.fasta', 'xadapterx.fasta')
def test_adapterx(run):
run('-a TCCGAATAGAX', 'adapterx.fasta', 'xadapterx.fasta')
def test_discard_casava(run):
stats = run('--discard-casava', 'casava.fastq', 'casava.fastq')
assert stats.filtered["casava_filtered"] == 1
def test_underscore(run):
run('-b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_cores_autodetect(run):
run('--cores 0 -b TTAGACATATCTCCGTCG', 'small.fastq', 'underscore_fastq.gz')
def test_write_compressed_fastq(cores, tmp_path):
main(['--cores', str(cores), '-o', str(tmp_path / 'out.fastq.gz'), datapath('small.fastq')])
def test_minimal_report(run):
run('-b TTAGACATATCTCCGTCG --report=minimal', 'small.fastq', 'small.fastq')
def test_paired_separate(run):
run("-a TTAGACATAT", "paired-separate.1.fastq", "paired.1.fastq")
run("-a CAGTGGAGTA", "paired-separate.2.fastq", "paired.2.fastq")
def test_empty_read_with_wildcard_in_adapter(run):
run("-g CWC", "empty.fastq", "empty.fastq")
def test_print_progress_to_tty(tmp_path, mocker):
mocker.patch("cutadapt.utils.sys.stderr").isatty.return_value = True
main(["-o", str(tmp_path / "out.fastq"), datapath("small.fastq")])
def test_adapter_order(run):
run("-g ^AAACC -a CCGGG", "adapterorder-ga.fasta", "adapterorder.fasta")
run("-a CCGGG -g ^AAACC", "adapterorder-ag.fasta", "adapterorder.fasta")
def test_reverse_complement_no_rc_suffix(run, tmp_path):
out_path = tmp_path / "out.fastq"
main([
"-o", str(out_path),
"--revcomp",
"--no-index",
"--rename", "{header}",
"-g", "^TTATTTGTCT",
"-g", "^TCCGCACTGG",
datapath("revcomp.1.fastq")
])
with dnaio.open(out_path) as f:
reads = list(f)
assert len(reads) == 6
assert reads[1].name == "read2/1"
assert reads[1].sequence == "ACCATCCGATATGTCTAATGTGGCCTGTTG"
def test_reverse_complement_normalized(run):
stats = run(
"--revcomp --no-index -g ^TTATTTGTCT -g ^TCCGCACTGG",
"revcomp-single-normalize.fastq",
"revcomp.1.fastq",
)
assert stats.n == 6
assert stats.reverse_complemented == 2
def test_reverse_complement_and_info_file(run, tmp_path, cores):
info_path = str(tmp_path / "info.txt")
run(
[
"--revcomp",
"--no-index",
"-g",
"^TTATTTGTCT",
"-g",
"^TCCGCACTGG",
"--info-file",
info_path,
],
"revcomp-single-normalize.fastq",
"revcomp.1.fastq",
)
with open(info_path) as f:
lines = f.readlines()
assert len(lines) == 6
assert lines[0].split("\t")[0] == "read1/1"
assert lines[1].split("\t")[0] == "read2/1 rc"
def test_max_expected_errors(run, cores):
stats = run("--max-ee=0.9", "maxee.fastq", "maxee.fastq")
assert stats.filtered["too_many_expected_errors"] == 2
def test_max_expected_errors_fasta(tmp_path):
path = tmp_path / "input.fasta"
path.write_text(">read\nACGTACGT\n")
main(["--max-ee=0.001", "-o", os.devnull, str(path)])
def test_warn_if_en_dashes_used():
with pytest.raises(SystemExit):
main(["–q", "25", "-o", os.devnull, "in.fastq"])
@pytest.mark.parametrize("opt", ["-y", "--suffix"])
def test_suffix(opt, run):
run([opt, ' {name}', '-e', '0', '-a', 'OnlyT=TTTTTTTT', '-a', 'OnlyG=GGGGGGGG'], "suffix.fastq", "suffix.fastq")
@pytest.mark.parametrize("opt", ["--prefix", "--suffix"])
def test_rename_cannot_be_combined_with_other_renaming_options(opt):
with pytest.raises(SystemExit):
main([opt, "something", "--rename='{id} {comment} extrainfo'", "-o", os.devnull, datapath("empty.fastq")])
def test_rename(run):
run([
"--rename={id}_{cut_suffix} {header} {adapter_name}",
"--cut=-4",
"-a", "OnlyT=TTTTTT",
"-a", "OnlyG=GGGGGG",
], "rename.fastq", "suffix.fastq")
@pytest.mark.skip("This has not been fixed")
def test_terminates_correctly_on_error_in_subprocess(tmp_path):
params = [
"-j", "2",
"-o", str(tmp_path / "out.fastq.gz"),
datapath("format-error.fastq"),
]
with pytest.raises(SystemExit):
main(params)
| true | true |
f726506396cd55e3c14fcaaeebe3a90d09e1dcf1 | 851 | py | Python | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy | 484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363 | [
"MIT"
] | 6,180 | 2016-11-01T14:22:30.000Z | 2022-03-31T08:39:20.000Z | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy | 484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363 | [
"MIT"
] | 6,281 | 2016-12-22T07:42:31.000Z | 2022-03-31T19:57:02.000Z | tests/cupy_tests/core_tests/test_syncdetect.py | svlandeg/cupy | 484e007d5bf58a0445af2f6e7aa3fdfe0fcc2363 | [
"MIT"
] | 829 | 2017-02-23T05:46:12.000Z | 2022-03-27T17:40:03.000Z | import unittest
import pytest
import cupy
import cupyx
class TestSyncDetect(unittest.TestCase):
    """Tests for ``cupyx.allow_synchronize``.

    While synchronization is disallowed, a device-synchronizing operation
    (``ndarray.get`` copies device -> host) must raise
    ``cupyx.DeviceSynchronized``; the innermost context setting wins.
    """

    def test_disallowed(self):
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(False):
            with pytest.raises(cupyx.DeviceSynchronized):
                arr.get()

    def test_allowed(self):
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(True):
            arr.get()

    def test_nested_disallowed(self):
        # Inner False overrides outer True.
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(True), cupyx.allow_synchronize(False):
            with pytest.raises(cupyx.DeviceSynchronized):
                arr.get()

    def test_nested_allowed(self):
        # Inner True overrides outer False.
        arr = cupy.array([2, 3])
        with cupyx.allow_synchronize(False), cupyx.allow_synchronize(True):
            arr.get()
| 25.029412 | 61 | 0.591069 | import unittest
import pytest
import cupy
import cupyx
class TestSyncDetect(unittest.TestCase):
    # Tests for cupyx.allow_synchronize: while synchronization is disallowed,
    # device-synchronizing operations (ndarray.get copies device -> host) must
    # raise cupyx.DeviceSynchronized. The innermost context setting wins.
    def test_disallowed(self):
        a = cupy.array([2, 3])
        with cupyx.allow_synchronize(False):
            with pytest.raises(cupyx.DeviceSynchronized):
                a.get()
    def test_allowed(self):
        a = cupy.array([2, 3])
        with cupyx.allow_synchronize(True):
            a.get()
    def test_nested_disallowed(self):
        # Inner False overrides outer True.
        a = cupy.array([2, 3])
        with cupyx.allow_synchronize(True):
            with cupyx.allow_synchronize(False):
                with pytest.raises(cupyx.DeviceSynchronized):
                    a.get()
    def test_nested_allowed(self):
        # Inner True overrides outer False.
        a = cupy.array([2, 3])
        with cupyx.allow_synchronize(False):
            with cupyx.allow_synchronize(True):
                a.get()
| true | true |
f72650f004500bff57f9152bebe02c0607cf7d24 | 567 | py | Python | BOJ14405.py | INYEONGKIM/BOJ | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | [
"MIT"
] | 2 | 2019-03-05T15:42:46.000Z | 2019-07-24T15:52:36.000Z | BOJ14405.py | INYEONGKIM/BOJ | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | [
"MIT"
] | null | null | null | BOJ14405.py | INYEONGKIM/BOJ | 5e83d77a92d18b0d20d26645c7cfe4ba3e2d25bc | [
"MIT"
] | null | null | null | s=input();f=True
try:
while s!="":
if s[0]=="p":
t=s[:2]
if t=="pi":
s=s[2:]
else:
f=False; break
elif s[0]=="k":
t=s[:2]
if t=="ka":
s=s[2:]
else:
f=False; break
elif s[0]=="c":
t=s[:3]
if t=="chu":
s=s[3:]
else:
f=False; break
else:
f=False; break
except:
f=False
if f:
print("YES")
else:
print("NO")
| 18.9 | 30 | 0.294533 | s=input();f=True
# Greedily consume "pi" / "ka" / "chu" tokens from the front of s; f stays
# True only when the whole string decomposes into those tokens.
try:
    while s!="":
        if s[0]=="p":
            # Candidate "pi" token (2 chars).
            t=s[:2]
            if t=="pi":
                s=s[2:]
            else:
                f=False; break
        elif s[0]=="k":
            # Candidate "ka" token (2 chars).
            t=s[:2]
            if t=="ka":
                s=s[2:]
            else:
                f=False; break
        elif s[0]=="c":
            # Candidate "chu" token (3 chars).
            t=s[:3]
            if t=="chu":
                s=s[3:]
            else:
                f=False; break
        else:
            # First character cannot start any token.
            f=False; break
except:
    # NOTE(review): slicing and comparisons above cannot raise, so this
    # handler appears unreachable; kept for byte-identical behavior.
    f=False
if f:
    print("YES")
else:
    print("NO")
| true | true |
f726518fe0feae8b103c50e15118f179206e6821 | 374 | py | Python | build/check_obstacle/catkin_generated/pkg.installspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | ddd9a17d53899f1c615816fd74512c112ecad188 | [
"MIT"
] | 4 | 2019-10-26T18:48:51.000Z | 2020-02-27T19:31:36.000Z | build/check_obstacle/catkin_generated/pkg.installspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | ddd9a17d53899f1c615816fd74512c112ecad188 | [
"MIT"
] | null | null | null | build/check_obstacle/catkin_generated/pkg.installspace.context.pc.py | EurobotMDX/eurobot_2020_odroid_cam | ddd9a17d53899f1c615816fd74512c112ecad188 | [
"MIT"
] | 1 | 2019-10-26T18:50:48.000Z | 2019-10-26T18:50:48.000Z | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "check_obstacle"
PROJECT_SPACE_DIR = "/home/ros/lidar_ws/install"
PROJECT_VERSION = "0.0.0"
| 41.555556 | 68 | 0.705882 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "check_obstacle"
PROJECT_SPACE_DIR = "/home/ros/lidar_ws/install"
PROJECT_VERSION = "0.0.0"
| true | true |
f7265193a85ba6ceeb41dc4f556930b01622c5a0 | 2,456 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/ir/inference/test_identity_scale_clean_pass.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig
import paddle.inference as paddle_infer
import unittest
import hypothesis.strategies as st
class TestIdentityScaleCleanPass(PassAutoScanTest):
    # Auto-scan test for identity_scale_op_clean_pass: the sampled programs are
    # relu -> scale(scale=1, bias=0); the pass should fold the identity scale
    # away so only 'relu' remains in the optimized graph.
    def sample_predictor_configs(self, program_config):
        # TensorRT FP32 config; after the pass the expected op set is ['relu'],
        # compared with (atol, rtol) = (1e-5, 1e-5).
        config = self.create_trt_inference_config()
        config.enable_tensorrt_engine(
            max_batch_size=8,
            workspace_size=0,
            min_subgraph_size=0,
            precision_mode=paddle_infer.PrecisionType.Float32,
            use_static=False,
            use_calib_mode=False)
        yield config, ['relu'], (1e-5, 1e-5)

    def sample_program_config(self, draw):
        # NOTE(review): bias_after_scale is drawn but never used below —
        # scale_op hard-codes bias_after_scale=True. Confirm whether the
        # drawn value was meant to be passed through.
        bias_after_scale = draw(st.booleans())
        # Random NCHW input shape.
        n = draw(st.integers(min_value=1, max_value=4))
        c = draw(st.integers(min_value=1, max_value=20))
        h = draw(st.integers(min_value=1, max_value=20))
        w = draw(st.integers(min_value=1, max_value=20))
        relu_op = OpConfig("relu",
                           inputs={"X": ["relu_x"]},
                           outputs={"Out": ["relu_out"]})
        # Identity scale: scale=1, bias=0 — the op the pass must remove.
        scale_op = OpConfig("scale",
                            inputs={"X": ["relu_out"]},
                            outputs={"Out": ["scale_out"]},
                            bias=0.,
                            scale=1.,
                            bias_after_scale=True)
        program_config = ProgramConfig(
            ops=[relu_op, scale_op],
            weights={},
            inputs={"relu_x": TensorConfig(shape=[n, c, h, w])},
            outputs=["scale_out"])
        return program_config

    def test(self):
        # Hypothesis-driven scan: 25 sampled programs through the target pass.
        self.run_and_statis(max_examples=25,
                            passes=["identity_scale_op_clean_pass"])
if __name__ == "__main__":
unittest.main()
| 37.212121 | 74 | 0.623779 |
import numpy as np
from auto_scan_test import PassAutoScanTest
from program_config import TensorConfig, ProgramConfig, OpConfig
import paddle.inference as paddle_infer
import unittest
import hypothesis.strategies as st
class TestIdentityScaleCleanPass(PassAutoScanTest):
def sample_predictor_configs(self, program_config):
config = self.create_trt_inference_config()
config.enable_tensorrt_engine(
max_batch_size=8,
workspace_size=0,
min_subgraph_size=0,
precision_mode=paddle_infer.PrecisionType.Float32,
use_static=False,
use_calib_mode=False)
yield config, ['relu'], (1e-5, 1e-5)
def sample_program_config(self, draw):
bias_after_scale = draw(st.booleans())
n = draw(st.integers(min_value=1, max_value=4))
c = draw(st.integers(min_value=1, max_value=20))
h = draw(st.integers(min_value=1, max_value=20))
w = draw(st.integers(min_value=1, max_value=20))
relu_op = OpConfig("relu",
inputs={"X": ["relu_x"]},
outputs={"Out": ["relu_out"]})
scale_op = OpConfig("scale",
inputs={"X": ["relu_out"]},
outputs={"Out": ["scale_out"]},
bias=0.,
scale=1.,
bias_after_scale=True)
program_config = ProgramConfig(
ops=[relu_op, scale_op],
weights={},
inputs={"relu_x": TensorConfig(shape=[n, c, h, w])},
outputs=["scale_out"])
return program_config
def test(self):
self.run_and_statis(max_examples=25,
passes=["identity_scale_op_clean_pass"])
if __name__ == "__main__":
unittest.main()
| true | true |
f72651d2708561bc7a3f8b8ff37df2a1572eaac2 | 15,984 | py | Python | LSTM for language modeling/Question2_Part_1_To_2.py | sotudian/Natural-Language-Processing | 61ba2ac78e440683519d2121ca2b29a17277e46b | [
"Apache-2.0"
] | null | null | null | LSTM for language modeling/Question2_Part_1_To_2.py | sotudian/Natural-Language-Processing | 61ba2ac78e440683519d2121ca2b29a17277e46b | [
"Apache-2.0"
] | null | null | null | LSTM for language modeling/Question2_Part_1_To_2.py | sotudian/Natural-Language-Processing | 61ba2ac78e440683519d2121ca2b29a17277e46b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train the language model on text from the file "Pride and Prejudice". Before using it to train the language model,
you need to first sentence-segment, then tokenize, then lowercase each line of the file using spaCy. Append a
start-of-sentence token '<s>' and an end-of-sentence token '</s>' to each sentence and put each sentence on its own line.
Use only words that appear more than once in this corpus and assign UNK tokens to the rest; you may also need to
pad sentences that are shorter than 5 tokens. Train the language model and save the trained model. Generate 10 examples
of text from it, each starting from the '<s>' token and ending at the '</s>' token.
@author: shahab Sotudian
"""
import re
import pickle
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.lm.preprocessing import pad_both_ends
from collections import Counter
import math
# Functions ###########================-------------------
'''
############################################################
#### Piazza calculate Preplexity
net.cuda()
net.eval()
H = 0
TOTAL_PROBs = 1
with torch.no_grad():
for Test_Sentence in Test_1_Preprocessed_Pride_Text_Perplexity:
H += len(Test_Sentence)
# Calculate for each sentence
Total_prob_Sentence = 1
for i,word in enumerate(Test_Sentence):
if i == len(Test_Sentence)-1:
continue
else:
if i==0:
h = net.init_hidden(1)
h = tuple([each.data for each in h])
else:
h = h_new
x = np.array([[word2idx[word]]])
inputs = torch.from_numpy(x)
inputs = inputs.cuda()
out, h_new = net(inputs, h)
# get the token probabilities
p = F.softmax(out, dim=1).data
p = p.cpu()
p = p.numpy()
p = p.reshape(p.shape[1],)
Prob_next_Word = p[word2idx[Test_Sentence[i+1]]] # P(w4|w1,w2,w3)
Total_prob_Sentence = Prob_next_Word * Total_prob_Sentence
TOTAL_PROBs = TOTAL_PROBs * Total_prob_Sentence
Preplexity = (1/TOTAL_PROBs)**(1/float(H))
############################################################
'''
def NLP_PreProcessing(text_main):
    """Split raw text into sentences, lowercase and tokenize each one, then
    wrap every token list with <s>/</s> boundary markers (bigram padding).

    Returns a list of padded token lists, one per sentence.
    """
    padded_sentences = []
    for sentence in nltk.sent_tokenize(text_main):
        tokens = word_tokenize(sentence.lower())
        padded_sentences.append(list(pad_both_ends(tokens, n=2)))
    return padded_sentences
def NLP_PreProcessing_Test(text_main):
    """Lowercase and tokenize a single sentence, pad it with <s>/</s>
    markers, and return it wrapped in a one-element list so the shape
    matches the output of sentence-level preprocessing."""
    tokens = word_tokenize(text_main.lower())
    return [list(pad_both_ends(tokens, n=2))]
def Equal_seq(text, seq_len):
    """Cut a token list into every contiguous window of length ``seq_len``.

    A sequence of length <= seq_len yields exactly one window, left-padded
    with '_PAD' tokens up to seq_len; longer sequences yield one window per
    starting position (a sliding window with stride 1).
    """
    if len(text) <= seq_len:
        return [['_PAD'] * (seq_len - len(text)) + text]
    return [text[start:start + seq_len]
            for start in range(len(text) - seq_len + 1)]
def get_batches(arr_x, arr_y, batch_size):
    """Yield consecutive (x, y) mini-batches of exactly ``batch_size`` rows.

    Mirrors the original ``range(batch_size, N, batch_size)`` bound: the
    trailing rows are always held back, so even when N is an exact multiple
    of ``batch_size`` the last full batch is NOT yielded.
    """
    upper = 0
    while upper + batch_size < arr_x.shape[0]:
        lower = upper
        upper += batch_size
        yield arr_x[lower:upper, :], arr_y[lower:upper, :]
class WordLSTM(nn.Module):
    """Word-level LSTM language model: embedding -> LSTM -> dropout -> linear.

    Relies on the module-level global ``vocab_size`` (set later in this
    script to ``len(word2idx)``) for the embedding and output dimensions.
    NOTE(review): instances are saved/loaded with ``torch.save``/``torch.load``
    (whole-object pickle), so attribute names here must not change.
    """

    def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
        super().__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr  # stored but unused here; the optimizer is built in train()
        self.emb_layer = nn.Embedding(vocab_size, 200)
        ## define the LSTM (batch_first: input is (batch, seq, emb))
        self.lstm = nn.LSTM(200, n_hidden, n_layers,
                            dropout=drop_prob, batch_first=True)
        ## define a dropout layer
        self.dropout = nn.Dropout(drop_prob)
        ## define the fully-connected layer projecting back to the vocabulary
        self.fc = nn.Linear(n_hidden, vocab_size)
    def forward(self, x, hidden):
        ''' Forward pass through the network.
            These inputs are x, and the hidden/cell state `hidden`.
            Returns logits flattened to (batch*seq, vocab_size) plus the
            updated hidden state. '''
        ## pass input through embedding layer
        embedded = self.emb_layer(x)
        ## Get the outputs and the new hidden state from the lstm
        lstm_output, hidden = self.lstm(embedded, hidden)
        ## pass through a dropout layer
        out = self.dropout(lstm_output)
        #out = out.contiguous().view(-1, self.n_hidden)
        out = out.reshape(-1, self.n_hidden)
        ## put "out" through the fully-connected layer
        out = self.fc(out)
        # return the final output and the hidden state
        return out, hidden
    def init_hidden(self, batch_size):
        ''' initializes hidden state '''
        # Create two new tensors with sizes n_layers x batch_size x n_hidden,
        # initialized to zero, for hidden state and cell state of LSTM
        weight = next(self.parameters()).data
        # if GPU is available
        if (torch.cuda.is_available()):
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
        # if GPU is not available
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
        return hidden
def train(net, epochs, batch_size, lr, clip, print_every, XX, YY):
    """Train ``net`` on index arrays ``XX``/``YY`` with Adam + cross-entropy.

    Requires CUDA (model and batches are moved to GPU).  Gradients are
    clipped to ``clip``; a progress line is printed every ``print_every``
    steps.
    """
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    net.cuda()  # training path is GPU-only in this script
    net.train()
    step = 0
    for epoch in range(epochs):
        # fresh zeroed hidden state at the start of every epoch
        hidden = net.init_hidden(batch_size)
        for x, y in get_batches(XX, YY, batch_size):
            step += 1
            inputs = torch.from_numpy(x).cuda()
            targets = torch.from_numpy(y).cuda()
            # detach the hidden state so gradients don't flow across batches
            hidden = tuple(state.data for state in hidden)
            net.zero_grad()
            output, hidden = net(inputs, hidden)
            loss = loss_fn(output, targets.view(-1))
            loss.backward()
            # gradient clipping mitigates exploding gradients in RNNs/LSTMs
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            optimizer.step()
            if step % print_every == 0:
                print("Epoch: {}/{}...".format(epoch + 1, epochs),
                      "Step: {}...".format(step))
def predict(net, tkn, h=None, word2idx_Inp=None, idx2word_Inp=None):
    """Predict the token following ``tkn``.

    Runs one step of the network on GPU, takes a softmax over the
    vocabulary, and samples uniformly among the 3 most probable tokens.
    Returns (predicted_token, new_hidden_state).
    """
    token_idx = np.array([[word2idx_Inp[tkn]]])
    inputs = torch.from_numpy(token_idx).cuda()
    # detach hidden state from the autograd history
    h = tuple(state.data for state in h)
    out, h = net(inputs, h)
    # token probabilities as a flat numpy vector over the vocabulary
    probs = F.softmax(out, dim=1).data.cpu().numpy().reshape(-1)
    # indices of the top-3 values, most probable first
    top3 = probs.argsort()[-3:][::-1]
    # pick one of the three uniformly at random
    chosen = top3[random.sample([0, 1, 2], 1)[0]]
    return idx2word_Inp[chosen], h
# function to generate text
def sample(net, size, prime="<s>", word2idx_Inp=None, idx2word_Inp=None):
    """Generate text starting from ``prime``.

    When ``size`` is the string '</s>' generation continues until the
    end-of-sentence marker is produced; otherwise exactly ``size - 1``
    extra tokens are generated after the first prediction.
    Returns the tokens joined with spaces (the prime included).
    """
    net.cuda()
    net.eval()
    h = net.init_hidden(1)  # generation works on a single sequence
    generated = prime.split()
    # feed every priming token through the net; only the prediction made
    # after the last prime token is kept (append is outside the loop)
    for tok in prime.split():
        token, h = predict(net, tok, h, word2idx_Inp, idx2word_Inp)
    generated.append(token)
    if size == '</s>':
        # open-ended: run until the sentence-end marker appears
        while token != '</s>':
            token, h = predict(net, generated[-1], h, word2idx_Inp, idx2word_Inp)
            generated.append(token)
    else:
        # fixed-length generation
        for _ in range(size - 1):
            token, h = predict(net, generated[-1], h, word2idx_Inp, idx2word_Inp)
            generated.append(token)
    return ' '.join(generated)
def Testing(net, batch_size, Test_X, Test_Y):
    """Evaluate ``net`` on a test set and print mean loss + perplexity.

    BUGFIX: the original divided the summed loss by
    ``(len(Test_X) // batch_size) + 1``, but ``get_batches`` actually
    yields ``(len(Test_X) - 1) // batch_size`` batches (it always holds
    back the trailing rows), so the reported loss and perplexity were
    systematically underestimated.  We now count the batches that were
    really processed.
    """
    net.eval()
    criterion = nn.CrossEntropyLoss()
    # initialize hidden state
    h = net.init_hidden(batch_size)
    test_loss = 0.
    num_batches = 0
    with torch.no_grad():
        for x, y in get_batches(Test_X, Test_Y, batch_size):
            # convert numpy arrays to PyTorch arrays and push to GPU
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
            inputs, targets = inputs.cuda(), targets.cuda()
            # detach hidden states between batches
            h = tuple([each.data for each in h])
            output, h = net(inputs, h)
            test_loss += criterion(output, targets.view(-1)).item()
            num_batches += 1
    if num_batches:  # avoid ZeroDivisionError on a too-small test set
        test_loss = test_loss / num_batches
    print('-' * 40)
    print('Test loss {:5.2f} ------ Test perplexity {:8.2f}'.format(test_loss, math.exp(test_loss)))
    print('-' * 40)
class WordLSTM_with_Glove(nn.Module):
    """Word-level LSTM LM with frozen pretrained (GloVe-style) embeddings.

    Relies on module-level globals ``vocab_size_Q6`` and
    ``embedding_matrix`` (a (vocab_size_Q6, 100) numpy array — presumably
    GloVe vectors; defined elsewhere in the assignment, not in this chunk).
    NOTE(review): attribute names are load-bearing if instances are pickled
    with torch.save, as done for WordLSTM above.
    """

    def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
        super().__init__()
        self.drop_prob = drop_prob
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.lr = lr  # stored but unused; the optimizer is built in train()
        # index 0 ('_PAD') embeds to a constant zero vector
        self.emb_layer = nn.Embedding(vocab_size_Q6,100, padding_idx=0)
        # copy the pretrained vectors in, then freeze them
        self.emb_layer.weight.data.copy_(torch.from_numpy(embedding_matrix))
        self.emb_layer.weight.requires_grad = False ## freeze embeddings
        '''
        self.emb_layer = nn.Embedding(vocab_size_Q6,100)
        self.emb_layer.weight = nn.Parameter(torch.from_numpy(embedding_matrix).float())
        '''
        ## define the LSTM (input size 100 = pretrained embedding dim)
        self.lstm = nn.LSTM(100, n_hidden, n_layers,
                            dropout=drop_prob, batch_first=True)
        ## define a dropout layer
        self.dropout = nn.Dropout(drop_prob)
        ## define the fully-connected layer
        self.fc = nn.Linear(n_hidden, vocab_size_Q6)
    def forward(self, x, hidden):
        ''' Forward pass through the network.
            These inputs are x, and the hidden/cell state `hidden`.
            Returns logits flattened to (batch*seq, vocab_size_Q6) plus
            the updated hidden state. '''
        ## pass input through embedding layer
        embedded = self.emb_layer(x)
        ## Get the outputs and the new hidden state from the lstm
        lstm_output, hidden = self.lstm(embedded, hidden)
        ## pass through a dropout layer
        out = self.dropout(lstm_output)
        #out = out.contiguous().view(-1, self.n_hidden)
        out = out.reshape(-1, self.n_hidden)
        ## put "out" through the fully-connected layer
        out = self.fc(out)
        # return the final output and the hidden state
        return out, hidden
    def init_hidden(self, batch_size):
        ''' initializes hidden state '''
        # Create two new tensors with sizes n_layers x batch_size x n_hidden,
        # initialized to zero, for hidden state and cell state of LSTM
        weight = next(self.parameters()).data
        # if GPU is available
        if (torch.cuda.is_available()):
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
        # if GPU is not available
        else:
            hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
                      weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
        return hidden
# Data ###########================-------------------
# Load the corpus, one raw line per list entry (trailing whitespace stripped).
with open('prideAndPrejudice.txt') as f:
    Pride_Text = [line.rstrip() for line in f]
# Q2.1 ###########================-------------------
# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing
Preprocessed_Pride_Text = []
for t in range(len(Pride_Text)):
    Preprocessed_Pride_Text = Preprocessed_Pride_Text + NLP_PreProcessing(Pride_Text[t])
# Window length = 5 tokens -> 4 input tokens predicting the next token each step.
Length_of_Sequences = 5
# Flatten the per-sentence window lists into one list of 5-token windows.
Pride_Text_Equal_seqs_L5 = sum([Equal_seq(i,Length_of_Sequences) for i in Preprocessed_Pride_Text], [])
del t,f
# Create Vocab (token -> frequency over the whole preprocessed corpus)
words = Counter()
for i, sentence in enumerate(Preprocessed_Pride_Text):
    for word in sentence:
        words.update([word])
words = {k:v for k,v in words.items() if v>1} # Removing the words that only appear once
del i,sentence,word
words = sorted(words, key=words.get, reverse=True) # Sorting the words by descending frequency
# Reserve index 0 for padding and index 1 for unknown/rare tokens.
words = ['_PAD','_UNK'] + words
word2idx = {o:i for i,o in enumerate(words)}
idx2word = {i:o for i,o in enumerate(words)}
# Looking up the mapping dictionary and assigning the index to the respective words
Pride_Text_Equal_seqs_INDICES_L5 =[]
for i, sentence in enumerate(Pride_Text_Equal_seqs_L5):
    Pride_Text_Equal_seqs_INDICES_L5.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
# Input/target pairs shifted by one token: X predicts Y at every position.
X = []
Y = []
for S in Pride_Text_Equal_seqs_INDICES_L5:
    X.append(S[:-1])
    Y.append(S[1:])
x_int_L5 = np.array(X)
y_int_L5 = np.array(Y)
# Global consumed by WordLSTM's embedding and output layers.
vocab_size = len(word2idx)
# Train Or Load LSTM (flip Do_want_To_Train to 1 to retrain from scratch)
Do_want_To_Train = 0
batch_size = 320
epochs=20
lr=0.001
if Do_want_To_Train == 1:
    net1 = WordLSTM() # instantiate the model
    net1.cuda() # push the model to GPU
    train(net1, epochs, batch_size, lr, 1, 50,x_int_L5,y_int_L5) # train the model
    torch.save(net1, 'Q2_Part_1_Network.pt')
else:
    # Whole-object pickle load: requires the WordLSTM class definition above.
    net1 = torch.load('Q2_Part_1_Network.pt')
net1.eval()
print(net1)
# Generate text: 10 open-ended samples, each until '</s>' is produced.
for i in range(10):
    print('=======================================')
    print("- Example "+str(i+1)+": ",sample(net1, size='</s>' , prime="<s>", word2idx_Inp = word2idx, idx2word_Inp =idx2word ),'\n')
del X,Y,i,S,Do_want_To_Train
print('=' * 60)
# Q2.2 ###########================-------------------
with open('test_1.txt') as f:
    test_1 = [line.rstrip() for line in f]
# sentence segmenting + lower casing + Tokenization + Padding using function NLP_PreProcessing_Test
Test_1_Preprocessed_Pride_Text = []
for t in range(len(test_1)):
    # [4:-5] presumably strips literal '<s> ' / ' </s>' markers already
    # present in the test file — TODO confirm against test_1.txt's format.
    Test_1_Preprocessed_Pride_Text = Test_1_Preprocessed_Pride_Text + NLP_PreProcessing_Test((test_1[t])[4:-5])
Test_1_Pride_Text_Equal_seqs = sum([Equal_seq(i,Length_of_Sequences) for i in Test_1_Preprocessed_Pride_Text], [])
del t,f
# Looking up the mapping dictionary and assigning the index to the respective words
Test_1_Pride_Text_Equal_seqs_INDICES =[]
for i, sentence in enumerate(Test_1_Pride_Text_Equal_seqs):
    Test_1_Pride_Text_Equal_seqs_INDICES.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
Test_1_X = []
Test_1_Y = []
for S in Test_1_Pride_Text_Equal_seqs_INDICES:
    Test_1_X.append(S[:-1])
    Test_1_Y.append(S[1:])
Test_1_x_int = np.array(Test_1_X)
Test_1_y_int = np.array(Test_1_Y)
del Test_1_X,Test_1_Y,S
# Calculate Perplexity on the held-out test windows.
Testing(net1, batch_size ,Test_1_x_int,Test_1_y_int)
del Pride_Text,Length_of_Sequences
| 29.820896 | 132 | 0.603478 |
import re
import pickle
import random
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import nltk
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.lm.preprocessing import pad_both_ends
from collections import Counter
import math
ces = [list(pad_both_ends(Tokenized_sentences, n=2))]
return Pad_Tokenized_sentences
def Equal_seq(text, seq_len):
sequences = []
if len(text) > seq_len:
for i in range(seq_len, (len(text)+1)):
seq = text[i-seq_len:i]
sequences.append(seq)
else:
sequences = [['_PAD']*(seq_len-len(text)) + text ]
return sequences
def get_batches(arr_x, arr_y, batch_size):
prv = 0
for n in range(batch_size, arr_x.shape[0], batch_size):
x = arr_x[prv:n,:]
y = arr_y[prv:n,:]
prv = n
yield x, y
class WordLSTM(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size, 200)
m = nn.LSTM(200, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
nn.Dropout(drop_prob)
den, vocab_size)
def forward(self, x, hidden):
)
n)
tm_output)
out = out.reshape(-1, self.n_hidden)
return out, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
def train(net, epochs, batch_size, lr, clip, print_every,XX,YY):
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
net.cuda()
counter = 0
net.train()
for e in range(epochs):
h = net.init_hidden(batch_size)
for x, y in get_batches(XX, YY, batch_size):
counter+= 1
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
inputs, targets = inputs.cuda(), targets.cuda()
h = tuple([each.data for each in h])
net.zero_grad()
output, h = net(inputs, h)
loss = criterion(output, targets.view(-1))
loss.backward()
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
if counter % print_every == 0:
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter))
def predict(net, tkn, h=None, word2idx_Inp = None, idx2word_Inp =None ):
x = np.array([[word2idx_Inp[tkn]]])
inputs = torch.from_numpy(x)
inputs = inputs.cuda()
h = tuple([each.data for each in h])
out, h = net(inputs, h)
p = F.softmax(out, dim=1).data
p = p.cpu()
p = p.numpy()
p = p.reshape(p.shape[1],)
top_n_idx = p.argsort()[-3:][::-1]
sampled_token_index = top_n_idx[random.sample([0,1,2],1)[0]]
return idx2word_Inp[sampled_token_index], h
def sample(net, size, prime="<s>",word2idx_Inp = None, idx2word_Inp =None ):
net.cuda()
net.eval()
h = net.init_hidden(1)
toks = prime.split()
for t in prime.split():
token, h = predict(net, t, h,word2idx_Inp,idx2word_Inp)
toks.append(token)
if size == '</s>':
while(token!='</s>'):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
else:
for i in range(size-1):
token, h = predict(net, toks[-1], h,word2idx_Inp,idx2word_Inp)
toks.append(token)
return ' '.join(toks)
def Testing(net, batch_size,Test_X,Test_Y):
net.eval()
criterion = nn.CrossEntropyLoss()
h = net.init_hidden(batch_size)
test_loss = 0.
with torch.no_grad():
for x, y in get_batches(Test_X, Test_Y, batch_size):
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
inputs, targets = inputs.cuda(), targets.cuda()
h = tuple([each.data for each in h])
output, h = net(inputs, h)
test_loss += criterion(output, targets.view(-1)).item()
test_loss = test_loss / ((len(Test_X) // batch_size)+1)
print('-' * 40)
print('Test loss {:5.2f} ------ Test perplexity {:8.2f}'.format(test_loss, math.exp(test_loss)))
print('-' * 40)
class WordLSTM_with_Glove(nn.Module):
def __init__(self, n_hidden=256, n_layers=4, drop_prob=0.3, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
self.emb_layer = nn.Embedding(vocab_size_Q6,100, padding_idx=0)
self.emb_layer.weight.data.copy_(torch.from_numpy(embedding_matrix))
self.emb_layer.weight.requires_grad = False n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
nn.Dropout(drop_prob)
den, vocab_size_Q6)
def forward(self, x, hidden):
)
n)
tm_output)
out = out.reshape(-1, self.n_hidden)
return out, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
if (torch.cuda.is_available()):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
rd2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
X = []
Y = []
for S in Pride_Text_Equal_seqs_INDICES_L5:
X.append(S[:-1])
Y.append(S[1:])
x_int_L5 = np.array(X)
y_int_L5 = np.array(Y)
vocab_size = len(word2idx)
Do_want_To_Train = 0
batch_size = 320
epochs=20
lr=0.001
if Do_want_To_Train == 1:
net1 = WordLSTM()
net1.cuda()
train(net1, epochs, batch_size, lr, 1, 50,x_int_L5,y_int_L5)
torch.save(net1, 'Q2_Part_1_Network.pt')
else:
net1 = torch.load('Q2_Part_1_Network.pt')
net1.eval()
print(net1)
for i in range(10):
print('=======================================')
print("- Example "+str(i+1)+": ",sample(net1, size='</s>' , prime="<s>", word2idx_Inp = word2idx, idx2word_Inp =idx2word ),'\n')
del X,Y,i,S,Do_want_To_Train
print('=' * 60)
e(Test_1_Pride_Text_Equal_seqs):
Test_1_Pride_Text_Equal_seqs_INDICES.append([word2idx[word] if word in word2idx else word2idx['_UNK'] for word in sentence])
del i, sentence
Test_1_X = []
Test_1_Y = []
for S in Test_1_Pride_Text_Equal_seqs_INDICES:
Test_1_X.append(S[:-1])
Test_1_Y.append(S[1:])
Test_1_x_int = np.array(Test_1_X)
Test_1_y_int = np.array(Test_1_Y)
del Test_1_X,Test_1_Y,S
Testing(net1, batch_size ,Test_1_x_int,Test_1_y_int)
del Pride_Text,Length_of_Sequences
| true | true |
f72654a975ae6f6f07f9c0a9f4ef3e7ceecfc94a | 488 | py | Python | AssetsApp/migrations/0010_alter_assetscategories_datetime_added.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | AssetsApp/migrations/0010_alter_assetscategories_datetime_added.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | AssetsApp/migrations/0010_alter_assetscategories_datetime_added.py | Kayarn-Mechatronics/Octello | 45f4f73c764ca816918c31ef3ae4889740a68802 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.5 on 2021-09-08 01:39
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the default for
    # AssetsCategories.datetime_added.
    # NOTE(review): the default is a hard-coded timestamp (the moment
    # makemigrations ran) — presumably the model should use
    # django.utils.timezone.now instead; fix the model and add a NEW
    # migration rather than editing this frozen one.

    dependencies = [
        ('AssetsApp', '0009_alter_assetscategories_datetime_added'),
    ]
    operations = [
        migrations.AlterField(
            model_name='assetscategories',
            name='datetime_added',
            field=models.DateTimeField(default=datetime.datetime(2021, 9, 8, 1, 37, 53, 844688)),
        ),
    ]
| 24.4 | 97 | 0.647541 |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('AssetsApp', '0009_alter_assetscategories_datetime_added'),
]
operations = [
migrations.AlterField(
model_name='assetscategories',
name='datetime_added',
field=models.DateTimeField(default=datetime.datetime(2021, 9, 8, 1, 37, 53, 844688)),
),
]
| true | true |
f72656c4b0c210ce9367e4cf253ca9a70c73fd41 | 5,884 | py | Python | Jumpscale/clients/oauth/OauthInstance.py | threefoldtech/JumpscaleX | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 2 | 2019-05-09T07:21:25.000Z | 2019-08-05T06:37:53.000Z | Jumpscale/clients/oauth/OauthInstance.py | threefoldtech/JumpscaleX | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 664 | 2018-12-19T12:43:44.000Z | 2019-08-23T04:24:42.000Z | Jumpscale/clients/oauth/OauthInstance.py | threefoldtech/jumpscale10 | 5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa | [
"Apache-2.0"
] | 7 | 2019-05-03T07:14:37.000Z | 2019-08-05T12:36:52.000Z | import urllib.request
import urllib.parse
import urllib.error
import string
import requests
import time
import random
from Jumpscale import j
JSConfigClient = j.application.JSBaseConfigClass
class OauthClient(JSConfigClient):
    """Jumpscale config wrapper that lazily builds a concrete OAuth client.

    The schema below declares the persisted config fields; ``client``
    instantiates either an ItsYouOnline or a generic OauthInstance
    depending on ``client_instance``.
    """

    _SCHEMATEXT = """
    @url = jumpscale.oauth.client
    name* = "" (S)
    addr = "" (S)
    accesstokenaddr = "" (S)
    client_id = "" (S)
    secret_ = "" (S)
    scope = "" (S)
    redirect_url = "" (S)
    user_info_url = "" (S)
    logout_url = "" (S)
    client_instance = "github" (S)
    """
    def _init(self):
        # _init is the Jumpscale post-construction hook (called by
        # JSBaseConfigClass, not Python's __init__).
        # NOTE(review): most assignments below are self-to-self no-ops on
        # schema-backed attributes — presumably leftovers; only the
        # secret_/secret aliasing and the trailing defaults have effect.
        self.addr = self.addr
        self.accesstokenaddr = self.accesstokenaddr
        self.client_id = self.client_id
        self.secret = self.secret_
        self.scope = self.scope
        self.redirect_url = self.redirect_url
        self.user_info_url = self.user_info_url
        self.logout_url = self.logout_url
        self.client_instance = self.client_instance
        self._client = None  # lazily-built concrete client (see ``client``)
        self.instance = "main"
    @property
    def client(self):
        # Build the concrete client once and cache it.
        if self._client:
            return self._client
        if self.client_instance in ("itsyouonline", "itsyou.online"):
            self._client = ItsYouOnline(
                self.addr,
                self.accesstokenaddr,
                self.client_id,
                self.secret,
                self.scope,
                self.redirect_url,
                self.user_info_url,
                self.logout_url,
                self.instance,
            )
        else:
            # Any other value (default "github") gets the generic client.
            self._client = OauthInstance(
                self.addr,
                self.accesstokenaddr,
                self.client_id,
                self.secret,
                self.scope,
                self.redirect_url,
                self.user_info_url,
                self.logout_url,
                self.instance,
            )
        return self._client
class AuthError(Exception):
    """Raised when an OAuth flow fails (token exchange or user-info fetch).

    The original defined a do-nothing ``_init`` method — a misnaming of
    ``__init__`` that Python never calls on a plain Exception subclass.
    Exception's own constructor already handles the message argument, so
    the dead method is removed.
    """
    pass
class UserInfo(object):
    """Plain value object describing an authenticated user."""

    def __init__(self, username, emailaddress, groups):
        # BUGFIX: this was named ``_init`` in the original.  Unlike
        # Jumpscale config classes, plain ``object`` subclasses get no
        # ``_init`` hook, so ``UserInfo(a, b, c)`` raised
        # ``TypeError: object() takes no arguments`` at every call site
        # (both getUserInfo implementations below construct it this way).
        self.username = username
        self.emailaddress = emailaddress
        self.groups = groups
class OauthInstance:
    """Generic OAuth2 authorization-code-flow client.

    Builds the provider authorization URL (``url``), exchanges the
    returned code for an access token (``getAccessToken``) and fetches
    the user profile (``getUserInfo``, GitHub-style ``login``/``email``
    payload).
    """

    def __init__(
        self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
    ):
        if not addr:
            raise RuntimeError("Failed to get oauth instance, no address provided")
        else:
            self.addr = addr
        self.client_id = client_id
        self.scope = scope
        self.redirect_url = redirect_url
        self.accesstokenaddress = accesstokenaddr
        self.secret = secret
        self.user_info_url = user_info_url
        self.logout_url = logout_url
        # Random CSRF-protection state token, echoed back by the provider.
        self.state = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))

    def _log_error(self, msg):
        """Log an authentication failure.

        BUGFIX: the original error paths called ``self._log_error`` without
        ever defining it (the method exists only on Jumpscale config
        classes, and this class derives from ``object``), so every failed
        request crashed with AttributeError instead of raising AuthError.
        """
        import logging
        logging.getLogger(__name__).error(msg)

    @property
    def url(self):
        """Authorization URL the user should be redirected to."""
        params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_url,
            "state": self.state,
            "response_type": "code",
        }
        if self.scope:
            params.update({"scope": self.scope})
        return "%s?%s" % (self.addr, urllib.parse.urlencode(params))

    def getAccessToken(self, code, state):
        """Exchange the authorization ``code`` for an access token.

        Returns the provider's JSON response dict; raises AuthError on
        any provider-reported or HTTP-level failure.
        """
        payload = {
            "code": code,
            "client_id": self.client_id,
            "client_secret": self.secret,
            "redirect_uri": self.redirect_url,
            "grant_type": "authorization_code",
            "state": state,
        }
        result = requests.post(self.accesstokenaddress, data=payload, headers={"Accept": "application/json"})
        data = result.json()  # parse once instead of three times
        if not result.ok or "error" in data:
            # BUGFIX: the original did data["error"], which raised KeyError
            # when the request failed without an "error" field in the body.
            msg = data.get("error", "Failed to get access token")
            self._log_error(msg)
            raise AuthError(msg)
        return data

    def getUserInfo(self, accesstoken):
        """Fetch the user's profile with the access token.

        Returns a UserInfo built from a GitHub-style payload
        (``login`` / ``email``); raises AuthError on HTTP failure.
        """
        params = {"access_token": accesstoken["access_token"]}
        userinforesp = requests.get(self.user_info_url, params=params)
        if not userinforesp.ok:
            msg = "Failed to get user details"
            self._log_error(msg)
            raise AuthError(msg)
        userinfo = userinforesp.json()
        return UserInfo(userinfo["login"], userinfo["email"], ["user"])
class ItsYouOnline(OauthInstance):
    """OAuth client for ItsYou.Online, which issues JWTs instead of
    opaque access tokens.

    Inherits the generic flow from OauthInstance but overrides token
    retrieval (via the Jumpscale itsyouonline client) and profile lookup
    (JWT claims + the IYO user-info REST endpoint).
    """

    def __init__(
        self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
    ):
        OauthInstance.__init__(
            self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
        )
    def getAccessToken(self):
        # NOTE: intentionally takes no (code, state) args, unlike the base
        # class — the JWT is fetched directly with client_id/secret.
        return j.clients.itsyouonline.jwt_get(self.client_id, self.secret)
    def getUserInfo(self, accesstoken):
        # ``accesstoken`` here is the raw JWT string from getAccessToken.
        import jose
        import jose.jwt
        jwt = accesstoken
        headers = {"Authorization": "bearer %s" % jwt}
        # Claims are read without signature verification — presumably the
        # token was just obtained from IYO over TLS; confirm this is safe.
        jwtdata = jose.jwt.get_unverified_claims(jwt)
        scopes = jwtdata["scope"]
        requestedscopes = set(self.scope.split(","))
        # Every requested scope must be present among the granted ones.
        if set(jwtdata["scope"]).intersection(requestedscopes) != requestedscopes:
            msg = "Failed to get the requested scope for %s" % self.client_id
            raise AuthError(msg)
        username = jwtdata["username"]
        userinfourl = self.user_info_url.rstrip("/") + "/%s/info" % username
        userinforesp = requests.get(userinfourl, headers=headers)
        if not userinforesp.ok:
            msg = "Failed to get user details"
            raise AuthError(msg)
        groups = ["user"]
        # "user:memberof:<org>.<group>" scopes map to extra group names.
        for scope in scopes:
            parts = scope.split(":")
            if len(parts) == 3 and parts[:2] == ["user", "memberof"]:
                groups.append(parts[-1].split(".")[-1])
        userinfo = userinforesp.json()
        return UserInfo(userinfo["username"], userinfo["emailaddresses"][0]["emailaddress"], groups)
| 31.978261 | 116 | 0.579708 | import urllib.request
import urllib.parse
import urllib.error
import string
import requests
import time
import random
from Jumpscale import j
JSConfigClient = j.application.JSBaseConfigClass
class OauthClient(JSConfigClient):
_SCHEMATEXT = """
@url = jumpscale.oauth.client
name* = "" (S)
addr = "" (S)
accesstokenaddr = "" (S)
client_id = "" (S)
secret_ = "" (S)
scope = "" (S)
redirect_url = "" (S)
user_info_url = "" (S)
logout_url = "" (S)
client_instance = "github" (S)
"""
def _init(self):
self.addr = self.addr
self.accesstokenaddr = self.accesstokenaddr
self.client_id = self.client_id
self.secret = self.secret_
self.scope = self.scope
self.redirect_url = self.redirect_url
self.user_info_url = self.user_info_url
self.logout_url = self.logout_url
self.client_instance = self.client_instance
self._client = None
self.instance = "main"
@property
def client(self):
if self._client:
return self._client
if self.client_instance in ("itsyouonline", "itsyou.online"):
self._client = ItsYouOnline(
self.addr,
self.accesstokenaddr,
self.client_id,
self.secret,
self.scope,
self.redirect_url,
self.user_info_url,
self.logout_url,
self.instance,
)
else:
self._client = OauthInstance(
self.addr,
self.accesstokenaddr,
self.client_id,
self.secret,
self.scope,
self.redirect_url,
self.user_info_url,
self.logout_url,
self.instance,
)
return self._client
class AuthError(Exception):
def _init(self):
pass
class UserInfo(object):
def _init(self, username, emailaddress, groups):
self.username = username
self.emailaddress = emailaddress
self.groups = groups
class OauthInstance:
def __init__(
self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
):
if not addr:
raise RuntimeError("Failed to get oauth instance, no address provided")
else:
self.addr = addr
self.client_id = client_id
self.scope = scope
self.redirect_url = redirect_url
self.accesstokenaddress = accesstokenaddr
self.secret = secret
self.user_info_url = user_info_url
self.logout_url = logout_url
self.state = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
@property
def url(self):
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_url,
"state": self.state,
"response_type": "code",
}
if self.scope:
params.update({"scope": self.scope})
return "%s?%s" % (self.addr, urllib.parse.urlencode(params))
def getAccessToken(self, code, state):
payload = {
"code": code,
"client_id": self.client_id,
"client_secret": self.secret,
"redirect_uri": self.redirect_url,
"grant_type": "authorization_code",
"state": state,
}
result = requests.post(self.accesstokenaddress, data=payload, headers={"Accept": "application/json"})
if not result.ok or "error" in result.json():
msg = result.json()["error"]
self._log_error(msg)
raise AuthError(msg)
return result.json()
def getUserInfo(self, accesstoken):
params = {"access_token": accesstoken["access_token"]}
userinforesp = requests.get(self.user_info_url, params=params)
if not userinforesp.ok:
msg = "Failed to get user details"
self._log_error(msg)
raise AuthError(msg)
userinfo = userinforesp.json()
return UserInfo(userinfo["login"], userinfo["email"], ["user"])
class ItsYouOnline(OauthInstance):
def __init__(
self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
):
OauthInstance.__init__(
self, addr, accesstokenaddr, client_id, secret, scope, redirect_url, user_info_url, logout_url, instance
)
def getAccessToken(self):
return j.clients.itsyouonline.jwt_get(self.client_id, self.secret)
def getUserInfo(self, accesstoken):
import jose
import jose.jwt
jwt = accesstoken
headers = {"Authorization": "bearer %s" % jwt}
jwtdata = jose.jwt.get_unverified_claims(jwt)
scopes = jwtdata["scope"]
requestedscopes = set(self.scope.split(","))
if set(jwtdata["scope"]).intersection(requestedscopes) != requestedscopes:
msg = "Failed to get the requested scope for %s" % self.client_id
raise AuthError(msg)
username = jwtdata["username"]
userinfourl = self.user_info_url.rstrip("/") + "/%s/info" % username
userinforesp = requests.get(userinfourl, headers=headers)
if not userinforesp.ok:
msg = "Failed to get user details"
raise AuthError(msg)
groups = ["user"]
for scope in scopes:
parts = scope.split(":")
if len(parts) == 3 and parts[:2] == ["user", "memberof"]:
groups.append(parts[-1].split(".")[-1])
userinfo = userinforesp.json()
return UserInfo(userinfo["username"], userinfo["emailaddresses"][0]["emailaddress"], groups)
| true | true |
f7265773512670aeae3bd3c088b90f7348f9d8d0 | 6,363 | py | Python | dump_teacher_hiddens.py | ChenRocks/Distill-BERT-Textgen | a3b0b22ce16febc4d3ffdbd8791ea3374110a892 | [
"MIT"
] | 111 | 2020-05-05T04:34:10.000Z | 2022-02-20T17:04:56.000Z | dump_teacher_hiddens.py | ChenRocks/Distill-BERT-Textgen | a3b0b22ce16febc4d3ffdbd8791ea3374110a892 | [
"MIT"
] | 5 | 2020-06-06T12:45:52.000Z | 2021-03-16T13:22:37.000Z | dump_teacher_hiddens.py | ChenRocks/Distill-BERT-Textgen | a3b0b22ce16febc4d3ffdbd8791ea3374110a892 | [
"MIT"
] | 17 | 2020-05-07T07:43:05.000Z | 2022-01-19T11:33:33.000Z | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
precompute hidden states of CMLM teacher to speedup KD training
"""
import argparse
import io
import os
import shelve
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from toolz.sandbox import unzip
from cmlm.model import BertForSeq2seq
from cmlm.data import convert_token_to_bert, CLS, SEP, MASK
def tensor_dumps(tensor):
with io.BytesIO() as writer:
np.save(writer, tensor.cpu().numpy().astype(np.float16),
allow_pickle=False)
dump = writer.getvalue()
return dump
def gather_hiddens(hiddens, masks):
outputs = []
for hid, mask in zip(hiddens.split(1, dim=1), masks.split(1, dim=1)):
if mask.sum().item() == 0:
continue
mask = mask.unsqueeze(-1).expand_as(hid)
outputs.append(hid.masked_select(mask))
output = torch.stack(outputs, dim=0)
return output
class BertSampleDataset(Dataset):
def __init__(self, corpus_path, tokenizer, num_samples=7):
self.db = shelve.open(corpus_path, 'r')
self.ids = []
for i, ex in self.db.items():
if len(ex['src']) + len(ex['tgt']) + 3 <= 512:
self.ids.append(i)
self.toker = tokenizer
self.num_samples = num_samples
def __len__(self):
return len(self.ids)
def __getitem__(self, i):
id_ = self.ids[i]
example = self.db[id_]
features = convert_example(example['src'], example['tgt'],
self.toker, self.num_samples)
return (id_, ) + features
def convert_example(src, tgt, toker, num_samples):
src = [convert_token_to_bert(tok) for tok in src]
tgt = [convert_token_to_bert(tok) for tok in tgt] + [SEP]
# build the random masks
tgt_len = len(tgt)
if tgt_len <= num_samples:
masks = torch.eye(tgt_len).byte()
num_samples = tgt_len
else:
mask_inds = [list(range(i, tgt_len, num_samples))
for i in range(num_samples)]
masks = torch.zeros(num_samples, tgt_len).byte()
for i, indices in enumerate(mask_inds):
for j in indices:
masks.data[i, j] = 1
assert (masks.sum(dim=0) != torch.ones(tgt_len).long()).sum().item() == 0
assert masks.sum().item() == tgt_len
masks = torch.cat([torch.zeros(num_samples, len(src)+2).byte(), masks],
dim=1)
# make BERT inputs
input_ids = toker.convert_tokens_to_ids([CLS] + src + [SEP] + tgt)
mask_id = toker.convert_tokens_to_ids([MASK])[0]
input_ids = torch.tensor([input_ids for _ in range(num_samples)])
input_ids.data.masked_fill_(masks, mask_id)
token_ids = torch.tensor([[0] * (len(src) + 2) + [1] * len(tgt)
for _ in range(num_samples)])
return input_ids, token_ids, masks
def batch_features(features):
ids, all_input_ids, all_token_ids, all_masks = map(list, unzip(features))
batch_size = sum(input_ids.size(0) for input_ids in all_input_ids)
max_len = max(input_ids.size(1) for input_ids in all_input_ids)
input_ids = torch.zeros(batch_size, max_len).long()
token_ids = torch.zeros(batch_size, max_len).long()
attn_mask = torch.zeros(batch_size, max_len).long()
i = 0
for inp, tok in zip(all_input_ids, all_token_ids):
block, len_ = inp.size()
input_ids.data[i: i+block, :len_] = inp.data
token_ids.data[i: i+block, :len_] = tok.data
attn_mask.data[i: i+block, :len_].fill_(1)
i += block
return ids, input_ids, token_ids, attn_mask, all_masks
def process_batch(batch, bert, toker, num_samples=7):
input_ids, token_ids, attn_mask, all_masks = batch
input_ids = input_ids.cuda()
token_ids = token_ids.cuda()
attn_mask = attn_mask.cuda()
hiddens, _ = bert.bert(input_ids, token_ids, attn_mask,
output_all_encoded_layers=False)
hiddens = bert.cls.predictions.transform(hiddens)
i = 0
outputs = []
for masks in all_masks:
block, len_ = masks.size()
hids = hiddens[i:i+block, :len_, :]
masks = masks.cuda()
outputs.append(gather_hiddens(hids, masks))
i += block
return outputs
def build_db_batched(corpus, out_db, bert, toker, batch_size=8):
    """Compute BERT features for every usable example of `corpus` into `out_db`.

    :param corpus: path of the shelve corpus read by BertSampleDataset
    :param out_db: open shelve (or dict-like) receiving id -> .npy bytes
    :param bert: BERT model forwarded to process_batch
    :param toker: tokenizer used to build the dataset's inputs
    :param batch_size: number of examples collated per forward pass
    """
    dataset = BertSampleDataset(corpus, toker)
    loader = DataLoader(dataset, batch_size=batch_size,
                        num_workers=4, collate_fn=batch_features)
    with tqdm(desc='computing BERT features', total=len(dataset)) as pbar:
        for ids, *batch in loader:
            outputs = process_batch(batch, bert, toker)
            for id_, output in zip(ids, outputs):
                out_db[id_] = tensor_dumps(output)  # float16 .npy bytes
            pbar.update(len(ids))
def main(opts):
    """Dump the output projection layer and a DB of BERT features for a corpus."""
    # load BERT
    state_dict = torch.load(opts.ckpt)
    # the output layer must be resized to the checkpoint's vocab size
    # BEFORE load_state_dict, or the shapes will not match
    vsize = state_dict['cls.predictions.decoder.weight'].size(0)
    bert = BertForSeq2seq.from_pretrained(opts.bert).eval().half().cuda()
    bert.update_output_layer_by_size(vsize)
    bert.load_state_dict(state_dict)
    toker = BertTokenizer.from_pretrained(opts.bert,
                                          do_lower_case='uncased' in opts.bert)
    # save the final projection layer so features can later be mapped to logits
    linear = torch.nn.Linear(bert.config.hidden_size, bert.config.vocab_size)
    linear.weight.data = state_dict['cls.predictions.decoder.weight']
    linear.bias.data = state_dict['cls.predictions.bias']
    # no exist_ok: raises FileExistsError if the output dir already exists
    os.makedirs(opts.output)
    torch.save(linear, f'{opts.output}/linear.pt')
    # create DB of id -> serialized feature tensors
    with shelve.open(f'{opts.output}/db') as out_db, \
            torch.no_grad():
        build_db_batched(opts.db, out_db, bert, toker)
if __name__ == '__main__':
    # command-line entry point: which pretrained BERT, which checkpoint,
    # which corpus, and where to dump linear.pt + the feature DB
    parser = argparse.ArgumentParser()
    parser.add_argument('--bert', required=True,
                        choices=['bert-base-uncased',
                                 'bert-base-multilingual-cased'],
                        help='BERT model')
    parser.add_argument('--ckpt', required=True, help='BERT checkpoint')
    parser.add_argument('--db', required=True, help='dataset to compute')
    parser.add_argument('--output', required=True, help='path to dump output')
    args = parser.parse_args()
    main(args)
| 35.949153 | 79 | 0.636964 | import argparse
import io
import os
import shelve
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from pytorch_pretrained_bert import BertTokenizer
from toolz.sandbox import unzip
from cmlm.model import BertForSeq2seq
from cmlm.data import convert_token_to_bert, CLS, SEP, MASK
def tensor_dumps(tensor):
    """Serialize a tensor to .npy bytes, downcast to float16, pickle-free."""
    array = tensor.cpu().numpy().astype(np.float16)
    with io.BytesIO() as buffer:
        np.save(buffer, array, allow_pickle=False)
        return buffer.getvalue()
def gather_hiddens(hiddens, masks):
    """Collect hidden vectors at masked positions, stacked along dim 0.

    Walks the sequence dimension; any position with no set mask bit is
    skipped, and each remaining position contributes the hidden values
    selected by its mask column.
    """
    selected = []
    for pos in range(masks.size(1)):
        col_mask = masks[:, pos:pos + 1]
        if col_mask.sum().item() == 0:
            continue
        col_hid = hiddens[:, pos:pos + 1, :]
        expanded = col_mask.unsqueeze(-1).expand_as(col_hid)
        selected.append(col_hid.masked_select(expanded))
    return torch.stack(selected, dim=0)
class BertSampleDataset(Dataset):
    """Per-example BERT feature inputs backed by a shelve corpus.

    Each item is (id, input_ids, token_ids, masks) as produced by
    convert_example. Examples too long for the encoding
    [CLS] src [SEP] tgt [SEP] (more than 512 positions) are dropped
    at construction time.
    """
    def __init__(self, corpus_path, tokenizer, num_samples=7):
        # read-only shelve; handle is kept open for __getitem__ lookups
        self.db = shelve.open(corpus_path, 'r')
        self.ids = []
        for i, ex in self.db.items():
            # +3 accounts for the [CLS] and two [SEP] special tokens
            if len(ex['src']) + len(ex['tgt']) + 3 <= 512:
                self.ids.append(i)
        self.toker = tokenizer
        self.num_samples = num_samples
    def __len__(self):
        return len(self.ids)
    def __getitem__(self, i):
        id_ = self.ids[i]
        example = self.db[id_]
        features = convert_example(example['src'], example['tgt'],
                                   self.toker, self.num_samples)
        return (id_, ) + features
def convert_example(src, tgt, toker, num_samples):
    """Build `num_samples` copies of one example, each masking a different
    slice of the target.

    The target (plus its trailing SEP) is distributed round-robin over the
    rows so that, across all rows together, every target position is masked
    exactly once. Returns (input_ids, token_ids, masks), all shaped
    (num_samples, len(src) + 2 + len(tgt) + 1) relative to the raw inputs;
    token_ids is 0 over the source segment and 1 over the target segment.
    """
    src = [convert_token_to_bert(tok) for tok in src]
    tgt = [convert_token_to_bert(tok) for tok in tgt] + [SEP]
    tgt_len = len(tgt)
    if tgt_len <= num_samples:
        # fewer target positions than samples: one row per position
        masks = torch.eye(tgt_len).byte()
        num_samples = tgt_len
    else:
        # row i masks target positions i, i+num_samples, i+2*num_samples, ...
        mask_inds = [list(range(i, tgt_len, num_samples))
                     for i in range(num_samples)]
        masks = torch.zeros(num_samples, tgt_len).byte()
        for i, indices in enumerate(mask_inds):
            for j in indices:
                masks.data[i, j] = 1
        # invariant: every target position covered exactly once
        assert (masks.sum(dim=0) != torch.ones(tgt_len).long()).sum().item() == 0
        assert masks.sum().item() == tgt_len
    # never mask the source segment ([CLS] + src + [SEP])
    masks = torch.cat([torch.zeros(num_samples, len(src)+2).byte(), masks],
                      dim=1)
    # make BERT inputs: replicate the ids per row, then blank masked slots
    input_ids = toker.convert_tokens_to_ids([CLS] + src + [SEP] + tgt)
    mask_id = toker.convert_tokens_to_ids([MASK])[0]
    input_ids = torch.tensor([input_ids for _ in range(num_samples)])
    input_ids.data.masked_fill_(masks, mask_id)
    token_ids = torch.tensor([[0] * (len(src) + 2) + [1] * len(tgt)
                              for _ in range(num_samples)])
    return input_ids, token_ids, masks
def batch_features(features):
ids, all_input_ids, all_token_ids, all_masks = map(list, unzip(features))
batch_size = sum(input_ids.size(0) for input_ids in all_input_ids)
max_len = max(input_ids.size(1) for input_ids in all_input_ids)
input_ids = torch.zeros(batch_size, max_len).long()
token_ids = torch.zeros(batch_size, max_len).long()
attn_mask = torch.zeros(batch_size, max_len).long()
i = 0
for inp, tok in zip(all_input_ids, all_token_ids):
block, len_ = inp.size()
input_ids.data[i: i+block, :len_] = inp.data
token_ids.data[i: i+block, :len_] = tok.data
attn_mask.data[i: i+block, :len_].fill_(1)
i += block
return ids, input_ids, token_ids, attn_mask, all_masks
def process_batch(batch, bert, toker, num_samples=7):
input_ids, token_ids, attn_mask, all_masks = batch
input_ids = input_ids.cuda()
token_ids = token_ids.cuda()
attn_mask = attn_mask.cuda()
hiddens, _ = bert.bert(input_ids, token_ids, attn_mask,
output_all_encoded_layers=False)
hiddens = bert.cls.predictions.transform(hiddens)
i = 0
outputs = []
for masks in all_masks:
block, len_ = masks.size()
hids = hiddens[i:i+block, :len_, :]
masks = masks.cuda()
outputs.append(gather_hiddens(hids, masks))
i += block
return outputs
def build_db_batched(corpus, out_db, bert, toker, batch_size=8):
dataset = BertSampleDataset(corpus, toker)
loader = DataLoader(dataset, batch_size=batch_size,
num_workers=4, collate_fn=batch_features)
with tqdm(desc='computing BERT features', total=len(dataset)) as pbar:
for ids, *batch in loader:
outputs = process_batch(batch, bert, toker)
for id_, output in zip(ids, outputs):
out_db[id_] = tensor_dumps(output)
pbar.update(len(ids))
def main(opts):
state_dict = torch.load(opts.ckpt)
vsize = state_dict['cls.predictions.decoder.weight'].size(0)
bert = BertForSeq2seq.from_pretrained(opts.bert).eval().half().cuda()
bert.update_output_layer_by_size(vsize)
bert.load_state_dict(state_dict)
toker = BertTokenizer.from_pretrained(opts.bert,
do_lower_case='uncased' in opts.bert)
linear = torch.nn.Linear(bert.config.hidden_size, bert.config.vocab_size)
linear.weight.data = state_dict['cls.predictions.decoder.weight']
linear.bias.data = state_dict['cls.predictions.bias']
os.makedirs(opts.output)
torch.save(linear, f'{opts.output}/linear.pt')
with shelve.open(f'{opts.output}/db') as out_db, \
torch.no_grad():
build_db_batched(opts.db, out_db, bert, toker)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bert', required=True,
choices=['bert-base-uncased',
'bert-base-multilingual-cased'],
help='BERT model')
parser.add_argument('--ckpt', required=True, help='BERT checkpoint')
parser.add_argument('--db', required=True, help='dataset to compute')
parser.add_argument('--output', required=True, help='path to dump output')
args = parser.parse_args()
main(args)
| true | true |
f72658a22e37eca598653ef574062e41fecd324c | 1,652 | py | Python | test/vanilla/Expected/AcceptanceTests/NonStringEnums/nonstringenums/models/_non_string_enums_client_enums.py | tasherif-msft/autorest.python | 5b0121bcfa802aedaeda36990e8bcaa2b7e26b14 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/NonStringEnums/nonstringenums/models/_non_string_enums_client_enums.py | tasherif-msft/autorest.python | 5b0121bcfa802aedaeda36990e8bcaa2b7e26b14 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/NonStringEnums/nonstringenums/models/_non_string_enums_client_enums.py | tasherif-msft/autorest.python | 5b0121bcfa802aedaeda36990e8bcaa2b7e26b14 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
"""Return the enum member matching `name`
We use __getattr__ instead of descriptors or inserting into the enum
class' __dict__ in order to support `name` and `value` being both
properties for enum members (which live in the class' __dict__) and
enum members themselves.
"""
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class FloatEnum(with_metaclass(_CaseInsensitiveEnumMeta, float, Enum)):
    """List of float enums

    AutoRest-generated float-valued members (values resemble HTTP status
    codes with a fractional suffix); name lookup is case-insensitive via
    _CaseInsensitiveEnumMeta, and members compare like plain floats.
    """
    TWO_HUNDRED4 = 200.4
    FOUR_HUNDRED_THREE4 = 403.4
    FOUR_HUNDRED_FIVE3 = 405.3
    FOUR_HUNDRED_SIX2 = 406.2
    FOUR_HUNDRED_TWENTY_NINE1 = 429.1
class IntEnum(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
    """List of integer enums

    AutoRest-generated integer-valued members; name lookup is
    case-insensitive via _CaseInsensitiveEnumMeta, and members compare
    like plain ints.

    NOTE(review): the class name shadows the stdlib ``enum.IntEnum`` —
    expected for generated code, but alias on import where both are needed.
    """
    TWO_HUNDRED = 200
    FOUR_HUNDRED_THREE = 403
    FOUR_HUNDRED_FIVE = 405
    FOUR_HUNDRED_SIX = 406
    FOUR_HUNDRED_TWENTY_NINE = 429
| 34.416667 | 94 | 0.640436 |
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
def __getitem__(self, name):
return super().__getitem__(name.upper())
def __getattr__(cls, name):
try:
return cls._member_map_[name.upper()]
except KeyError:
raise AttributeError(name)
class FloatEnum(with_metaclass(_CaseInsensitiveEnumMeta, float, Enum)):
TWO_HUNDRED4 = 200.4
FOUR_HUNDRED_THREE4 = 403.4
FOUR_HUNDRED_FIVE3 = 405.3
FOUR_HUNDRED_SIX2 = 406.2
FOUR_HUNDRED_TWENTY_NINE1 = 429.1
class IntEnum(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
TWO_HUNDRED = 200
FOUR_HUNDRED_THREE = 403
FOUR_HUNDRED_FIVE = 405
FOUR_HUNDRED_SIX = 406
FOUR_HUNDRED_TWENTY_NINE = 429
| true | true |
f72658a883e0ec8dce8892d634b7105d4bac6c3d | 354 | py | Python | algorithm/challenges/geanytest.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | algorithm/challenges/geanytest.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | null | null | null | algorithm/challenges/geanytest.py | rishabhiitbhu/hackerrank | acc300851c81a29472177f15fd8b56ebebe853ea | [
"MIT"
] | 1 | 2020-01-30T06:47:09.000Z | 2020-01-30T06:47:09.000Z | t = int(input().strip())
# Chocolate feast: buy n//c bars outright, then repeatedly trade every m
# wrappers for one more bar until fewer than m wrappers remain.
for _case in range(t):
    n, c, m = (int(tok) for tok in input().strip().split(' '))
    total = n // c          # bars bought with the starting money
    wrappers = total        # each bar eaten yields one wrapper
    while wrappers >= m:
        traded, wrappers = divmod(wrappers, m)
        wrappers += traded  # traded-in bars come with wrappers too
        total += traded
    print(total)
| 25.285714 | 39 | 0.516949 | t = int(input().strip())
for i in range(t):
n,c,m = input().strip().split(' ')
n,c,m = [int(n),int(c),int(m)]
tot_choc=n//c
wrap=tot_choc
while wrap >= m:
extra_choc = wrap//m
wrap-=m*extra_choc
wrap+=extra_choc
tot_choc+= extra_choc
print(tot_choc)
| true | true |
f72658e3bad5fe570677617b549a92f97f742c6b | 8,745 | py | Python | selfdrive/car/hyundai/carcontroller.py | janpoo6427/openpilot_xx979xx | 189764c2ad1d6610165876a0462ba0eb896ac500 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/carcontroller.py | janpoo6427/openpilot_xx979xx | 189764c2ad1d6610165876a0462ba0eb896ac500 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/carcontroller.py | janpoo6427/openpilot_xx979xx | 189764c2ad1d6610165876a0462ba0eb896ac500 | [
"MIT"
] | 2 | 2020-09-27T20:46:34.000Z | 2020-10-15T01:01:57.000Z | from cereal import car
from common.numpy_fast import clip
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfa_mfa, \
create_scc11, create_scc12, create_mdps12, \
create_scc13, create_scc14
from selfdrive.car.hyundai.values import Buttons, SteerLimitParams, CAR
from opendbc.can.packer import CANPacker
from selfdrive.config import Conversions as CV
VisualAlert = car.CarControl.HUDControl.VisualAlert
min_set_speed = 30 * CV.KPH_TO_MS
# Accel limits
ACCEL_HYST_GAP = 0.02 # don't change accel command for small oscilalitons within this value
ACCEL_MAX = 1.5 # 1.5 m/s2
ACCEL_MIN = -3.0 # 3 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
def accel_hysteresis(accel, accel_steady, gap=None):
  """Smooth the accel command with a dead-band around the last steady value.

  Requests within +/-gap of the carried steady value leave it untouched;
  larger moves drag the steady value along at the edge of the band. The
  (possibly updated) steady value is always returned as the command, so
  small oscillations never change the output.

  :param accel: raw requested acceleration for this frame
  :param accel_steady: steady value carried over from the previous frame
  :param gap: dead-band half-width; defaults to ACCEL_HYST_GAP (resolved at
              call time, keeping the 2-arg call signature backward compatible)
  :return: (filtered accel command, new steady value) — always equal
  """
  if gap is None:
    gap = ACCEL_HYST_GAP
  # for small accel oscillations within the gap, don't change the accel command
  if accel > accel_steady + gap:
    accel_steady = accel - gap
  elif accel < accel_steady - gap:
    accel_steady = accel + gap
  accel = accel_steady
  return accel, accel_steady
def process_hud_alert(enabled, fingerprint, visual_alert, left_lane,
                      right_lane, left_lane_depart, right_lane_depart, button_on):
  """Map openpilot HUD state onto LKAS11 cluster signal values.

  :return: (sys_warning, sys_state, left_lane_warning, right_lane_warning)
           where sys_state encodes lane-line display (1 none, 3 both+active,
           4 both+inactive, 5 left only, 6 right only) and the lane-depart
           warnings use value 1 on Genesis platforms, 2 elsewhere
  """
  sys_warning = (visual_alert == VisualAlert.steerRequired)
  # initialize to no line visible
  sys_state = 1
  # NOTE(review): the original assigned an unused local when the LKAS button
  # was off (`lane_visible = 0`); it had no effect and was removed. The
  # intent was presumably to blank the lane display — confirm before wiring
  # `button_on` into `sys_state`.
  if left_lane and right_lane or sys_warning:  # HUD alert only displays when LKAS status is active
    if enabled or sys_warning:
      sys_state = 3
    else:
      sys_state = 4
  elif left_lane:
    sys_state = 5
  elif right_lane:
    sys_state = 6
  # initialize to no warnings
  left_lane_warning = 0
  right_lane_warning = 0
  if left_lane_depart:
    left_lane_warning = 1 if fingerprint in [CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
  if right_lane_depart:
    right_lane_warning = 1 if fingerprint in [CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
  return sys_warning, sys_state, left_lane_warning, right_lane_warning
class CarController():
  """Builds the per-frame list of CAN messages for Hyundai/Kia/Genesis.

  Holds the rolling state (message counters, steer/accel history, blinker
  timer, standstill-resume bookkeeping) that must persist between
  successive `update` calls.
  """
  def __init__(self, dbc_name, CP, VM):
    self.car_fingerprint = CP.carFingerprint
    self.packer = CANPacker(dbc_name)
    self.accel_steady = 0          # hysteresis state for accel_hysteresis()
    self.apply_steer_last = 0      # last commanded steer torque (rate limiting)
    self.steer_rate_limited = False
    self.lkas11_cnt = 0            # LKAS11 rolling message counter (mod 0x10)
    self.scc12_cnt = 0             # SCC12 alive counter (mod 0xF)
    self.resume_cnt = 0            # RES presses sent during this standstill
    self.last_resume_frame = 0
    self.last_lead_distance = 0    # lead distance latched when the car stops
    self.turning_signal_timer = 0  # frames left with steering disabled
    self.lkas_button_on = True
    self.longcontrol = CP.openpilotLongitudinalControl
    self.scc_live = not CP.radarOffCan
  def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert,
             left_lane, right_lane, left_lane_depart, right_lane_depart, set_speed, lead_visible):
    """Compute and return the list of CAN messages for one control frame."""
    # *** compute control surfaces ***
    # gas and brake: hysteresis then clip to the configured accel envelope
    apply_accel = actuators.gas - actuators.brake
    apply_accel, self.accel_steady = accel_hysteresis(apply_accel, self.accel_steady)
    apply_accel = clip(apply_accel * ACCEL_SCALE, ACCEL_MIN, ACCEL_MAX)
    # Steering Torque: scale to EPS units and rate/driver-torque limit
    new_steer = actuators.steer * SteerLimitParams.STEER_MAX
    apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, SteerLimitParams)
    self.steer_rate_limited = new_steer != apply_steer
    # disable if steer angle reach 90 deg, otherwise mdps fault in some models
    # temporarily disable steering when LKAS button off
    lkas_active = enabled and abs(CS.out.steeringAngle) < 90. and self.lkas_button_on
    # fix for Genesis hard fault at low speed
    if CS.out.vEgo < 60 * CV.KPH_TO_MS and self.car_fingerprint == CAR.HYUNDAI_GENESIS and not CS.mdps_bus:
      lkas_active = 0
    # Disable steering while turning blinker on and speed below 60 kph
    if CS.out.leftBlinker or CS.out.rightBlinker:
      if self.car_fingerprint not in [CAR.KIA_OPTIMA, CAR.KIA_OPTIMA_H]:
        self.turning_signal_timer = 100  # Disable for 1.0 Seconds after blinker turned off
    elif CS.left_blinker_flash or CS.right_blinker_flash:  # Optima has blinker flash signal only
      self.turning_signal_timer = 100
    # NOTE: turning_indicator_alert is set and cleared by the interface; it
    # is not initialized in __init__, so update() assumes the interface ran
    # first — confirm that ordering if refactoring.
    if self.turning_indicator_alert:  # set and clear by interface
      lkas_active = 0
    if self.turning_signal_timer > 0:
      self.turning_signal_timer -= 1
    if not lkas_active:
      apply_steer = 0
    # apply_accel_last is stored but not read here; kept for external readers
    self.apply_accel_last = apply_accel
    self.apply_steer_last = apply_steer
    sys_warning, sys_state, left_lane_warning, right_lane_warning =\
      process_hud_alert(lkas_active, self.car_fingerprint, visual_alert,
                        left_lane, right_lane, left_lane_depart, right_lane_depart,
                        self.lkas_button_on)
    # spoofed cluster speed used for CLU11 messages sent on the mdps bus
    clu11_speed = CS.clu11["CF_Clu_Vanz"]
    enabled_speed = 38 if CS.is_set_speed_in_mph else 60
    if clu11_speed > enabled_speed or not lkas_active:
      enabled_speed = clu11_speed
    # set speed is clamped and converted to the cluster's display unit
    if set_speed < min_set_speed:
      set_speed = min_set_speed
    set_speed *= CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH
    if frame == 0:  # initialize counts from last received count signals
      self.lkas11_cnt = CS.lkas11["CF_Lkas_MsgCount"]
      self.scc12_cnt = CS.scc12["CR_VSM_Alive"] + 1 if not CS.no_radar else 0
      #TODO: fix this — disabled SCC-alive tracking kept for reference:
      # self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
      # self.scc_update_frame = frame
      # check if SCC is alive
      # if frame % 7 == 0:
      #   if CS.scc11["AliveCounterACC"] == self.prev_scc_cnt:
      #     if frame - self.scc_update_frame > 20 and self.scc_live:
      #       self.scc_live = False
      #   else:
      #     self.scc_live = True
      #     self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
      #     self.scc_update_frame = frame
      self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
    self.lkas11_cnt = (self.lkas11_cnt + 1) % 0x10
    self.scc12_cnt %= 0xF
    can_sends = []
    can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
                                   CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
                                   left_lane_warning, right_lane_warning, 0))
    if CS.mdps_bus or CS.scc_bus == 1:  # send lkas11 bus 1 if mdps or scc is on bus 1
      can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
                                     CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
                                     left_lane_warning, right_lane_warning, 1))
    if frame % 2 and CS.mdps_bus: # send clu11 to mdps if it is not on bus 0
      can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed))
    if pcm_cancel_cmd and self.longcontrol:
      can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))
    elif CS.mdps_bus: # send mdps12 to LKAS to prevent LKAS error if no cancel cmd
      can_sends.append(create_mdps12(self.packer, frame, CS.mdps12))
    # send scc to car if longcontrol enabled and SCC not on bus 0 or ont live
    if self.longcontrol and (CS.scc_bus or not self.scc_live) and frame % 2 == 0:
      can_sends.append(create_scc12(self.packer, apply_accel, enabled, self.scc12_cnt, self.scc_live, CS.scc12))
      can_sends.append(create_scc11(self.packer, frame, enabled, set_speed, lead_visible, self.scc_live, CS.scc11))
      if CS.has_scc13 and frame % 20 == 0:
        can_sends.append(create_scc13(self.packer, CS.scc13))
      if CS.has_scc14:
        can_sends.append(create_scc14(self.packer, enabled, CS.scc14))
      self.scc12_cnt += 1
    if CS.out.cruiseState.standstill:
      # run only first time when the car stopped
      if self.last_lead_distance == 0:
        # get the lead distance from the Radar
        self.last_lead_distance = CS.lead_distance
        self.resume_cnt = 0
      # when lead car starts moving, create 6 RES msgs
      elif CS.lead_distance != self.last_lead_distance and (frame - self.last_resume_frame) > 5:
        can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))
        self.resume_cnt += 1
        # interval after 6 msgs
        if self.resume_cnt > 5:
          self.last_resume_frame = frame
          self.resume_cnt = 0
    # reset lead distance after the car starts moving
    elif self.last_lead_distance != 0:
      self.last_lead_distance = 0
    # 20 Hz LFA MFA message
    if frame % 5 == 0 and self.car_fingerprint in [CAR.SONATA, CAR.PALISADE, CAR.SONATA_H, CAR.SANTA_FE]:
      can_sends.append(create_lfa_mfa(self.packer, frame, enabled))
    return can_sends
| 42.658537 | 122 | 0.705432 | from cereal import car
from common.numpy_fast import clip
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfa_mfa, \
create_scc11, create_scc12, create_mdps12, \
create_scc13, create_scc14
from selfdrive.car.hyundai.values import Buttons, SteerLimitParams, CAR
from opendbc.can.packer import CANPacker
from selfdrive.config import Conversions as CV
VisualAlert = car.CarControl.HUDControl.VisualAlert
min_set_speed = 30 * CV.KPH_TO_MS
ACCEL_HYST_GAP = 0.02
ACCEL_MAX = 1.5 # 1.5 m/s2
ACCEL_MIN = -3.0 # 3 m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
def accel_hysteresis(accel, accel_steady):
# for small accel oscillations within ACCEL_HYST_GAP, don't change the accel command
if accel > accel_steady + ACCEL_HYST_GAP:
accel_steady = accel - ACCEL_HYST_GAP
elif accel < accel_steady - ACCEL_HYST_GAP:
accel_steady = accel + ACCEL_HYST_GAP
accel = accel_steady
return accel, accel_steady
def process_hud_alert(enabled, fingerprint, visual_alert, left_lane,
right_lane, left_lane_depart, right_lane_depart, button_on):
sys_warning = (visual_alert == VisualAlert.steerRequired)
sys_state = 1
if not button_on:
lane_visible = 0
if left_lane and right_lane or sys_warning:
if enabled or sys_warning:
sys_state = 3
else:
sys_state = 4
elif left_lane:
sys_state = 5
elif right_lane:
sys_state = 6
left_lane_warning = 0
right_lane_warning = 0
if left_lane_depart:
left_lane_warning = 1 if fingerprint in [CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
if right_lane_depart:
right_lane_warning = 1 if fingerprint in [CAR.HYUNDAI_GENESIS, CAR.GENESIS_G90, CAR.GENESIS_G80] else 2
return sys_warning, sys_state, left_lane_warning, right_lane_warning
class CarController():
def __init__(self, dbc_name, CP, VM):
self.car_fingerprint = CP.carFingerprint
self.packer = CANPacker(dbc_name)
self.accel_steady = 0
self.apply_steer_last = 0
self.steer_rate_limited = False
self.lkas11_cnt = 0
self.scc12_cnt = 0
self.resume_cnt = 0
self.last_resume_frame = 0
self.last_lead_distance = 0
self.turning_signal_timer = 0
self.lkas_button_on = True
self.longcontrol = CP.openpilotLongitudinalControl
self.scc_live = not CP.radarOffCan
def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert,
left_lane, right_lane, left_lane_depart, right_lane_depart, set_speed, lead_visible):
apply_accel = actuators.gas - actuators.brake
apply_accel, self.accel_steady = accel_hysteresis(apply_accel, self.accel_steady)
apply_accel = clip(apply_accel * ACCEL_SCALE, ACCEL_MIN, ACCEL_MAX)
new_steer = actuators.steer * SteerLimitParams.STEER_MAX
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, SteerLimitParams)
self.steer_rate_limited = new_steer != apply_steer
lkas_active = enabled and abs(CS.out.steeringAngle) < 90. and self.lkas_button_on
if CS.out.vEgo < 60 * CV.KPH_TO_MS and self.car_fingerprint == CAR.HYUNDAI_GENESIS and not CS.mdps_bus:
lkas_active = 0
if CS.out.leftBlinker or CS.out.rightBlinker:
if self.car_fingerprint not in [CAR.KIA_OPTIMA, CAR.KIA_OPTIMA_H]:
self.turning_signal_timer = 100
elif CS.left_blinker_flash or CS.right_blinker_flash:
self.turning_signal_timer = 100
if self.turning_indicator_alert:
lkas_active = 0
if self.turning_signal_timer > 0:
self.turning_signal_timer -= 1
if not lkas_active:
apply_steer = 0
self.apply_accel_last = apply_accel
self.apply_steer_last = apply_steer
sys_warning, sys_state, left_lane_warning, right_lane_warning =\
process_hud_alert(lkas_active, self.car_fingerprint, visual_alert,
left_lane, right_lane, left_lane_depart, right_lane_depart,
self.lkas_button_on)
clu11_speed = CS.clu11["CF_Clu_Vanz"]
enabled_speed = 38 if CS.is_set_speed_in_mph else 60
if clu11_speed > enabled_speed or not lkas_active:
enabled_speed = clu11_speed
if set_speed < min_set_speed:
set_speed = min_set_speed
set_speed *= CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH
if frame == 0:
self.lkas11_cnt = CS.lkas11["CF_Lkas_MsgCount"]
self.scc12_cnt = CS.scc12["CR_VSM_Alive"] + 1 if not CS.no_radar else 0
self.prev_scc_cnt = CS.scc11["AliveCounterACC"]
self.lkas11_cnt = (self.lkas11_cnt + 1) % 0x10
self.scc12_cnt %= 0xF
can_sends = []
can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
left_lane_warning, right_lane_warning, 0))
if CS.mdps_bus or CS.scc_bus == 1:
can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active,
CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane,
left_lane_warning, right_lane_warning, 1))
if frame % 2 and CS.mdps_bus:
can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed))
if pcm_cancel_cmd and self.longcontrol:
can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed))
elif CS.mdps_bus:
can_sends.append(create_mdps12(self.packer, frame, CS.mdps12))
if self.longcontrol and (CS.scc_bus or not self.scc_live) and frame % 2 == 0:
can_sends.append(create_scc12(self.packer, apply_accel, enabled, self.scc12_cnt, self.scc_live, CS.scc12))
can_sends.append(create_scc11(self.packer, frame, enabled, set_speed, lead_visible, self.scc_live, CS.scc11))
if CS.has_scc13 and frame % 20 == 0:
can_sends.append(create_scc13(self.packer, CS.scc13))
if CS.has_scc14:
can_sends.append(create_scc14(self.packer, enabled, CS.scc14))
self.scc12_cnt += 1
if CS.out.cruiseState.standstill:
if self.last_lead_distance == 0:
self.last_lead_distance = CS.lead_distance
self.resume_cnt = 0
elif CS.lead_distance != self.last_lead_distance and (frame - self.last_resume_frame) > 5:
can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.RES_ACCEL, clu11_speed))
self.resume_cnt += 1
if self.resume_cnt > 5:
self.last_resume_frame = frame
self.resume_cnt = 0
elif self.last_lead_distance != 0:
self.last_lead_distance = 0
if frame % 5 == 0 and self.car_fingerprint in [CAR.SONATA, CAR.PALISADE, CAR.SONATA_H, CAR.SANTA_FE]:
can_sends.append(create_lfa_mfa(self.packer, frame, enabled))
return can_sends
| true | true |
f7265a0b69f2307625509d7efc09f1872f2f80c4 | 8,067 | py | Python | trading-with-python/util/trendy.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 51 | 2019-02-01T19:43:37.000Z | 2022-03-16T09:07:03.000Z | trading-with-python/util/trendy.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 2 | 2019-02-23T18:54:22.000Z | 2019-11-09T01:30:32.000Z | trading-with-python/util/trendy.py | mikimaus78/ml_monorepo | b2c2627ff0e86e27f6829170d0dac168d8e5783b | [
"BSD-3-Clause"
] | 35 | 2019-02-08T02:00:31.000Z | 2022-03-01T23:17:00.000Z | import numpy as np
from filter import movingaverage
def gentrends(x, window=1/3.0, charts=True):
"""
Returns a Pandas dataframe with support and resistance lines.
:param x: One-dimensional data set
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
import pandas.io.data as pd
x = np.array(x)
if window < 1:
window = int(window * len(x))
max1 = np.where(x == max(x))[0][0] # find the index of the abs max
min1 = np.where(x == min(x))[0][0] # find the index of the abs min
# First the max
if max1 + window > len(x):
max2 = max(x[0:(max1 - window)])
else:
max2 = max(x[(max1 + window):])
# Now the min
if min1 - window < 0:
min2 = min(x[(min1 + window):])
else:
min2 = min(x[0:(min1 - window)])
# Now find the indices of the secondary extrema
max2 = np.where(x == max2)[0][0] # find the index of the 2nd max
min2 = np.where(x == min2)[0][0] # find the index of the 2nd min
# Create & extend the lines
maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points
minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points
a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline
a_min = x[min1] - (minslope * min1) # y-intercept for min trendline
b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt
b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point
maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's
minline = np.linspace(a_min, b_min, len(x)) # Y values between min's
# OUTPUT
trends = np.transpose(np.array((x, maxline, minline)))
trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
columns=['Data', 'Max Line', 'Min Line'])
if charts is True:
from matplotlib.pyplot import plot, grid, show, figure
figure()
plot(trends)
grid()
show()
return trends, maxslope, minslope
def segtrends(x, segments=2, charts=True, window=7):
    """
    Turn minitrends to iterative process more easily adaptable to
    implementation in simple trading systems; allows backtesting functionality.

    Splits the series into equal segments, takes each segment's max/min and
    their positions, and (optionally) plots them with a moving average.

    :param x: One-dimensional data set
    :param segments: number of equal-size segments to split the data into
    :param window: moving-average window used for the plotted smooth line
    :param charts: Boolean value saying whether to print chart to screen
    :return: (x_maxima, maxima, x_minima, minima) per-segment extrema arrays
    """
    import numpy as np
    y = np.array(x)
    n=len(y)
    # smoothed series; only used for plotting below
    movy = movingaverage(y, window)
    # Implement trendlines and Find the indexes of these maxima in the data
    segments = int(segments)
    maxima = np.ones(segments)
    minima = np.ones(segments)
    x_maxima = np.ones(segments)
    x_minima = np.ones(segments)
    segsize = int(len(y)/segments)
    for i in range(1, segments+1):
        ind2 = i*segsize
        ind1 = ind2 - segsize
        seg = y[ind1:ind2]
        maxima[i-1] = max(seg)
        minima[i-1] = min(seg)
        # positions are segment-relative; shift back to absolute indices
        x_maxima[i-1] = ind1 + (np.where(seg == maxima[i-1])[0][0])
        x_minima[i-1] = ind1 + (np.where(seg == minima[i-1])[0][0])
    if charts:
        import matplotlib.pyplot as plt
        plt.plot(y)
        plt.grid(True)
    # NOTE(review): this loop computes per-pair trendlines (maxline/minline)
    # but discards them — the corresponding plot calls are commented out
    # below. Presumably leftover from an earlier charting variant; confirm
    # before deleting.
    for i in range(0, segments-1):
        maxslope = (maxima[i+1] - maxima[i]) / (x_maxima[i+1] - x_maxima[i])
        a_max = maxima[i] - (maxslope * x_maxima[i])
        b_max = maxima[i] + (maxslope * (len(y) - x_maxima[i]))
        maxline = np.linspace(a_max, b_max, len(y))
        minslope = (minima[i+1] - minima[i]) / (x_minima[i+1] - x_minima[i])
        a_min = minima[i] - (minslope * x_minima[i])
        b_min = minima[i] + (minslope * (len(y) - x_minima[i]))
        minline = np.linspace(a_min, b_min, len(y))
        if charts:
            #plt.plot(maxline, 'g')
            #plt.plot(minline, 'r')
            pass
    if charts:
        plt.plot(range(n), movy, 'b')
        plt.plot(x_maxima, maxima, 'g')
        plt.plot(x_minima, minima, 'r')
        plt.show()
    # OUTPUT
    return x_maxima, maxima, x_minima, minima
def minitrends(x, window=20, charts=True):
    """
    Turn minitrends to iterative process more easily adaptable to
    implementation in simple trading systems; allows backtesting functionality.

    Finds local maxima/minima of ``x`` (points where the `window`-lagged
    difference changes sign), refines each run between consecutive
    candidates to the best extremum, and appends/prepends the series
    endpoints when they extend beyond the detected extrema.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: (xMax, yMax, xMin, yMin) arrays of critical points
    """
    import numpy as np

    y = np.array(x)
    if window < 1:  # window given as a fraction of the data length
        window = int(float(window) * len(y))
    x = np.arange(0, len(y))
    dy = y[window:] - y[:-window]
    # Candidate turning points: sign change of the lagged difference.
    # np.flatnonzero replaces the old `x[crit]` boolean indexing, which
    # used a mask shorter than x (an IndexError on modern NumPy).
    cand = np.flatnonzero(dy[:-1] * dy[1:] < 0)
    # Classify candidates as maxima/minima by comparing with the values
    # `window` steps ahead and behind (negative indices wrap around, as in
    # the original implementation).
    is_max = (y[cand] - y[cand + window] > 0) & (y[cand] - y[cand - window] > 0)
    is_min = (y[cand] - y[cand + window] < 0) & (y[cand] - y[cand - window] < 0)
    xmax = cand[is_max]
    xmin = cand[is_min]
    # Refine: between each pair of consecutive candidates keep the best
    # extremum. Iterating index pairs directly removes the original's bare
    # `except: pass`, which only ever fired on the final out-of-range pair.
    yMax = np.array([])
    xMax = np.array([])
    for k in range(len(xmax) - 1):
        best = y[xmax[k]:xmax[k + 1]].max()
        yMax = np.append(yMax, best)
        xMax = np.append(xMax, np.where(y == best)[0][0])
    yMin = np.array([])
    xMin = np.array([])
    for k in range(len(xmin) - 1):
        best = y[xmin[k]:xmin[k + 1]].min()
        yMin = np.append(yMin, best)
        xMin = np.append(xMin, np.where(y == best)[0][0])
    # Include the endpoints when they extend past the detected extrema
    # (like the original, this indexes yMax[-1]/yMin[-1] and so raises on
    # data with fewer than two detected maxima or minima).
    if y[-1] > yMax[-1]:
        yMax = np.append(yMax, y[-1])
        xMax = np.append(xMax, x[-1])
    if y[0] not in yMax:
        yMax = np.insert(yMax, 0, y[0])
        xMax = np.insert(xMax, 0, x[0])
    if y[-1] < yMin[-1]:
        yMin = np.append(yMin, y[-1])
        xMin = np.append(xMin, x[-1])
    if y[0] not in yMin:
        yMin = np.insert(yMin, 0, y[0])
        xMin = np.insert(xMin, 0, x[0])
    # Plot results if desired
    if charts is True:
        from matplotlib.pyplot import plot, show, grid
        plot(x, y)
        plot(xMax, yMax, '-o')
        plot(xMin, yMin, '-o')
        grid(True)
        show()
    # Return arrays of critical points
    return xMax, yMax, xMin, yMin
def iterlines(x, window=30, charts=True):
    """
    Turn minitrends to iterative process more easily adaptable to
    implementation in simple trading systems; allows backtesting functionality.

    Emits +1 where the current value breaks above the rolling ``window``-bar
    high and -1 where it breaks below the rolling ``window``-bar low.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: numpy array of signals (+1 breakout up, -1 breakout down, 0 none)
    """
    import numpy as np
    x = np.array(x)
    n = len(x)
    if window < 1:
        # Fractional window: a share of the data length. Never let it
        # collapse to zero -- an empty lookback slice has no max/min.
        window = max(int(window * n), 1)
    sigs = np.zeros(n, dtype=float)
    # Bounded for-loop replaces the original `while i != n` counter, which
    # indexed out of bounds (and could never terminate) when window > n.
    for i in range(window, n):
        lookback = x[i - window:i]  # slice once per iteration
        if x[i] > lookback.max():
            sigs[i] = 1
        elif x[i] < lookback.min():
            sigs[i] = -1
    xmin = np.where(sigs == -1.0)[0]
    xmax = np.where(sigs == 1.0)[0]
    ymin = x[xmin]
    ymax = x[xmax]
    if charts is True:
        from matplotlib.pyplot import plot, grid, show
        plot(x)
        plot(xmin, ymin, 'ro')
        plot(xmax, ymax, 'go')
        grid(True)
        show()
    return sigs
| 32.792683 | 79 | 0.567497 | import numpy as np
from filter import movingaverage
def gentrends(x, window=1/3.0, charts=True):
    """
    Return a pandas DataFrame with the global support and resistance lines
    of the series ``x``.

    :param x: One-dimensional data set
    :param window: How far the second anchor of each trendline must sit from
                   the global extremum. If window < 1 it is taken as a
                   fraction of the size of the data.
    :param charts: Boolean value saying whether to print chart to screen
    :return: (trends DataFrame with 'Data'/'Max Line'/'Min Line' columns,
              slope of the resistance line, slope of the support line)
    """
    import numpy as np
    # Fix: `pandas.io.data` was removed from pandas; DataFrame lives in the
    # top-level pandas namespace.
    import pandas as pd
    x = np.array(x)
    if window < 1:
        window = int(window * len(x))
    # First anchor of each trendline: the global max / min of the series.
    max1 = np.where(x == max(x))[0][0]
    min1 = np.where(x == min(x))[0][0]
    # Second anchor: the extreme value at least `window` points away, taken
    # from whichever side of the global extremum has room.
    if max1 + window > len(x):
        max2 = max(x[0:(max1 - window)])
    else:
        max2 = max(x[(max1 + window):])
    if min1 - window < 0:
        min2 = min(x[(min1 + window):])
    else:
        min2 = min(x[0:(min1 - window)])
    max2 = np.where(x == max2)[0][0]
    min2 = np.where(x == min2)[0][0]
    # Slope through the two anchors, extrapolated across the whole series.
    maxslope = (x[max1] - x[max2]) / (max1 - max2)
    minslope = (x[min1] - x[min2]) / (min1 - min2)
    a_max = x[max1] - (maxslope * max1)
    a_min = x[min1] - (minslope * min1)
    b_max = x[max1] + (maxslope * (len(x) - max1))
    b_min = x[min1] + (minslope * (len(x) - min1))
    maxline = np.linspace(a_max, b_max, len(x))  # Y values between max's
    minline = np.linspace(a_min, b_min, len(x))  # Y values between min's
    trends = np.transpose(np.array((x, maxline, minline)))
    trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
                          columns=['Data', 'Max Line', 'Min Line'])
    if charts is True:
        from matplotlib.pyplot import plot, grid, show, figure
        figure()
        plot(trends)
        grid()
        show()
    return trends, maxslope, minslope
def segtrends(x, segments=2, charts=True, window=7):
    """Segmented trend analysis: split the series into `segments` equal
    slices and locate the max/min (and their positions) of each slice.

    :param x: One-dimensional data set
    :param segments: Number of equal-size segments to analyse
    :param charts: Boolean value saying whether to print chart to screen
    :param window: Window length for the moving-average chart overlay
    :return: (x_maxima, maxima, x_minima, minima) per-segment extrema
    """
    import numpy as np
    y = np.array(x)
    n=len(y)
    # Moving-average overlay for the chart; `movingaverage` comes from the
    # project-local `filter` module.
    movy = movingaverage(y, window)
    segments = int(segments)
    maxima = np.ones(segments)
    minima = np.ones(segments)
    x_maxima = np.ones(segments)
    x_minima = np.ones(segments)
    segsize = int(len(y)/segments)
    for i in range(1, segments+1):
        ind2 = i*segsize
        ind1 = ind2 - segsize
        seg = y[ind1:ind2]
        maxima[i-1] = max(seg)
        minima[i-1] = min(seg)
        # Absolute index (into y) of the first occurrence of each extremum.
        x_maxima[i-1] = ind1 + (np.where(seg == maxima[i-1])[0][0])
        x_minima[i-1] = ind1 + (np.where(seg == minima[i-1])[0][0])
    if charts:
        import matplotlib.pyplot as plt
        plt.plot(y)
        plt.grid(True)
    for i in range(0, segments-1):
        # Trendline joining consecutive segment maxima (slope + endpoints).
        maxslope = (maxima[i+1] - maxima[i]) / (x_maxima[i+1] - x_maxima[i])
        a_max = maxima[i] - (maxslope * x_maxima[i])
        b_max = maxima[i] + (maxslope * (len(y) - x_maxima[i]))
        maxline = np.linspace(a_max, b_max, len(y))
        minslope = (minima[i+1] - minima[i]) / (x_minima[i+1] - x_minima[i])
        a_min = minima[i] - (minslope * x_minima[i])
        b_min = minima[i] + (minslope * (len(y) - x_minima[i]))
        minline = np.linspace(a_min, b_min, len(y))
        if charts:
            # NOTE(review): maxline/minline are computed but never plotted or
            # returned here -- presumably the plotting calls were stripped;
            # confirm against the original source.
            pass
    if charts:
        plt.plot(range(n), movy, 'b')
        plt.plot(x_maxima, maxima, 'g')
        plt.plot(x_minima, minima, 'r')
        plt.show()
    return x_maxima, maxima, x_minima, minima
def minitrends(x, window=20, charts=True):
    """
    Find local extrema ("mini trends") of a series using a windowed
    difference, then refine each candidate to the best max/min in its region.

    :param x: One-dimensional data set
    :param window: How long the trendlines should be. If window < 1, then it
                   will be taken as a percentage of the size of the data
    :param charts: Boolean value saying whether to print chart to screen
    :return: (xMax, yMax, xMin, yMin) positions/values of critical points
    """
    import numpy as np
    y = np.array(x)
    if window < 1:  # window given as a fraction of the data length
        window = float(window)
        window = int(window * len(y))
    x = np.arange(0, len(y))
    dy = y[window:] - y[:-window]
    crit = dy[:-1] * dy[1:] < 0
    # Fix: explicit integer indices of the sign changes. The original
    # `x[crit]` boolean fancy-indexing used a mask shorter than x, which
    # modern NumPy rejects with an IndexError (old NumPy zero-padded it).
    xc = np.where(crit)[0]
    # Classify each sign change as a local max or min candidate.
    maxi = (y[xc] - y[xc + window] > 0) & \
           (y[xc] - y[xc - window] > 0) * 1
    mini = (y[xc] - y[xc + window] < 0) & \
           (y[xc] - y[xc - window] < 0) * 1
    maxi = maxi.astype(float)
    mini = mini.astype(float)
    maxi[maxi == 0] = np.nan
    mini[mini == 0] = np.nan
    xmax = xc * maxi
    xmax = xmax[~np.isnan(xmax)]
    xmax = xmax.astype(int)
    xmin = xc * mini
    xmin = xmin[~np.isnan(xmin)]
    xmin = xmin.astype(int)
    # See if a better max or min exists within each candidate's region.
    yMax = np.array([])
    xMax = np.array([])
    for i in xmax:
        indx = np.where(xmax == i)[0][0] + 1
        try:
            Y = y[i:xmax[indx]]
            yMax = np.append(yMax, Y.max())
            xMax = np.append(xMax, np.where(y == yMax[-1])[0][0])
        except (IndexError, ValueError):
            # IndexError: last candidate has no successor to bound its
            # region; ValueError: empty slice has no max. (Was a bare
            # except, which also hid real bugs.)
            pass
    yMin = np.array([])
    xMin = np.array([])
    for i in xmin:
        indx = np.where(xmin == i)[0][0] + 1
        try:
            Y = y[i:xmin[indx]]
            yMin = np.append(yMin, Y.min())
            xMin = np.append(xMin, np.where(y == yMin[-1])[0][0])
        except (IndexError, ValueError):
            pass
    # Anchor the series endpoints. The size guards keep inputs with no
    # detected extrema from crashing on yMax[-1] / yMin[-1].
    if yMax.size == 0 or y[-1] > yMax[-1]:
        yMax = np.append(yMax, y[-1])
        xMax = np.append(xMax, x[-1])
    if y[0] not in yMax:
        yMax = np.insert(yMax, 0, y[0])
        xMax = np.insert(xMax, 0, x[0])
    if yMin.size == 0 or y[-1] < yMin[-1]:
        yMin = np.append(yMin, y[-1])
        xMin = np.append(xMin, x[-1])
    if y[0] not in yMin:
        yMin = np.insert(yMin, 0, y[0])
        xMin = np.insert(xMin, 0, x[0])
    # Plot results if desired
    if charts is True:
        from matplotlib.pyplot import plot, show, grid
        plot(x, y)
        plot(xMax, yMax, '-o')
        plot(xMin, yMin, '-o')
        grid(True)
        show()
    # Return arrays of critical points
    return xMax, yMax, xMin, yMin
def iterlines(x, window=30, charts=True):
    """Iterative breakout-signal variant of minitrends.

    Walks the series and flags +1 when a value exceeds the previous
    ``window``-bar high, -1 when it drops below the ``window``-bar low.

    :param x: one-dimensional data set
    :param window: lookback length; fractions of the data size if < 1
    :param charts: whether to render the signals with matplotlib
    :return: numpy array of per-bar signals (1, -1 or 0)
    """
    import numpy as np
    series = np.array(x)
    length = len(series)
    if window < 1:
        window = int(window * length)
    signals = np.zeros(length, dtype=float)
    idx = window
    while idx != length:
        lookback = series[idx - window:idx]
        if series[idx] > lookback.max():
            signals[idx] = 1
        elif series[idx] < lookback.min():
            signals[idx] = -1
        idx += 1
    lows = np.where(signals == -1.0)[0]
    highs = np.where(signals == 1.0)[0]
    low_vals = series[lows]
    high_vals = series[highs]
    if charts is True:
        from matplotlib.pyplot import plot, grid, show
        plot(series)
        plot(lows, low_vals, 'ro')
        plot(highs, high_vals, 'go')
        grid(True)
        show()
    return signals
| true | true |
f7265b61b492e4080f0306586bfd4421f042a4b1 | 9,483 | py | Python | gamestonk_terminal/stocks/insider/openinsider_view.py | jbushago/GamestonkTerminal | 73a2b419664bf62bbdc59aa8402c8cd6a913a518 | [
"MIT"
] | 1 | 2022-03-15T13:05:40.000Z | 2022-03-15T13:05:40.000Z | gamestonk_terminal/stocks/insider/openinsider_view.py | jbushago/GamestonkTerminal | 73a2b419664bf62bbdc59aa8402c8cd6a913a518 | [
"MIT"
] | null | null | null | gamestonk_terminal/stocks/insider/openinsider_view.py | jbushago/GamestonkTerminal | 73a2b419664bf62bbdc59aa8402c8cd6a913a518 | [
"MIT"
] | null | null | null | import itertools
import logging
import os
import textwrap
from typing import List
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import (
export_data,
patch_pandas_text_adjustment,
print_rich_table,
)
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.insider.openinsider_model import (
get_open_insider_data,
get_open_insider_link,
)
from gamestonk_terminal import rich_config
logger = logging.getLogger(__name__)
# Terminal shortcut -> openinsider.com screener page slug.
d_open_insider = {
    "lcb": "latest-cluster-buys",
    "lpsb": "latest-penny-stock-buys",
    "lit": "latest-insider-trading",
    "lip": "insider-purchases",
    "blip": "latest-insider-purchases-25k",
    "blop": "latest-officer-purchases-25k",
    "blcp": "latest-ceo-cfo-purchases-25k",
    "lis": "insider-sales",
    "blis": "latest-insider-sales-100k",
    "blos": "latest-officer-sales-100k",
    "blcs": "latest-ceo-cfo-sales-100k",
    "topt": "top-officer-purchases-of-the-day",
    "toppw": "top-officer-purchases-of-the-week",
    "toppm": "top-officer-purchases-of-the-month",
    "tipt": "top-insider-purchases-of-the-day",
    "tippw": "top-insider-purchases-of-the-week",
    "tippm": "top-insider-purchases-of-the-month",
    "tist": "top-insider-sales-of-the-day",
    "tispw": "top-insider-sales-of-the-week",
    "tispm": "top-insider-sales-of-the-month",
}
# Legend for the one-letter flags found in the screener's "X" column.
d_notes = {
    "A": "A: Amended filing",
    "D": "D: Derivative transaction in filing (usually option exercise)",
    "E": "E: Error detected in filing",
    "M": "M: Multiple transactions in filing; earliest reported transaction date & weighted average transaction price",
}
# Rich-markup legend describing each trade-type code shown in the results.
d_trade_types = {
    "S - Sale": "[red]S - Sale: Sale of securities on an exchange or to another person[/red]",
    "S - Sale+OE": "[yellow]S - Sale+OE: Sale of securities "
    "on an exchange or to another person (after option exercise)[/yellow]",
    "F - Tax": "[magenta]F - Tax: Payment of exercise price or "
    "tax liability using portion of securities received from the company[/magenta]",
    "P - Purchase": "[green]P - Purchase: Purchase of securities on "
    "an exchange or from another person[/green]",
}
def lambda_red_highlight(values) -> List[str]:
    """Wrap every value in rich ``[red]...[/red]`` markup.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list("[red]{}[/red]".format(val) for val in values)
def lambda_yellow_highlight(values) -> List[str]:
    """Wrap every value in rich ``[yellow]...[/yellow]`` markup.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list("[yellow]{}[/yellow]".format(val) for val in values)
def lambda_magenta_highlight(values):
    """Wrap every value in rich ``[magenta]...[/magenta]`` markup.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list("[magenta]{}[/magenta]".format(val) for val in values)
def lambda_green_highlight(values):
    """Wrap every value in rich ``[green]...[/green]`` markup.

    Parameters
    ----------
    values : List[str]
        dataframe values to color

    Returns
    ----------
    List[str]
        colored dataframes values
    """
    return list("[green]{}[/green]".format(val) for val in values)
@log_start_end(log=logger)
def print_insider_data(type_insider: str, limit: int = 10, export: str = ""):
    """Scrape one openinsider.com screener page and print it as a rich table.

    Parameters
    ----------
    type_insider: str
        Insider type of data; must be a key of ``d_open_insider``
    limit: int
        Limit of data rows to display
    export: str
        Export data format (empty string disables export)
    """
    # NOTE(review): no timeout on the request -- may hang indefinitely on a
    # slow endpoint; confirm whether a timeout should be added.
    response = requests.get(f"http://openinsider.com/{d_open_insider[type_insider]}")
    soup = BeautifulSoup(response.text, "html.parser")
    table = soup.find("table", {"class": "tinytable"})
    if not table:
        console.print("No insider information found", "\n")
        return
    table_rows = table.find_all("tr")
    res = []
    for tr in table_rows:
        td = tr.find_all("td")
        # NOTE(review): the comprehension variable shadows the outer `tr`.
        row = [tr.text.strip() for tr in td if tr.text.strip()]
        res.append(row)
    df = pd.DataFrame(res).dropna().head(n=limit)
    columns = [
        "X",
        "Filing Date",
        "Trade Date",
        "Ticker",
        "Company Name",
        "Industry" if type_insider == "lcb" else "Insider Name",
        "Title",
        "Trade Type",
        "Price",
        "Qty",
        "Owned",
        "Diff Own",
        "Value",
    ]
    # 13 columns means the page included the "X" notes column; otherwise
    # drop it from the header list.
    if df.shape[1] == 13:
        df.columns = columns
    else:
        df.columns = columns[1:]
    # Wrap long text fields so the rich table stays within terminal width.
    df["Filing Date"] = df["Filing Date"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x
    )
    df["Company Name"] = df["Company Name"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
    )
    df["Title"] = df["Title"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x
    )
    if type_insider == "lcb":
        df["Industry"] = df["Industry"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
        )
    else:
        df["Insider Name"] = df["Insider Name"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
        )
    print_rich_table(
        df,
        headers=[x.title() for x in df.columns],
        show_index=False,
        title="Insider Data",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), type_insider, df)
    # Print the legend for every distinct note flag that appeared in "X".
    if df.shape[1] == 13:
        l_chars = [list(chars) for chars in df["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        for char in l_uchars:
            console.print(d_notes[char])
    console.print("")
@log_start_end(log=logger)
def print_insider_filter(
    preset_loaded: str,
    ticker: str,
    limit: int = 10,
    links: bool = False,
    export: str = "",
):
    """Print insider filter based on loaded preset. [Source: OpenInsider]

    Parameters
    ----------
    preset_loaded : str
        Loaded preset filter
    ticker : str
        Stock ticker; when given it takes precedence over the preset
    limit : int
        Limit of rows of data to display
    links : bool
        Flag to show hyperlinks instead of the data columns
    export : str
        Format to export data (empty string disables export)
    """
    if ticker:
        link = f"http://openinsider.com/screener?s={ticker}"
    else:
        link = get_open_insider_link(preset_loaded)
    if not link:
        console.print("")
        return
    df_insider = get_open_insider_data(link, has_company_name=bool(not ticker))
    # Keep an untouched copy: the legends at the bottom read the "X" and
    # "Trade Type" columns, which are dropped/colored below.
    df_insider_orig = df_insider.copy()
    if df_insider.empty:
        console.print("No insider data found\n")
        return
    if links:
        df_insider = df_insider[["Ticker Link", "Insider Link", "Filing Link"]].head(
            limit
        )
    else:
        df_insider = df_insider.drop(
            columns=["Filing Link", "Ticker Link", "Insider Link"]
        ).head(limit)
    # Colorize rows per trade type when rich color output is enabled.
    if rich_config.USE_COLOR and not links:
        if not df_insider[df_insider["Trade Type"] == "S - Sale"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale"] = df_insider[
                df_insider["Trade Type"] == "S - Sale"
            ].apply(lambda_red_highlight)
        if not df_insider[df_insider["Trade Type"] == "S - Sale+OE"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale+OE"] = df_insider[
                df_insider["Trade Type"] == "S - Sale+OE"
            ].apply(lambda_yellow_highlight)
        if not df_insider[df_insider["Trade Type"] == "F - Tax"].empty:
            df_insider[df_insider["Trade Type"] == "F - Tax"] = df_insider[
                df_insider["Trade Type"] == "F - Tax"
            ].apply(lambda_magenta_highlight)
        if not df_insider[df_insider["Trade Type"] == "P - Purchase"].empty:
            df_insider[df_insider["Trade Type"] == "P - Purchase"] = df_insider[
                df_insider["Trade Type"] == "P - Purchase"
            ].apply(lambda_green_highlight)
        patch_pandas_text_adjustment()
        pd.set_option("display.max_colwidth", 0)
        pd.set_option("display.max_rows", None)
        # needs to be done because table is too large :(
        df_insider = df_insider.drop(columns=["Filing Date", "Trade Type"])
    else:
        # needs to be done because table is too large :(
        df_insider = df_insider.drop(columns=["Filing Date"])
    console.print("")
    print_rich_table(
        df_insider,
        headers=[x.title() for x in df_insider.columns],
        title="Insider filtered",
    )
    if export:
        # NOTE(review): if export is set but both preset_loaded and ticker
        # are falsy, `cmd` is unbound here -- confirm callers always pass one.
        if preset_loaded:
            cmd = "filter"
        if ticker:
            cmd = "lis"
        export_data(export, os.path.dirname(os.path.abspath(__file__)), cmd, df_insider)
    # Print legends for note flags and trade types seen in the raw data.
    if not links:
        l_chars = [list(chars) for chars in df_insider_orig["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        console.print("")
        for char in l_uchars:
            console.print(d_notes[char])
        l_tradetype = df_insider_orig["Trade Type"].values
        l_utradetype = np.unique(l_tradetype)
        console.print("")
        for tradetype in l_utradetype:
            console.print(d_trade_types[tradetype])
    console.print("")
| 29.178462 | 119 | 0.605505 | import itertools
import logging
import os
import textwrap
from typing import List
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import (
export_data,
patch_pandas_text_adjustment,
print_rich_table,
)
from gamestonk_terminal.rich_config import console
from gamestonk_terminal.stocks.insider.openinsider_model import (
get_open_insider_data,
get_open_insider_link,
)
from gamestonk_terminal import rich_config
logger = logging.getLogger(__name__)
# Terminal shortcut -> openinsider.com screener page slug.
d_open_insider = {
    "lcb": "latest-cluster-buys",
    "lpsb": "latest-penny-stock-buys",
    "lit": "latest-insider-trading",
    "lip": "insider-purchases",
    "blip": "latest-insider-purchases-25k",
    "blop": "latest-officer-purchases-25k",
    "blcp": "latest-ceo-cfo-purchases-25k",
    "lis": "insider-sales",
    "blis": "latest-insider-sales-100k",
    "blos": "latest-officer-sales-100k",
    "blcs": "latest-ceo-cfo-sales-100k",
    "topt": "top-officer-purchases-of-the-day",
    "toppw": "top-officer-purchases-of-the-week",
    "toppm": "top-officer-purchases-of-the-month",
    "tipt": "top-insider-purchases-of-the-day",
    "tippw": "top-insider-purchases-of-the-week",
    "tippm": "top-insider-purchases-of-the-month",
    "tist": "top-insider-sales-of-the-day",
    "tispw": "top-insider-sales-of-the-week",
    "tispm": "top-insider-sales-of-the-month",
}
# Legend for the one-letter flags found in the screener's "X" column.
d_notes = {
    "A": "A: Amended filing",
    "D": "D: Derivative transaction in filing (usually option exercise)",
    "E": "E: Error detected in filing",
    "M": "M: Multiple transactions in filing; earliest reported transaction date & weighted average transaction price",
}
# Rich-markup legend describing each trade-type code shown in the results.
d_trade_types = {
    "S - Sale": "[red]S - Sale: Sale of securities on an exchange or to another person[/red]",
    "S - Sale+OE": "[yellow]S - Sale+OE: Sale of securities "
    "on an exchange or to another person (after option exercise)[/yellow]",
    "F - Tax": "[magenta]F - Tax: Payment of exercise price or "
    "tax liability using portion of securities received from the company[/magenta]",
    "P - Purchase": "[green]P - Purchase: Purchase of securities on "
    "an exchange or from another person[/green]",
}
def lambda_red_highlight(values) -> List[str]:
    """Return each value wrapped in rich ``[red]...[/red]`` markup."""
    return [f"[red]{val}[/red]" for val in values]
def lambda_yellow_highlight(values) -> List[str]:
    """Return each value wrapped in rich ``[yellow]...[/yellow]`` markup."""
    return [f"[yellow]{val}[/yellow]" for val in values]
def lambda_magenta_highlight(values):
    """Return each value wrapped in rich ``[magenta]...[/magenta]`` markup."""
    return [f"[magenta]{val}[/magenta]" for val in values]
def lambda_green_highlight(values):
    """Return each value wrapped in rich ``[green]...[/green]`` markup."""
    return [f"[green]{val}[/green]" for val in values]
@log_start_end(log=logger)
def print_insider_data(type_insider: str, limit: int = 10, export: str = ""):
    """Scrape one openinsider.com screener page and print it as a rich table.

    Parameters
    ----------
    type_insider: str
        Insider type of data; must be a key of ``d_open_insider``
    limit: int
        Limit of data rows to display
    export: str
        Export data format (empty string disables export)
    """
    # NOTE(review): no timeout on the request -- may hang on a slow endpoint.
    response = requests.get(f"http://openinsider.com/{d_open_insider[type_insider]}")
    soup = BeautifulSoup(response.text, "html.parser")
    table = soup.find("table", {"class": "tinytable"})
    if not table:
        console.print("No insider information found", "\n")
        return
    table_rows = table.find_all("tr")
    res = []
    for tr in table_rows:
        td = tr.find_all("td")
        # NOTE(review): the comprehension variable shadows the outer `tr`.
        row = [tr.text.strip() for tr in td if tr.text.strip()]
        res.append(row)
    df = pd.DataFrame(res).dropna().head(n=limit)
    columns = [
        "X",
        "Filing Date",
        "Trade Date",
        "Ticker",
        "Company Name",
        "Industry" if type_insider == "lcb" else "Insider Name",
        "Title",
        "Trade Type",
        "Price",
        "Qty",
        "Owned",
        "Diff Own",
        "Value",
    ]
    # 13 columns means the page included the "X" notes column.
    if df.shape[1] == 13:
        df.columns = columns
    else:
        df.columns = columns[1:]
    # Wrap long text fields so the rich table stays within terminal width.
    df["Filing Date"] = df["Filing Date"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x
    )
    df["Company Name"] = df["Company Name"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
    )
    df["Title"] = df["Title"].apply(
        lambda x: "\n".join(textwrap.wrap(x, width=10)) if isinstance(x, str) else x
    )
    if type_insider == "lcb":
        df["Industry"] = df["Industry"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
        )
    else:
        df["Insider Name"] = df["Insider Name"].apply(
            lambda x: "\n".join(textwrap.wrap(x, width=20)) if isinstance(x, str) else x
        )
    print_rich_table(
        df,
        headers=[x.title() for x in df.columns],
        show_index=False,
        title="Insider Data",
    )
    export_data(export, os.path.dirname(os.path.abspath(__file__)), type_insider, df)
    # Print the legend for every distinct note flag that appeared in "X".
    if df.shape[1] == 13:
        l_chars = [list(chars) for chars in df["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        for char in l_uchars:
            console.print(d_notes[char])
    console.print("")
@log_start_end(log=logger)
def print_insider_filter(
    preset_loaded: str,
    ticker: str,
    limit: int = 10,
    links: bool = False,
    export: str = "",
):
    """Print insider filter based on loaded preset. [Source: OpenInsider]

    Parameters
    ----------
    preset_loaded : str
        Loaded preset filter
    ticker : str
        Stock ticker; when given it takes precedence over the preset
    links : bool
        Flag to show hyperlinks instead of the data columns
    limit : int
        Limit of rows of data to display
    export : str
        Format to export data (empty string disables export)
    """
    if ticker:
        link = f"http://openinsider.com/screener?s={ticker}"
    else:
        link = get_open_insider_link(preset_loaded)
    if not link:
        console.print("")
        return
    df_insider = get_open_insider_data(link, has_company_name=bool(not ticker))
    # Untouched copy: the legends below read "X" and "Trade Type", which are
    # dropped/colored in the display copy.
    df_insider_orig = df_insider.copy()
    if df_insider.empty:
        console.print("No insider data found\n")
        return
    if links:
        df_insider = df_insider[["Ticker Link", "Insider Link", "Filing Link"]].head(
            limit
        )
    else:
        df_insider = df_insider.drop(
            columns=["Filing Link", "Ticker Link", "Insider Link"]
        ).head(limit)
    # Colorize rows per trade type when rich color output is enabled.
    if rich_config.USE_COLOR and not links:
        if not df_insider[df_insider["Trade Type"] == "S - Sale"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale"] = df_insider[
                df_insider["Trade Type"] == "S - Sale"
            ].apply(lambda_red_highlight)
        if not df_insider[df_insider["Trade Type"] == "S - Sale+OE"].empty:
            df_insider[df_insider["Trade Type"] == "S - Sale+OE"] = df_insider[
                df_insider["Trade Type"] == "S - Sale+OE"
            ].apply(lambda_yellow_highlight)
        if not df_insider[df_insider["Trade Type"] == "F - Tax"].empty:
            df_insider[df_insider["Trade Type"] == "F - Tax"] = df_insider[
                df_insider["Trade Type"] == "F - Tax"
            ].apply(lambda_magenta_highlight)
        if not df_insider[df_insider["Trade Type"] == "P - Purchase"].empty:
            df_insider[df_insider["Trade Type"] == "P - Purchase"] = df_insider[
                df_insider["Trade Type"] == "P - Purchase"
            ].apply(lambda_green_highlight)
        patch_pandas_text_adjustment()
        pd.set_option("display.max_colwidth", 0)
        pd.set_option("display.max_rows", None)
        # Dropped because the rendered table is otherwise too wide.
        df_insider = df_insider.drop(columns=["Filing Date", "Trade Type"])
    else:
        # Dropped because the rendered table is otherwise too wide.
        df_insider = df_insider.drop(columns=["Filing Date"])
    console.print("")
    print_rich_table(
        df_insider,
        headers=[x.title() for x in df_insider.columns],
        title="Insider filtered",
    )
    if export:
        # NOTE(review): if export is set but both preset_loaded and ticker
        # are falsy, `cmd` is unbound -- confirm callers always pass one.
        if preset_loaded:
            cmd = "filter"
        if ticker:
            cmd = "lis"
        export_data(export, os.path.dirname(os.path.abspath(__file__)), cmd, df_insider)
    # Print legends for note flags and trade types seen in the raw data.
    if not links:
        l_chars = [list(chars) for chars in df_insider_orig["X"].values]
        l_uchars = np.unique(list(itertools.chain(*l_chars)))
        console.print("")
        for char in l_uchars:
            console.print(d_notes[char])
        l_tradetype = df_insider_orig["Trade Type"].values
        l_utradetype = np.unique(l_tradetype)
        console.print("")
        for tradetype in l_utradetype:
            console.print(d_trade_types[tradetype])
    console.print("")
f7265be4393da76c22754da75898052c8a4b8c71 | 831 | py | Python | src/cpp/qpsolver/cvxopt/examples/doc/chap8/conelp.py | Hap-Hugh/quicksel | 10eee90b759638d5c54ba19994ae8e36e90e12b8 | [
"Apache-2.0"
] | 15 | 2020-07-07T16:32:53.000Z | 2022-03-16T14:23:23.000Z | src/cpp/qpsolver/cvxopt/examples/doc/chap8/conelp.py | Hap-Hugh/quicksel | 10eee90b759638d5c54ba19994ae8e36e90e12b8 | [
"Apache-2.0"
] | 2 | 2020-09-02T15:25:39.000Z | 2020-09-24T08:37:18.000Z | src/cpp/qpsolver/cvxopt/examples/doc/chap8/conelp.py | Hap-Hugh/quicksel | 10eee90b759638d5c54ba19994ae8e36e90e12b8 | [
"Apache-2.0"
] | 6 | 2020-08-14T22:02:07.000Z | 2021-03-31T07:08:29.000Z | # The small linear cone program of section 8.1 (Linear cone programs).
from cvxopt import matrix, solvers
# Objective vector: minimize c'x over the three variables.
c = matrix([-6., -4., -5.])
# Constraint data for G*x + s = h with s in the product cone described by
# `dims`; each inner list is one column of G (one variable).
G = matrix([[ 16., 7., 24., -8., 8., -1., 0., -1., 0., 0., 7.,
             -5., 1., -5., 1., -7., 1., -7., -4.],
            [-14., 2., 7., -13., -18., 3., 0., 0., -1., 0., 3.,
             13., -6., 13., 12., -10., -6., -10., -28.],
            [ 5., 0., -15., 12., -6., 17., 0., 0., 0., -1., 9.,
             6., -6., 6., -7., -7., -6., -7., -11.]])
h = matrix( [ -3., 5., 12., -2., -14., -13., 10., 0., 0., 0., 68.,
              -30., -19., -30., 99., 23., -19., 23., 10.] )
# Cone sizes: 'l' = number of nonnegative-orthant rows, 'q' = dimensions of
# the second-order cones, 's' = orders of the semidefinite blocks
# (cvxopt.solvers.conelp convention).
dims = {'l': 2, 'q': [4, 4], 's': [3]}
sol = solvers.conelp(c, G, h, dims)
print("\nStatus: " + sol['status'])
print("\nx = \n")
print(sol['x'])
print("\nz = \n")
print(sol['z'])
| 39.571429 | 75 | 0.361011 |
from cvxopt import matrix, solvers
# Small linear cone program: minimize c'x s.t. G*x + s = h, s in cone `dims`.
c = matrix([-6., -4., -5.])
# Each inner list is one column of G (one variable).
G = matrix([[ 16., 7., 24., -8., 8., -1., 0., -1., 0., 0., 7.,
             -5., 1., -5., 1., -7., 1., -7., -4.],
            [-14., 2., 7., -13., -18., 3., 0., 0., -1., 0., 3.,
             13., -6., 13., 12., -10., -6., -10., -28.],
            [ 5., 0., -15., 12., -6., 17., 0., 0., 0., -1., 9.,
             6., -6., 6., -7., -7., -6., -7., -11.]])
h = matrix( [ -3., 5., 12., -2., -14., -13., 10., 0., 0., 0., 68.,
              -30., -19., -30., 99., 23., -19., 23., 10.] )
# 'l': nonnegative-orthant rows, 'q': second-order cone sizes,
# 's': semidefinite block orders (cvxopt.solvers.conelp convention).
dims = {'l': 2, 'q': [4, 4], 's': [3]}
sol = solvers.conelp(c, G, h, dims)
print("\nStatus: " + sol['status'])
print("\nx = \n")
print(sol['x'])
print("\nz = \n")
print(sol['z'])
f7265c3d05c95935872b8f725e78bb38be5404e8 | 9,800 | py | Python | stacker/tests/test_plan.py | DomainGroupOSS/stacker | 88b71bc5cfcbbf7957245d821434b95801230425 | [
"BSD-2-Clause"
] | 1 | 2018-07-17T11:23:47.000Z | 2018-07-17T11:23:47.000Z | stacker/tests/test_plan.py | DomainGroupOSS/stacker | 88b71bc5cfcbbf7957245d821434b95801230425 | [
"BSD-2-Clause"
] | null | null | null | stacker/tests/test_plan.py | DomainGroupOSS/stacker | 88b71bc5cfcbbf7957245d821434b95801230425 | [
"BSD-2-Clause"
] | 1 | 2020-02-29T04:49:04.000Z | 2020-02-29T04:49:04.000Z | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
import os
import shutil
import tempfile
import unittest
import mock
from stacker.context import Context, Config
from stacker.dag import walk
from stacker.util import stack_template_key_name
from stacker.lookups.registry import (
register_lookup_handler,
unregister_lookup_handler,
)
from stacker.plan import (
Step,
build_plan,
)
from stacker.exceptions import (
CancelExecution,
GraphError,
PlanFailed,
)
from stacker.status import (
SUBMITTED,
COMPLETE,
SKIPPED,
FAILED,
)
from stacker.stack import Stack
from .factories import generate_definition
count = 0
class TestStep(unittest.TestCase):
    """Tests for the status state machine of a single plan Step."""
    def setUp(self):
        """Build a Step around a mocked stack; fn is unused by these tests."""
        stack = mock.MagicMock()
        stack.name = "stack"
        stack.fqn = "namespace-stack"
        self.step = Step(stack=stack, fn=None)
    def test_status(self):
        """A step starts unsubmitted, then moves SUBMITTED -> COMPLETE."""
        self.assertFalse(self.step.submitted)
        self.assertFalse(self.step.completed)
        self.step.submit()
        self.assertEqual(self.step.status, SUBMITTED)
        self.assertTrue(self.step.submitted)
        self.assertFalse(self.step.completed)
        self.step.complete()
        self.assertEqual(self.step.status, COMPLETE)
        self.assertNotEqual(self.step.status, SUBMITTED)
        self.assertTrue(self.step.submitted)
        self.assertTrue(self.step.completed)
        # Status objects must not compare equal to unrelated types.
        self.assertNotEqual(self.step.status, True)
        self.assertNotEqual(self.step.status, False)
        self.assertNotEqual(self.step.status, 'banana')
class TestPlan(unittest.TestCase):
    """Tests for building and executing stacker plans (a DAG of Steps)."""
    def setUp(self):
        """Create a minimal context and register a no-op lookup handler."""
        self.count = 0
        self.config = Config({"namespace": "namespace"})
        self.context = Context(config=self.config)
        register_lookup_handler("noop", lambda **kwargs: "test")
    def tearDown(self):
        """Unregister the lookup handler so tests stay isolated."""
        unregister_lookup_handler("noop")
    def test_plan(self):
        """The built graph encodes bastion's dependency on vpc."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        bastion = Stack(
            definition=generate_definition('bastion', 1, requires=[vpc.name]),
            context=self.context)
        plan = build_plan(description="Test", steps=[
            Step(vpc, fn=None), Step(bastion, fn=None)])
        self.assertEqual(plan.graph.to_dict(), {
            'bastion.1': set(['vpc.1']),
            'vpc.1': set([])})
    def test_execute_plan(self):
        """Steps run in dependency order: vpc before bastion."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        bastion = Stack(
            definition=generate_definition('bastion', 1, requires=[vpc.name]),
            context=self.context)
        calls = []
        def fn(stack, status=None):
            calls.append(stack.fqn)
            return COMPLETE
        plan = build_plan(
            description="Test", steps=[Step(vpc, fn), Step(bastion, fn)])
        plan.execute(walk)
        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
    def test_execute_plan_filtered(self):
        """Targeting db.1 runs only db.1 and its transitive dependencies."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        db = Stack(
            definition=generate_definition('db', 1, requires=[vpc.name]),
            context=self.context)
        app = Stack(
            definition=generate_definition('app', 1, requires=[db.name]),
            context=self.context)
        calls = []
        def fn(stack, status=None):
            calls.append(stack.fqn)
            return COMPLETE
        plan = build_plan(
            description="Test",
            steps=[Step(vpc, fn), Step(db, fn), Step(app, fn)],
            targets=['db.1'])
        plan.execute(walk)
        self.assertEquals(calls, [
            'namespace-vpc.1', 'namespace-db.1'])
    def test_execute_plan_exception(self):
        """An exception in a step marks it FAILED and fails the plan."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        bastion = Stack(
            definition=generate_definition('bastion', 1, requires=[vpc.name]),
            context=self.context)
        calls = []
        def fn(stack, status=None):
            calls.append(stack.fqn)
            if stack.name == vpc_step.name:
                raise ValueError('Boom')
            return COMPLETE
        vpc_step = Step(vpc, fn)
        bastion_step = Step(bastion, fn)
        plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
        with self.assertRaises(PlanFailed):
            plan.execute(walk)
        self.assertEquals(calls, ['namespace-vpc.1'])
        self.assertEquals(vpc_step.status, FAILED)
    def test_execute_plan_skipped(self):
        """A SKIPPED step does not block its dependents from running."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        bastion = Stack(
            definition=generate_definition('bastion', 1, requires=[vpc.name]),
            context=self.context)
        calls = []
        def fn(stack, status=None):
            calls.append(stack.fqn)
            # NOTE(review): compares fqn against the step *name*; the
            # sibling tests compare stack.name -- confirm this is intended.
            if stack.fqn == vpc_step.name:
                return SKIPPED
            return COMPLETE
        vpc_step = Step(vpc, fn)
        bastion_step = Step(bastion, fn)
        plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
        plan.execute(walk)
        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
    def test_execute_plan_failed(self):
        """A FAILED step fails the plan, but independent steps still run."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        bastion = Stack(
            definition=generate_definition('bastion', 1, requires=[vpc.name]),
            context=self.context)
        db = Stack(
            definition=generate_definition('db', 1),
            context=self.context)
        calls = []
        def fn(stack, status=None):
            calls.append(stack.fqn)
            if stack.name == vpc_step.name:
                return FAILED
            return COMPLETE
        vpc_step = Step(vpc, fn)
        bastion_step = Step(bastion, fn)
        db_step = Step(db, fn)
        plan = build_plan(description="Test", steps=[
            vpc_step, bastion_step, db_step])
        with self.assertRaises(PlanFailed):
            plan.execute(walk)
        calls.sort()
        self.assertEquals(calls, ['namespace-db.1', 'namespace-vpc.1'])
    def test_execute_plan_cancelled(self):
        """CancelExecution in a step cancels it without failing the plan."""
        vpc = Stack(
            definition=generate_definition('vpc', 1),
            context=self.context)
        bastion = Stack(
            definition=generate_definition('bastion', 1, requires=[vpc.name]),
            context=self.context)
        calls = []
        def fn(stack, status=None):
            calls.append(stack.fqn)
            if stack.fqn == vpc_step.name:
                raise CancelExecution
            return COMPLETE
        vpc_step = Step(vpc, fn)
        bastion_step = Step(bastion, fn)
        plan = build_plan(description="Test", steps=[
            vpc_step, bastion_step])
        plan.execute(walk)
        self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
    def test_build_plan_missing_dependency(self):
        """Requiring an undefined stack raises GraphError at build time."""
        bastion = Stack(
            definition=generate_definition(
                'bastion', 1, requires=['vpc.1']),
            context=self.context)
        with self.assertRaises(GraphError) as expected:
            build_plan(description="Test", steps=[Step(bastion, None)])
        message_starts = (
            "Error detected when adding 'vpc.1' "
            "as a dependency of 'bastion.1':"
        )
        message_contains = "dependent node vpc.1 does not exist"
        self.assertTrue(str(expected.exception).startswith(message_starts))
        self.assertTrue(message_contains in str(expected.exception))
    def test_build_plan_cyclic_dependencies(self):
        """A dependency cycle raises GraphError mentioning acyclicity."""
        vpc = Stack(
            definition=generate_definition(
                'vpc', 1),
            context=self.context)
        db = Stack(
            definition=generate_definition(
                'db', 1, requires=['app.1']),
            context=self.context)
        app = Stack(
            definition=generate_definition(
                'app', 1, requires=['db.1']),
            context=self.context)
        with self.assertRaises(GraphError) as expected:
            build_plan(
                description="Test",
                steps=[Step(vpc, None), Step(db, None), Step(app, None)])
        message = ("Error detected when adding 'db.1' "
                   "as a dependency of 'app.1': graph is "
                   "not acyclic")
        self.assertEqual(str(expected.exception), message)
    def test_dump(self, *args):
        """plan.dump() writes one blueprint template file per step."""
        requires = None
        steps = []
        # Build a 5-stack chain where each stack requires the previous one.
        for i in range(5):
            overrides = {
                "variables": {
                    "PublicSubnets": "1",
                    "SshKeyName": "1",
                    "PrivateSubnets": "1",
                    "Random": "${noop something}",
                },
                "requires": requires,
            }
            stack = Stack(
                definition=generate_definition('vpc', i, **overrides),
                context=self.context)
            requires = [stack.name]
            steps += [Step(stack, None)]
        plan = build_plan(description="Test", steps=steps)
        tmp_dir = tempfile.mkdtemp()
        try:
            plan.dump(tmp_dir, context=self.context)
            for step in plan.steps:
                template_path = os.path.join(
                    tmp_dir,
                    stack_template_key_name(step.stack.blueprint))
                self.assertTrue(os.path.isfile(template_path))
        finally:
            shutil.rmtree(tmp_dir)
| 30.434783 | 78 | 0.580408 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import range
import os
import shutil
import tempfile
import unittest
import mock
from stacker.context import Context, Config
from stacker.dag import walk
from stacker.util import stack_template_key_name
from stacker.lookups.registry import (
register_lookup_handler,
unregister_lookup_handler,
)
from stacker.plan import (
Step,
build_plan,
)
from stacker.exceptions import (
CancelExecution,
GraphError,
PlanFailed,
)
from stacker.status import (
SUBMITTED,
COMPLETE,
SKIPPED,
FAILED,
)
from stacker.stack import Stack
from .factories import generate_definition
count = 0
class TestStep(unittest.TestCase):
    """Tests for the status state machine of a single plan Step."""
    def setUp(self):
        """Build a Step around a mocked stack; fn is unused by these tests."""
        stack = mock.MagicMock()
        stack.name = "stack"
        stack.fqn = "namespace-stack"
        self.step = Step(stack=stack, fn=None)
    def test_status(self):
        """A step starts unsubmitted, then moves SUBMITTED -> COMPLETE."""
        self.assertFalse(self.step.submitted)
        self.assertFalse(self.step.completed)
        self.step.submit()
        self.assertEqual(self.step.status, SUBMITTED)
        self.assertTrue(self.step.submitted)
        self.assertFalse(self.step.completed)
        self.step.complete()
        self.assertEqual(self.step.status, COMPLETE)
        self.assertNotEqual(self.step.status, SUBMITTED)
        self.assertTrue(self.step.submitted)
        self.assertTrue(self.step.completed)
        # Status objects must not compare equal to unrelated types.
        self.assertNotEqual(self.step.status, True)
        self.assertNotEqual(self.step.status, False)
        self.assertNotEqual(self.step.status, 'banana')
class TestPlan(unittest.TestCase):
def setUp(self):
self.count = 0
self.config = Config({"namespace": "namespace"})
self.context = Context(config=self.config)
register_lookup_handler("noop", lambda **kwargs: "test")
def tearDown(self):
unregister_lookup_handler("noop")
def test_plan(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
plan = build_plan(description="Test", steps=[
Step(vpc, fn=None), Step(bastion, fn=None)])
self.assertEqual(plan.graph.to_dict(), {
'bastion.1': set(['vpc.1']),
'vpc.1': set([])})
def test_execute_plan(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
return COMPLETE
plan = build_plan(
description="Test", steps=[Step(vpc, fn), Step(bastion, fn)])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_execute_plan_filtered(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
db = Stack(
definition=generate_definition('db', 1, requires=[vpc.name]),
context=self.context)
app = Stack(
definition=generate_definition('app', 1, requires=[db.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
return COMPLETE
plan = build_plan(
description="Test",
steps=[Step(vpc, fn), Step(db, fn), Step(app, fn)],
targets=['db.1'])
plan.execute(walk)
self.assertEquals(calls, [
'namespace-vpc.1', 'namespace-db.1'])
def test_execute_plan_exception(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.name == vpc_step.name:
raise ValueError('Boom')
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
with self.assertRaises(PlanFailed):
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1'])
self.assertEquals(vpc_step.status, FAILED)
def test_execute_plan_skipped(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.fqn == vpc_step.name:
return SKIPPED
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[vpc_step, bastion_step])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_execute_plan_failed(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
db = Stack(
definition=generate_definition('db', 1),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.name == vpc_step.name:
return FAILED
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
db_step = Step(db, fn)
plan = build_plan(description="Test", steps=[
vpc_step, bastion_step, db_step])
with self.assertRaises(PlanFailed):
plan.execute(walk)
calls.sort()
self.assertEquals(calls, ['namespace-db.1', 'namespace-vpc.1'])
def test_execute_plan_cancelled(self):
vpc = Stack(
definition=generate_definition('vpc', 1),
context=self.context)
bastion = Stack(
definition=generate_definition('bastion', 1, requires=[vpc.name]),
context=self.context)
calls = []
def fn(stack, status=None):
calls.append(stack.fqn)
if stack.fqn == vpc_step.name:
raise CancelExecution
return COMPLETE
vpc_step = Step(vpc, fn)
bastion_step = Step(bastion, fn)
plan = build_plan(description="Test", steps=[
vpc_step, bastion_step])
plan.execute(walk)
self.assertEquals(calls, ['namespace-vpc.1', 'namespace-bastion.1'])
def test_build_plan_missing_dependency(self):
bastion = Stack(
definition=generate_definition(
'bastion', 1, requires=['vpc.1']),
context=self.context)
with self.assertRaises(GraphError) as expected:
build_plan(description="Test", steps=[Step(bastion, None)])
message_starts = (
"Error detected when adding 'vpc.1' "
"as a dependency of 'bastion.1':"
)
message_contains = "dependent node vpc.1 does not exist"
self.assertTrue(str(expected.exception).startswith(message_starts))
self.assertTrue(message_contains in str(expected.exception))
def test_build_plan_cyclic_dependencies(self):
vpc = Stack(
definition=generate_definition(
'vpc', 1),
context=self.context)
db = Stack(
definition=generate_definition(
'db', 1, requires=['app.1']),
context=self.context)
app = Stack(
definition=generate_definition(
'app', 1, requires=['db.1']),
context=self.context)
with self.assertRaises(GraphError) as expected:
build_plan(
description="Test",
steps=[Step(vpc, None), Step(db, None), Step(app, None)])
message = ("Error detected when adding 'db.1' "
"as a dependency of 'app.1': graph is "
"not acyclic")
self.assertEqual(str(expected.exception), message)
def test_dump(self, *args):
requires = None
steps = []
for i in range(5):
overrides = {
"variables": {
"PublicSubnets": "1",
"SshKeyName": "1",
"PrivateSubnets": "1",
"Random": "${noop something}",
},
"requires": requires,
}
stack = Stack(
definition=generate_definition('vpc', i, **overrides),
context=self.context)
requires = [stack.name]
steps += [Step(stack, None)]
plan = build_plan(description="Test", steps=steps)
tmp_dir = tempfile.mkdtemp()
try:
plan.dump(tmp_dir, context=self.context)
for step in plan.steps:
template_path = os.path.join(
tmp_dir,
stack_template_key_name(step.stack.blueprint))
self.assertTrue(os.path.isfile(template_path))
finally:
shutil.rmtree(tmp_dir)
| true | true |
f7265c6925ef94f77153adae115c2e78a7324353 | 3,058 | py | Python | over_roasted/settings.py | xmedinavei/over_roasted_app | c5b9525c6435d5d2285d86961eb8674108f2c88c | [
"MIT"
] | null | null | null | over_roasted/settings.py | xmedinavei/over_roasted_app | c5b9525c6435d5d2285d86961eb8674108f2c88c | [
"MIT"
] | null | null | null | over_roasted/settings.py | xmedinavei/over_roasted_app | c5b9525c6435d5d2285d86961eb8674108f2c88c | [
"MIT"
] | null | null | null | from pathlib import Path
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3+7ekm6aghztb!h1b@xcvvjid8$o%rb7bb3bha446)d1pk573*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Local apps
'users',
'recipes',
# Third-party apps
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'over_roasted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.joinpath('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'over_roasted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/users/login/'
LOGIN_REDIRECT_URL = '/recipes/'
LOGOUT_REDIRECT_URL = LOGIN_URL
django_heroku.settings(locals())
| 24.66129 | 91 | 0.692283 | from pathlib import Path
import django_heroku
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = '3+7ekm6aghztb!h1b@xcvvjid8$o%rb7bb3bha446)d1pk573*'
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
# Django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Local apps
'users',
'recipes',
# Third-party apps
'django_extensions',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'over_roasted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR.joinpath('templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'over_roasted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/users/login/'
LOGIN_REDIRECT_URL = '/recipes/'
LOGOUT_REDIRECT_URL = LOGIN_URL
django_heroku.settings(locals())
| true | true |
f7265d7477ff3fba1b5e7f80d15d88b7c11ed07e | 1,092 | py | Python | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | 2 | 2020-04-12T01:23:25.000Z | 2021-05-22T13:46:00.000Z | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | null | null | null | examples/finalterm-shell-integration.py | davidbrochart/python-prompt-toolkit | 8498692b31671fee7c5a426300a9df2ee290eae2 | [
"BSD-3-Clause"
] | 2 | 2016-12-30T23:57:44.000Z | 2021-05-22T13:50:21.000Z | #!/usr/bin/env python
"""
Mark the start and end of the prompt with Final term (iterm2) escape sequences.
See: https://iterm2.com/finalterm.html
"""
from __future__ import unicode_literals
from prompt_toolkit import prompt
from prompt_toolkit.token import Token
import sys
BEFORE_PROMPT = '\033]133;A\a'
AFTER_PROMPT = '\033]133;B\a'
BEFORE_OUTPUT = '\033]133;C\a'
AFTER_OUTPUT = '\033]133;D;{command_status}\a' # command_status is the command status, 0-255
def get_prompt_tokens(cli):
# Generate the tokens for the prompt.
# Important: use the `ZeroWidthEscape` token only if you are sure that
# writing this as raw text to the output will not introduce any
# cursor movements.
return [
(Token.ZeroWidthEscape, BEFORE_PROMPT),
(Token, 'Say something: # '),
(Token.ZeroWidthEscape, AFTER_PROMPT),
]
if __name__ == '__main__':
answer = prompt(get_prompt_tokens=get_prompt_tokens)
sys.stdout.write(BEFORE_OUTPUT)
print('You said: %s' % answer)
sys.stdout.write(AFTER_OUTPUT.format(command_status=0))
| 29.513514 | 92 | 0.701465 |
from __future__ import unicode_literals
from prompt_toolkit import prompt
from prompt_toolkit.token import Token
import sys
BEFORE_PROMPT = '\033]133;A\a'
AFTER_PROMPT = '\033]133;B\a'
BEFORE_OUTPUT = '\033]133;C\a'
AFTER_OUTPUT = '\033]133;D;{command_status}\a'
def get_prompt_tokens(cli):
return [
(Token.ZeroWidthEscape, BEFORE_PROMPT),
(Token, 'Say something: # '),
(Token.ZeroWidthEscape, AFTER_PROMPT),
]
if __name__ == '__main__':
answer = prompt(get_prompt_tokens=get_prompt_tokens)
sys.stdout.write(BEFORE_OUTPUT)
print('You said: %s' % answer)
sys.stdout.write(AFTER_OUTPUT.format(command_status=0))
| true | true |
f7265e08d1ad65a2a3c1ac2d5369c00df1bea063 | 2,164 | py | Python | tests/unit/utils/pyxb_utils.py | MaxTakahashi/hammr | cfe593ccfdddb7f98185e561feed6a40a866b585 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/pyxb_utils.py | MaxTakahashi/hammr | cfe593ccfdddb7f98185e561feed6a40a866b585 | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/pyxb_utils.py | MaxTakahashi/hammr | cfe593ccfdddb7f98185e561feed6a40a866b585 | [
"Apache-2.0"
] | null | null | null | # Copyright 2007-2017 UShareSoft SAS, All rights reserved
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# When anonymous type are used inside uforge.xsd, anonymous type such as "CTD_ANON_238" will be created inside uforge.py.
# We can't use directly these types in unit test for mocking otherwise unit tests may failed each time the uforge.xsd
# is slightly modified.
# So to avoid that, the type to used is retrieved dynamically using the PyXB internal attributes.
# This method returns the type of the element in a list attributes
# Example of use:
# regionType = get_pyXB_anon_type_for_list(regions.regionEntities)
# region = regionType()
# regions.regionEntities.append(region)
def get_pyXB_anon_type_for_list_attrb(list_attrb):
return list_attrb._PluralBinding__elementBinding._element__typeDefinition
# When anonymous type are used inside uforge.xsd, anonymous type such as "CTD_ANON_238" will be created inside uforge.py.
# We can't use directly these types in unit test for mocking otherwise unit tests may failed each time the uforge.xsd
# is slightly modified.
# So to avoid that, the type to used is retrieved dynamically using the PyXB internal attributes.
# This method returns the type of the attribute "attrb_name" in the "attrb_holder" object
# Example of use:
# flavorType = get_pyXB_anon_type_for_list(flavors.flavor)
# flavor = flavorType()
# flavors.flavor.append(flavor)
def get_pyXB_anon_type_for_simple_attrb(attrb_holder, attrb_name):
return getattr(attrb_holder, "_" + type(
attrb_holder).__name__ + "__" + attrb_name)._ElementDeclaration__elementBinding._element__typeDefinition | 55.487179 | 121 | 0.77634 |
# is slightly modified.
# So to avoid that, the type to used is retrieved dynamically using the PyXB internal attributes.
# This method returns the type of the element in a list attributes
# Example of use:
# regionType = get_pyXB_anon_type_for_list(regions.regionEntities)
# region = regionType()
# regions.regionEntities.append(region)
def get_pyXB_anon_type_for_list_attrb(list_attrb):
return list_attrb._PluralBinding__elementBinding._element__typeDefinition
# When anonymous type are used inside uforge.xsd, anonymous type such as "CTD_ANON_238" will be created inside uforge.py.
# We can't use directly these types in unit test for mocking otherwise unit tests may failed each time the uforge.xsd
def get_pyXB_anon_type_for_simple_attrb(attrb_holder, attrb_name):
return getattr(attrb_holder, "_" + type(
attrb_holder).__name__ + "__" + attrb_name)._ElementDeclaration__elementBinding._element__typeDefinition | true | true |
f7265e3a53380992aefc3e1f0e5022e3b36f9db4 | 4,782 | py | Python | ADSref.py | SimonJMurphy/ADSref | c144d077d622987c42d5cf8d9d111afa04073af2 | [
"MIT"
] | 2 | 2021-01-14T10:13:32.000Z | 2021-01-31T23:51:09.000Z | ADSref.py | SimonJMurphy/ADSref | c144d077d622987c42d5cf8d9d111afa04073af2 | [
"MIT"
] | null | null | null | ADSref.py | SimonJMurphy/ADSref | c144d077d622987c42d5cf8d9d111afa04073af2 | [
"MIT"
] | null | null | null | import ads
ads.config.token = 'my token'
import numpy as np
# Filenames
## Enter the filename for first-author publications here:
first_author = "first_author.bib"
## Enter the filename for cd-authored publications here:
co_author = "co_author.bib"
# Function Declarations
def extract_bibcodes(filename):
"""Takes a .bib filename, looks for bibcodes on the first line of each entry, and parses into a list."""
f = open(filename)
full_list = f.readlines()
bibcodes = []
# drop yCat, arxiv, PhDT, and other non-refereed entries
# Workaround, since I couldn't get the API to accept property:refereed or property=refereed to work when searching
exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
for line in full_list:
if line[0] == "@":
if not any(x in line for x in exclude):
bibcodes.append(line.split("{")[1].replace(",\n",""))
return bibcodes
def author_format(authors):
if len(authors) == 1:
a = authors[0]
elif len(authors) == 2:
a = authors[0] + " \& " + authors[1]
else:
a = authors[0] + ' et al.'
return a
def journal_name(bibcode):
return bibcode.split(".")[0][4:].replace("&","\&")
def adsurl(bibcode):
return 'https://ui.adsabs.harvard.edu/abs/' + bibcode
def latex_title_greek(title):
greek_dict = {"α":r"$\alpha$", "β":r"$\beta$", "γ":r"$\gamma$", "δ":r"$\delta$", "ε":r"$\epsilon$", "ζ":r"$\zeta$", "η":r"$\eta$", "ι":r"$\iota$", "θ":r"$\theta$", "κ":r"$\kappa$", "λ":r"$\lambda$", "μ":r"$\mu$", "ν":r"$\nu$", "ξ":r"$\xi$", "π":r"$\pi$", "ρ":r"$\rho$", "σ":r"$\sigma$", "τ":r"$\tau$", "φ":r"$\phi$", "χ":r"$\chi$", "ψ":r"$\psi$", "ω":r"$\omega$"}
for key in greek_dict.keys():
title = title.replace(key, greek_dict[key])
return title
def citation_formatter(cites):
if cites is None:
return ""
elif cites == 0:
return ""
else:
if cites < 10:
return f"Cited: \\phantom" + "{1}" + f"{cites}"
else:
return f"Cited: {cites}"
def latex_strings(paper_list):
output = []
n = len(paper_list)
for i,p in enumerate(paper_list):
title = p.title[0]
entry = "\\textbf{" + f"{n-i}" + "}. " + '\\' + 'href{' + adsurl(p.bibcode) + "}{" + f"{latex_title_greek(title)}" + "}" + "\\\\"
entry += author_format(p.author)
entry += f" ({p.year}) "
entry += journal_name(p.bibcode)
entry += f" {p.volume},"
entry += f" {p.page[0]}."
entry += ' \\hspace*{\\fill}' + citation_formatter(p.citation_count) + "\\vspace{1mm}" + "\\\\"
output.append(entry)
output[0] = "\\noindent " + output[0]
return output
def export_latex(filename,latex_string_list):
f = open(filename,"w")
for line in latex_string_list:
f.write(line+"\n")
f.close()
return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)
# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in co_bibcodes]
# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
print("Dropping Errata, Corrigenda...")
for p in papers:
if "Erratum" in p.title[0] or "Corrigendum" in p.title[0]:
papers.remove(p)
for p in co_papers:
if "Erratum" in p.title[0] or "Corrigendum" in p.title[0]:
co_papers.remove(p)
# Sum citations
first_author_cites = 0
co_author_cites = 0
for p in papers:
if p.citation_count is not None:
first_author_cites += p.citation_count
for p in co_papers:
if p.citation_count is not None:
co_author_cites += p.citation_count
# Compile LaTeX string
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)
# Export to LaTeX
print("Exporting to LaTeX...")
export_latex("first_author.tex",output)
export_latex("co_author.tex",co_output)
print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n") | 32.97931 | 367 | 0.622961 | import ads
ads.config.token = 'my token'
import numpy as np
en(filename)
full_list = f.readlines()
bibcodes = []
exclude = ['arXiv','tmp','yCat','PhDT','AAS','ASPC','BSRSL','conf','EPJWC','IAUFM','IAUGA','IAUS','hst','iue','jwst','spzr','prop']
for line in full_list:
if line[0] == "@":
if not any(x in line for x in exclude):
bibcodes.append(line.split("{")[1].replace(",\n",""))
return bibcodes
def author_format(authors):
if len(authors) == 1:
a = authors[0]
elif len(authors) == 2:
a = authors[0] + " \& " + authors[1]
else:
a = authors[0] + ' et al.'
return a
def journal_name(bibcode):
return bibcode.split(".")[0][4:].replace("&","\&")
def adsurl(bibcode):
return 'https://ui.adsabs.harvard.edu/abs/' + bibcode
def latex_title_greek(title):
greek_dict = {"α":r"$\alpha$", "β":r"$\beta$", "γ":r"$\gamma$", "δ":r"$\delta$", "ε":r"$\epsilon$", "ζ":r"$\zeta$", "η":r"$\eta$", "ι":r"$\iota$", "θ":r"$\theta$", "κ":r"$\kappa$", "λ":r"$\lambda$", "μ":r"$\mu$", "ν":r"$\nu$", "ξ":r"$\xi$", "π":r"$\pi$", "ρ":r"$\rho$", "σ":r"$\sigma$", "τ":r"$\tau$", "φ":r"$\phi$", "χ":r"$\chi$", "ψ":r"$\psi$", "ω":r"$\omega$"}
for key in greek_dict.keys():
title = title.replace(key, greek_dict[key])
return title
def citation_formatter(cites):
if cites is None:
return ""
elif cites == 0:
return ""
else:
if cites < 10:
return f"Cited: \\phantom" + "{1}" + f"{cites}"
else:
return f"Cited: {cites}"
def latex_strings(paper_list):
output = []
n = len(paper_list)
for i,p in enumerate(paper_list):
title = p.title[0]
entry = "\\textbf{" + f"{n-i}" + "}. " + '\\' + 'href{' + adsurl(p.bibcode) + "}{" + f"{latex_title_greek(title)}" + "}" + "\\\\"
entry += author_format(p.author)
entry += f" ({p.year}) "
entry += journal_name(p.bibcode)
entry += f" {p.volume},"
entry += f" {p.page[0]}."
entry += ' \\hspace*{\\fill}' + citation_formatter(p.citation_count) + "\\vspace{1mm}" + "\\\\"
output.append(entry)
output[0] = "\\noindent " + output[0]
return output
def export_latex(filename,latex_string_list):
f = open(filename,"w")
for line in latex_string_list:
f.write(line+"\n")
f.close()
return "Saved."
# Parse bibcodes
print("Parsing bibcodes...")
bibcodes = extract_bibcodes(first_author)
co_bibcodes = extract_bibcodes(co_author)
# Search for papers and their attributes from bibcodes
print("Querying the ADS API for paper metadata... This may take a while if there are many entries...")
papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in bibcodes]
co_papers = [list(ads.SearchQuery(bibcode=bibcode, fl=['bibcode', 'title', 'author', 'year', 'volume', 'page', 'citation_count']))[0] for bibcode in co_bibcodes]
# Remove Errata
## Because Ew. And if anyone cares about the paper content they'll discover errata when they visit the ADS pages.
print("Dropping Errata, Corrigenda...")
for p in papers:
if "Erratum" in p.title[0] or "Corrigendum" in p.title[0]:
papers.remove(p)
for p in co_papers:
if "Erratum" in p.title[0] or "Corrigendum" in p.title[0]:
co_papers.remove(p)
first_author_cites = 0
co_author_cites = 0
for p in papers:
if p.citation_count is not None:
first_author_cites += p.citation_count
for p in co_papers:
if p.citation_count is not None:
co_author_cites += p.citation_count
print("Compiling LaTeX strings...")
output = latex_strings(papers)
co_output = latex_strings(co_papers)
print("Exporting to LaTeX...")
export_latex("first_author.tex",output)
export_latex("co_author.tex",co_output)
print(f"\nThere are {len(papers)} first-author papers, and {len(co_papers)} co-authored papers.")
print(f"They have a total of {first_author_cites} and {co_author_cites} citations, respectively.")
print("\n\n.tex files prepared. Now run:\n")
print("\t pdflatex publications.tex\n\n\n") | true | true |
f7265e57b6d0c1d7e79981da52b990daf420ccab | 7,670 | py | Python | tests/test_breadcrumbs.py | PavloKapyshin/paka.breadcrumbs | ae57d1a0d609ab39f81c4b0b44d6f7081602b079 | [
"BSD-3-Clause"
] | 1 | 2018-10-28T03:02:03.000Z | 2018-10-28T03:02:03.000Z | tests/test_breadcrumbs.py | PavloKapyshin/paka.breadcrumbs | ae57d1a0d609ab39f81c4b0b44d6f7081602b079 | [
"BSD-3-Clause"
] | null | null | null | tests/test_breadcrumbs.py | PavloKapyshin/paka.breadcrumbs | ae57d1a0d609ab39f81c4b0b44d6f7081602b079 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from paka.breadcrumbs import Bread, Crumb
class BreadcrumbsTest(unittest.TestCase):
def setUp(self):
self.site_name = "Some site Name"
def test_breadcrumbs_can_be_converted_to_list(self):
crumbs = list(Bread(self.site_name))
self.assertGreater(len(crumbs), 0)
def test_breadcrumbs_can_be_indexed(self):
self.assertIsInstance(Bread(self.site_name)[0], Crumb)
def test_default_site_crumb(self):
crumb, = Bread(self.site_name)
self.assertEqual(crumb.label, self.site_name)
self.assertEqual(crumb.heading, self.site_name)
self.assertEqual(crumb.url_path, "/")
self.assertEqual(crumb.extra, {})
def test_changed_site_url_path(self):
url_path = "/some/other/"
crumb, = Bread(self.site_name, url_path=url_path)
self.assertEqual(crumb.url_path, url_path)
def test_changed_site_heading(self):
heading = "something different"
crumb, = Bread(self.site_name, heading=heading)
self.assertEqual(crumb.label, self.site_name)
self.assertEqual(crumb.heading, heading)
def test_changed_site_extra(self):
extra = {"a": 1, "b": 2}
crumb, = Bread(self.site_name, extra=extra)
self.assertEqual(crumb.extra, extra)
def test_adding_is_done_in_correct_order(self):
bcrumbs = Bread(self.site_name)
label, heading, url_path, extra = "Label", "Heading", "/test/", {1: 2}
bcrumbs.add(label, heading=heading, url_path=url_path, extra=extra)
site_crumb, test_crumb = bcrumbs
self.assertEqual(site_crumb.label, self.site_name)
self.assertEqual(site_crumb.heading, self.site_name)
self.assertEqual(site_crumb.url_path, "/")
self.assertEqual(site_crumb.extra, {})
self.assertEqual(test_crumb.label, label)
self.assertEqual(test_crumb.heading, heading)
self.assertEqual(test_crumb.url_path, url_path)
self.assertEqual(test_crumb.extra, extra)
def test_adding_defaults(self):
label = "some label"
bcrumbs = Bread(self.site_name)
bcrumbs.add(label)
test_crumb = bcrumbs[1]
self.assertEqual(test_crumb.label, label)
self.assertEqual(test_crumb.heading, label)
self.assertIsNone(test_crumb.url_path)
self.assertEqual(test_crumb.extra, {})
def test_adding_crumb(self):
expected_crumb = Crumb(
"Crumb here", heading="H", url_path="url", extra={1: 2})
bcrumbs = Bread(self.site_name)
bcrumbs.add_crumb(expected_crumb)
site_crumb, test_crumb = bcrumbs
self.assertEqual(expected_crumb, test_crumb)
def test_from_crumb(self):
expected_crumb = Crumb(
"Crumb here", heading="H", url_path="url", extra={1: 2})
bcrumbs = Bread.from_crumb(expected_crumb)
crumb, = bcrumbs
self.assertEqual(expected_crumb, crumb)
def test_from_crumbs(self):
crumbs = (
Crumb(self.site_name, extra={1: "one"}, url_path="/"),
Crumb("Second", url_path="/second/"),
Crumb("Third"))
bcrumbs = Bread.from_crumbs(crumbs)
for expected, actual in zip_longest(crumbs, bcrumbs):
self.assertEqual(expected, actual)
def test_from_empty_crumbs(self):
with self.assertRaises(ValueError):
Bread.from_crumbs(())
class BreadcrumbsTitleTest(unittest.TestCase):
def setUp(self):
from markupsafe import Markup
from mako.template import Template
self.markup_class = Markup
self.template_class = Template
self.site_name = "Some site Name"
def test_getting_title_with_one_crumb(self):
bcrumbs = Bread(self.site_name)
result = bcrumbs.get_title("←")
self.assertEqual(result, self.site_name)
self.assertIsInstance(result, self.markup_class)
def test_getting_title_with_several_crumbs(self):
bcrumbs = Bread(self.site_name)
bcrumbs.add("Subsection", heading="something", url_path="/sub")
bcrumbs.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
bcrumbs.add("here")
cases = (
("sep", "here sep <sub-sub> sep Subsection sep {}"),
("←", "here ← <sub-sub> ← Subsection ← {}"),
("<", "here < <sub-sub> < Subsection < {}"),
(
"<",
"here &lt; <sub-sub> "
"&lt; Subsection &lt; {}"),
(
self.markup_class("<"),
"here < <sub-sub> < Subsection < {}"))
for sep, tmpl in cases:
result = bcrumbs.get_title(sep)
self.assertIsInstance(result, self.markup_class)
self.assertEqual(result, tmpl.format(self.site_name))
def test_template_rendering(self):
bcrumbs = Bread(self.site_name)
bcrumbs.add("Subsection", heading="something", url_path="/sub")
bcrumbs.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
bcrumbs.add("here")
title = bcrumbs.get_title("<")
expected = (
"<title>here < <sub-sub> < Subsection "
"< {}</title>").format(self.site_name)
template_string = "<title>${title}</title>"
self.assertEqual(
self.template_class(
template_string, default_filters=["h"]).render(title=title),
expected)
self.assertEqual(
self.template_class(template_string).render(title=title),
expected)
class CrumbTest(unittest.TestCase):
    """Tests for ``Crumb`` construction and equality semantics."""

    def test_empty_url_path_results_in_none(self):
        # An empty string URL path is normalized to None.
        self.assertIsNone(Crumb("label", url_path="").url_path)

    def test_equality_defaults(self):
        # Two crumbs built from identical positional args compare equal.
        self.assertEqual(Crumb("a"), Crumb("a"))

    def test_equality_same_kwargs(self):
        kwargs = {
            "label": "Some label", "url_path": "/url/path",
            "heading": "Same", "extra": {0: 1}}
        self.assertEqual(Crumb(**kwargs), Crumb(**kwargs))

    def test_equality_different_label(self):
        shared = {
            "url_path": "/url/path", "heading": "Same", "extra": {1: 2}}
        self.assertNotEqual(
            Crumb(label="a", **shared), Crumb(label="b", **shared))

    def test_equality_different_url_path(self):
        shared = {
            "label": "Same", "heading": "Same too", "extra": {3: 4}}
        self.assertNotEqual(
            Crumb(url_path="a", **shared), Crumb(url_path="b", **shared))

    def test_equality_different_heading(self):
        shared = {
            "url_path": "/url/path", "label": "Same", "extra": {5: 6}}
        self.assertNotEqual(
            Crumb(heading="a", **shared), Crumb(heading="b", **shared))

    def test_equality_different_extra(self):
        shared = {
            "url_path": "/url/path", "heading": "Same", "label": "Same too"}
        self.assertNotEqual(
            Crumb(extra={"a": 1}, **shared), Crumb(extra={"b": 2}, **shared))
| 36.52381 | 78 | 0.619426 |
from __future__ import unicode_literals
import unittest
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from paka.breadcrumbs import Bread, Crumb
class BreadcrumbsTest(unittest.TestCase):
    """Tests for the ``Bread`` container: construction, adding, iteration."""

    def setUp(self):
        self.site_name = "Some site Name"

    def _check_crumb(self, crumb, label, heading, url_path, extra):
        # One-stop comparison of every Crumb attribute.
        self.assertEqual(crumb.label, label)
        self.assertEqual(crumb.heading, heading)
        self.assertEqual(crumb.url_path, url_path)
        self.assertEqual(crumb.extra, extra)

    def test_breadcrumbs_can_be_converted_to_list(self):
        self.assertGreater(len(list(Bread(self.site_name))), 0)

    def test_breadcrumbs_can_be_indexed(self):
        self.assertIsInstance(Bread(self.site_name)[0], Crumb)

    def test_default_site_crumb(self):
        site_crumb, = Bread(self.site_name)
        self._check_crumb(
            site_crumb, self.site_name, self.site_name, "/", {})

    def test_changed_site_url_path(self):
        custom_path = "/some/other/"
        site_crumb, = Bread(self.site_name, url_path=custom_path)
        self.assertEqual(site_crumb.url_path, custom_path)

    def test_changed_site_heading(self):
        custom_heading = "something different"
        site_crumb, = Bread(self.site_name, heading=custom_heading)
        self.assertEqual(site_crumb.label, self.site_name)
        self.assertEqual(site_crumb.heading, custom_heading)

    def test_changed_site_extra(self):
        payload = {"a": 1, "b": 2}
        site_crumb, = Bread(self.site_name, extra=payload)
        self.assertEqual(site_crumb.extra, payload)

    def test_adding_is_done_in_correct_order(self):
        bread = Bread(self.site_name)
        label, heading, url_path, extra = "Label", "Heading", "/test/", {1: 2}
        bread.add(label, heading=heading, url_path=url_path, extra=extra)
        site_crumb, added_crumb = bread
        # Site crumb stays first and untouched; the new crumb follows it.
        self._check_crumb(
            site_crumb, self.site_name, self.site_name, "/", {})
        self._check_crumb(added_crumb, label, heading, url_path, extra)

    def test_adding_defaults(self):
        label = "some label"
        bread = Bread(self.site_name)
        bread.add(label)
        # Heading defaults to the label; url_path defaults to None.
        self._check_crumb(bread[1], label, label, None, {})

    def test_adding_crumb(self):
        expected = Crumb(
            "Crumb here", heading="H", url_path="url", extra={1: 2})
        bread = Bread(self.site_name)
        bread.add_crumb(expected)
        self.assertEqual(bread[1], expected)

    def test_from_crumb(self):
        expected = Crumb(
            "Crumb here", heading="H", url_path="url", extra={1: 2})
        only_crumb, = Bread.from_crumb(expected)
        self.assertEqual(only_crumb, expected)

    def test_from_crumbs(self):
        crumbs = (
            Crumb(self.site_name, extra={1: "one"}, url_path="/"),
            Crumb("Second", url_path="/second/"),
            Crumb("Third"))
        # zip_longest so a length mismatch surfaces as a failing pair.
        for expected, actual in zip_longest(crumbs, Bread.from_crumbs(crumbs)):
            self.assertEqual(expected, actual)

    def test_from_empty_crumbs(self):
        with self.assertRaises(ValueError):
            Bread.from_crumbs(())
Bread.from_crumbs(())
class BreadcrumbsTitleTest(unittest.TestCase):
    """Tests for ``Bread.get_title`` and its interaction with Mako templates."""

    def setUp(self):
        # Imported lazily so the whole module still loads when these
        # optional test dependencies are absent elsewhere.
        from markupsafe import Markup
        from mako.template import Template
        self.markup_class = Markup
        self.template_class = Template
        self.site_name = "Some site Name"

    def _make_filled_bread(self):
        # Breadcrumbs with three crumbs stacked on top of the site crumb.
        bread = Bread(self.site_name)
        bread.add("Subsection", heading="something", url_path="/sub")
        bread.add("<sub-sub>", heading="Subsubsection", url_path="/sub/sub")
        bread.add("here")
        return bread

    def test_getting_title_with_one_crumb(self):
        """With only the site crumb, the title is just the site name."""
        title = Bread(self.site_name).get_title("←")
        self.assertIsInstance(title, self.markup_class)
        self.assertEqual(title, self.site_name)

    def test_getting_title_with_several_crumbs(self):
        """Titles list crumbs newest-first, joined by the separator."""
        bread = self._make_filled_bread()
        # NOTE(review): two cases below share the plain "<" separator with
        # different expectations -- this looks like HTML entities were lost
        # during text extraction; confirm against the package's docs.
        expectations = (
            ("sep", "here sep <sub-sub> sep Subsection sep {}"),
            ("←", "here ← <sub-sub> ← Subsection ← {}"),
            ("<", "here < <sub-sub> < Subsection < {}"),
            (
                "<",
                "here &lt; <sub-sub> "
                "&lt; Subsection &lt; {}"),
            (
                self.markup_class("<"),
                "here < <sub-sub> < Subsection < {}"))
        for separator, template in expectations:
            title = bread.get_title(separator)
            self.assertIsInstance(title, self.markup_class)
            self.assertEqual(title, template.format(self.site_name))

    def test_template_rendering(self):
        """The Markup title renders identically with and without escaping."""
        bread = self._make_filled_bread()
        title = bread.get_title("<")
        expected = (
            "<title>here < <sub-sub> < Subsection "
            "< {}</title>").format(self.site_name)
        template_string = "<title>${title}</title>"
        escaping_template = self.template_class(
            template_string, default_filters=["h"])
        plain_template = self.template_class(template_string)
        self.assertEqual(escaping_template.render(title=title), expected)
        self.assertEqual(plain_template.render(title=title), expected)
class CrumbTest(unittest.TestCase):
    """Tests for ``Crumb`` construction and equality semantics."""

    def test_empty_url_path_results_in_none(self):
        # An empty string URL path is normalized to None.
        self.assertIsNone(Crumb("label", url_path="").url_path)

    def test_equality_defaults(self):
        # Two crumbs built from identical positional args compare equal.
        self.assertEqual(Crumb("a"), Crumb("a"))

    def test_equality_same_kwargs(self):
        kwargs = {
            "label": "Some label", "url_path": "/url/path",
            "heading": "Same", "extra": {0: 1}}
        self.assertEqual(Crumb(**kwargs), Crumb(**kwargs))

    def test_equality_different_label(self):
        shared = {
            "url_path": "/url/path", "heading": "Same", "extra": {1: 2}}
        self.assertNotEqual(
            Crumb(label="a", **shared), Crumb(label="b", **shared))

    def test_equality_different_url_path(self):
        shared = {
            "label": "Same", "heading": "Same too", "extra": {3: 4}}
        self.assertNotEqual(
            Crumb(url_path="a", **shared), Crumb(url_path="b", **shared))

    def test_equality_different_heading(self):
        shared = {
            "url_path": "/url/path", "label": "Same", "extra": {5: 6}}
        self.assertNotEqual(
            Crumb(heading="a", **shared), Crumb(heading="b", **shared))

    def test_equality_different_extra(self):
        shared = {
            "url_path": "/url/path", "heading": "Same", "label": "Same too"}
        self.assertNotEqual(
            Crumb(extra={"a": 1}, **shared), Crumb(extra={"b": 2}, **shared))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.