code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from opera.constants import OperationHost, StandardInterfaceOperation, ConfigureInterfaceOperation
from opera.error import DataError
from opera.template.capability import Capability
from opera.template.interface import Interface
from opera.template.operation import Operation
class CollectorMixin:
def collect_types(self, service_ast):
typ = self.type.resolve_reference(service_ast)
return (self.type.data,) + typ.collect_types(service_ast)
def collect_properties(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_property_definitions(service_ast)
assignments = self.get("properties", {})
undeclared_props = set(assignments.keys()) - definitions.keys()
if undeclared_props:
self.abort(f"Invalid properties: {', '.join(undeclared_props)}.", self.loc)
for key, prop_definition in definitions.items():
prop_required = prop_definition.get("required", None)
prop_has_default = prop_definition.get("default", None)
prop_assignment = assignments.get(key, None)
if prop_required:
prop_required = prop_required.data
else:
prop_required = True
if prop_required and not prop_has_default and not prop_assignment:
self.abort(
f"Missing a required property: {key}. If the property is optional please specify this in the "
f"definition with 'required: false' or supply its default value using 'default: <value>'.", self.loc
)
return {
name: (assignments.get(name) or definition).get_value(definition.get_value_type(service_ast))
for name, definition in definitions.items()
}
def collect_attributes(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_attribute_definitions(service_ast)
assignments = self.get("attributes", {})
undeclared_attrs = set(assignments.keys()) - definitions.keys()
if undeclared_attrs:
self.abort(f"Invalid attributes: {', '.join(undeclared_attrs)}.", self.loc)
return {
name: (assignments.get(name) or definition).get_value(
definition.get_value_type(service_ast),
) for name, definition in definitions.items()
}
def collect_interfaces(self, service_ast): # pylint: disable=too-many-locals
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_interface_definitions(service_ast)
assignments = self.get("interfaces", {})
undeclared_interfaces = set(assignments.keys()) - definitions.keys()
if undeclared_interfaces:
self.abort(f"Undeclared interfaces: {', '.join(undeclared_interfaces)}.", self.loc)
# Next section is nasty. You have been warned.
interfaces = {}
for name, definition in definitions.items():
assignment = self.dig("interfaces", name) or {}
defined_operations = definition.get("operations", {})
assigned_operations = assignment.get("operations", {})
undeclared_operations = set(assigned_operations.keys()) - defined_operations.keys()
if undeclared_operations:
self.abort(f"Undeclared operations: {', '.join(undeclared_operations)}.", self.loc)
operations = {}
for op_name, op_definition in defined_operations.items():
op_assignment = assigned_operations.get(name, {})
undeclared_inputs = set()
# Inputs come from four different sources:
# 1. interface definition,
# 2. interface operation definition,
# 3. interface assignment in template section, and
# 4. interface operation assignment in template section.
inputs = {
k: v.get_value(v.get_value_type(service_ast))
for k, v in definition.get("inputs", {}).items()
}
inputs.update({
k: v.get_value(v.get_value_type(service_ast))
for k, v in op_definition.get("inputs", {}).items()
})
for k, v in assignment.get("inputs", {}).items():
if k not in inputs:
undeclared_inputs.add(k)
else:
inputs[k] = v.get_value(inputs[k].type)
for k, v in op_assignment.get("inputs", {}).items():
if k not in inputs:
undeclared_inputs.add(k)
else:
inputs[k] = v.get_value(inputs[k].type)
if undeclared_inputs:
self.abort(f"Undeclared inputs: {', '.join(undeclared_inputs)}.", self.loc)
# Outputs, which define the attribute mapping, come from:
# 1. interface operation definition,
# 2. interface operation assignment in template section
outputs = {
k: [s.data for s in v.data]
for k, v in op_definition.get("outputs", {}).items()
}
outputs.update({
k: [s.data for s in v.data]
for k, v in op_assignment.get("outputs", {}).items()
})
# Operation implementation details
impl = op_assignment.get("implementation") or op_definition.get("implementation")
# TODO: when impl is None we also pass that forward to operation objects. Fix this if needed.
timeout, operation_host = 0, None
if impl and "timeout" in impl:
timeout = impl.timeout.data
if impl and "operation_host" in impl:
operation_host_value = impl.operation_host.data
try:
operation_host = next(oh for oh in OperationHost if oh.value == operation_host_value)
except StopIteration as e:
raise DataError(
f"Could not find operation host {operation_host_value} in {list(OperationHost)}"
) from e
operations[op_name] = Operation(
op_name,
primary=impl.primary.file.data if impl else None,
dependencies=[d.file.data for d in impl.get("dependencies", [])] if impl else [],
artifacts=[a.data for a in self.collect_artifacts(service_ast).values()],
inputs=inputs,
outputs=outputs,
timeout=timeout,
host=operation_host,
)
# unify Standard and Configure interfaces with type_uri to use only shorthand_name
if name == StandardInterfaceOperation.type_uri():
name = StandardInterfaceOperation.shorthand_name()
if name == ConfigureInterfaceOperation.type_uri():
name = ConfigureInterfaceOperation.shorthand_name()
interfaces[name] = Interface(name, operations)
return dict(interfaces)
def collect_capabilities(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_capability_definitions(service_ast)
assignments = self.get("capabilities", {})
undeclared_caps = set(assignments.keys()) - definitions.keys()
if undeclared_caps:
self.abort(f"Invalid capabilities: {', '.join(undeclared_caps)}.", self.loc)
return [
Capability(name, assignment.get("properties", None), assignment.get("attributes", None))
for name, assignment in assignments.items()
]
def collect_artifacts(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_artifact_definitions(service_ast)
assignments = self.get("artifacts", {})
duplicate_interfaces = set(assignments.keys()).intersection(
definitions.keys())
if duplicate_interfaces:
for duplicate in duplicate_interfaces:
definitions.pop(duplicate)
definitions.update(assignments)
return {
name: (assignments.get(name) or definition).get_value(
definition.get_value_type(service_ast),
) for name, definition in definitions.items()
} | src/opera/parser/tosca/v_1_3/collector_mixin.py | from opera.constants import OperationHost, StandardInterfaceOperation, ConfigureInterfaceOperation
from opera.error import DataError
from opera.template.capability import Capability
from opera.template.interface import Interface
from opera.template.operation import Operation
class CollectorMixin:
def collect_types(self, service_ast):
typ = self.type.resolve_reference(service_ast)
return (self.type.data,) + typ.collect_types(service_ast)
def collect_properties(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_property_definitions(service_ast)
assignments = self.get("properties", {})
undeclared_props = set(assignments.keys()) - definitions.keys()
if undeclared_props:
self.abort(f"Invalid properties: {', '.join(undeclared_props)}.", self.loc)
for key, prop_definition in definitions.items():
prop_required = prop_definition.get("required", None)
prop_has_default = prop_definition.get("default", None)
prop_assignment = assignments.get(key, None)
if prop_required:
prop_required = prop_required.data
else:
prop_required = True
if prop_required and not prop_has_default and not prop_assignment:
self.abort(
f"Missing a required property: {key}. If the property is optional please specify this in the "
f"definition with 'required: false' or supply its default value using 'default: <value>'.", self.loc
)
return {
name: (assignments.get(name) or definition).get_value(definition.get_value_type(service_ast))
for name, definition in definitions.items()
}
def collect_attributes(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_attribute_definitions(service_ast)
assignments = self.get("attributes", {})
undeclared_attrs = set(assignments.keys()) - definitions.keys()
if undeclared_attrs:
self.abort(f"Invalid attributes: {', '.join(undeclared_attrs)}.", self.loc)
return {
name: (assignments.get(name) or definition).get_value(
definition.get_value_type(service_ast),
) for name, definition in definitions.items()
}
def collect_interfaces(self, service_ast): # pylint: disable=too-many-locals
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_interface_definitions(service_ast)
assignments = self.get("interfaces", {})
undeclared_interfaces = set(assignments.keys()) - definitions.keys()
if undeclared_interfaces:
self.abort(f"Undeclared interfaces: {', '.join(undeclared_interfaces)}.", self.loc)
# Next section is nasty. You have been warned.
interfaces = {}
for name, definition in definitions.items():
assignment = self.dig("interfaces", name) or {}
defined_operations = definition.get("operations", {})
assigned_operations = assignment.get("operations", {})
undeclared_operations = set(assigned_operations.keys()) - defined_operations.keys()
if undeclared_operations:
self.abort(f"Undeclared operations: {', '.join(undeclared_operations)}.", self.loc)
operations = {}
for op_name, op_definition in defined_operations.items():
op_assignment = assigned_operations.get(name, {})
undeclared_inputs = set()
# Inputs come from four different sources:
# 1. interface definition,
# 2. interface operation definition,
# 3. interface assignment in template section, and
# 4. interface operation assignment in template section.
inputs = {
k: v.get_value(v.get_value_type(service_ast))
for k, v in definition.get("inputs", {}).items()
}
inputs.update({
k: v.get_value(v.get_value_type(service_ast))
for k, v in op_definition.get("inputs", {}).items()
})
for k, v in assignment.get("inputs", {}).items():
if k not in inputs:
undeclared_inputs.add(k)
else:
inputs[k] = v.get_value(inputs[k].type)
for k, v in op_assignment.get("inputs", {}).items():
if k not in inputs:
undeclared_inputs.add(k)
else:
inputs[k] = v.get_value(inputs[k].type)
if undeclared_inputs:
self.abort(f"Undeclared inputs: {', '.join(undeclared_inputs)}.", self.loc)
# Outputs, which define the attribute mapping, come from:
# 1. interface operation definition,
# 2. interface operation assignment in template section
outputs = {
k: [s.data for s in v.data]
for k, v in op_definition.get("outputs", {}).items()
}
outputs.update({
k: [s.data for s in v.data]
for k, v in op_assignment.get("outputs", {}).items()
})
# Operation implementation details
impl = op_assignment.get("implementation") or op_definition.get("implementation")
# TODO: when impl is None we also pass that forward to operation objects. Fix this if needed.
timeout, operation_host = 0, None
if impl and "timeout" in impl:
timeout = impl.timeout.data
if impl and "operation_host" in impl:
operation_host_value = impl.operation_host.data
try:
operation_host = next(oh for oh in OperationHost if oh.value == operation_host_value)
except StopIteration as e:
raise DataError(
f"Could not find operation host {operation_host_value} in {list(OperationHost)}"
) from e
operations[op_name] = Operation(
op_name,
primary=impl.primary.file.data if impl else None,
dependencies=[d.file.data for d in impl.get("dependencies", [])] if impl else [],
artifacts=[a.data for a in self.collect_artifacts(service_ast).values()],
inputs=inputs,
outputs=outputs,
timeout=timeout,
host=operation_host,
)
# unify Standard and Configure interfaces with type_uri to use only shorthand_name
if name == StandardInterfaceOperation.type_uri():
name = StandardInterfaceOperation.shorthand_name()
if name == ConfigureInterfaceOperation.type_uri():
name = ConfigureInterfaceOperation.shorthand_name()
interfaces[name] = Interface(name, operations)
return dict(interfaces)
def collect_capabilities(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_capability_definitions(service_ast)
assignments = self.get("capabilities", {})
undeclared_caps = set(assignments.keys()) - definitions.keys()
if undeclared_caps:
self.abort(f"Invalid capabilities: {', '.join(undeclared_caps)}.", self.loc)
return [
Capability(name, assignment.get("properties", None), assignment.get("attributes", None))
for name, assignment in assignments.items()
]
def collect_artifacts(self, service_ast):
typ = self.type.resolve_reference(service_ast)
definitions = typ.collect_artifact_definitions(service_ast)
assignments = self.get("artifacts", {})
duplicate_interfaces = set(assignments.keys()).intersection(
definitions.keys())
if duplicate_interfaces:
for duplicate in duplicate_interfaces:
definitions.pop(duplicate)
definitions.update(assignments)
return {
name: (assignments.get(name) or definition).get_value(
definition.get_value_type(service_ast),
) for name, definition in definitions.items()
} | 0.499268 | 0.272375 |
import pygame
import os
import math
import numpy as np
from board import *
pygame.init()
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.display.set_caption('Dots and Boxes AI')
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# top, bottom, left, right
board = np.zeros((n - 1, m - 1, 5), dtype=object)
# -1 human, 1 ai
turn = -1
def draw_dots():
for i in range(m):
for j in range(n):
pygame.draw.circle(screen, BLACK, (i * WIDTH // m + OFFSET, j * HEIGHT // n + OFFSET), 7)
def draw_line(i, j, direction, colour):
x1 = i * WIDTH // m + OFFSET
x2 = x1 + WIDTH // m
y1 = j * HEIGHT // n + OFFSET
y2 = y1 + HEIGHT // n
if direction == DIRECTION['top']:
pygame.draw.line(screen, colour, (x1, y1), (x2, y1), THICC)
elif direction == DIRECTION['bottom']:
pygame.draw.line(screen, colour, (x1, y2), (x2, y2), THICC)
elif direction == DIRECTION['left']:
pygame.draw.line(screen, colour, (x1, y1), (x1, y2), THICC)
if direction == DIRECTION['right']:
pygame.draw.line(screen, colour, (x2, y1), (x2, y2), THICC)
def draw_board():
global board
for i in range(m - 1):
for j in range(n - 1):
for index, line in enumerate(board[j, i, :4]):
try:
# Check if the lines are drawn
if line[0] != 0:
draw_line(i, j, index, line[1])
except TypeError:
# line = [drawn, colour]
board[j, i, index] = (0, 0)
def colour_box():
for i in range(m - 1):
for j in range(n - 1):
# Check if the box is full
full = board[j, i][4]
if full:
pygame.draw.rect(screen, COLOURS[full * 2],
(i * WIDTH // m + OFFSET + THICC - 1, j * HEIGHT // n + OFFSET + THICC - 1,
WIDTH // m - THICC,
HEIGHT // n - THICC))
def draw_text(size, text, colour, x, y):
font = pygame.font.SysFont('Comic Sans MS', size)
text_surface = font.render(text, 1, colour)
text_rect = text_surface.get_rect(center=(x, y))
screen.blit(text_surface, text_rect)
def reset():
global board, turn, playing, winner
winner = WHITE
board = np.zeros((n - 1, m - 1, 5), dtype=object)
draw_board()
playing = True
SCORES[1] = 0
SCORES[-1] = 0
turn = -1
# Convert board
draw_board()
playing = True
winner = WHITE
while True:
screen.fill(winner)
mouse_pos = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
quit()
if event.type == pygame.MOUSEBUTTONDOWN:
assert math.fabs(turn) == 1
if event.button == pygame.BUTTON_LEFT:
if place_line(board, mouse_pos, turn) and not check_full(board, turn):
turn *= -1
if event.button == pygame.BUTTON_RIGHT:
reset()
if playing:
place_line(board, mouse_pos, turn * 2, True)
draw_board()
colour_box()
draw_dots()
draw_text(45, f'RED {SCORES[1]}', RED, WIDTH // 2, HEIGHT - 40)
draw_text(45, f'BLUE {SCORES[-1]}', BLUE, WIDTH // 2, HEIGHT - 10)
if check_board_full(board):
playing = False
if SCORES[-1] > SCORES[1]:
winner = COLOURS[-1]
elif SCORES[-1] == SCORES[1]:
winner = WHITE
else:
winner = COLOURS[1]
pygame.display.flip() | main.py | import pygame
import os
import math
import numpy as np
from board import *
pygame.init()
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.display.set_caption('Dots and Boxes AI')
screen = pygame.display.set_mode((WIDTH, HEIGHT))
# top, bottom, left, right
board = np.zeros((n - 1, m - 1, 5), dtype=object)
# -1 human, 1 ai
turn = -1
def draw_dots():
for i in range(m):
for j in range(n):
pygame.draw.circle(screen, BLACK, (i * WIDTH // m + OFFSET, j * HEIGHT // n + OFFSET), 7)
def draw_line(i, j, direction, colour):
x1 = i * WIDTH // m + OFFSET
x2 = x1 + WIDTH // m
y1 = j * HEIGHT // n + OFFSET
y2 = y1 + HEIGHT // n
if direction == DIRECTION['top']:
pygame.draw.line(screen, colour, (x1, y1), (x2, y1), THICC)
elif direction == DIRECTION['bottom']:
pygame.draw.line(screen, colour, (x1, y2), (x2, y2), THICC)
elif direction == DIRECTION['left']:
pygame.draw.line(screen, colour, (x1, y1), (x1, y2), THICC)
if direction == DIRECTION['right']:
pygame.draw.line(screen, colour, (x2, y1), (x2, y2), THICC)
def draw_board():
global board
for i in range(m - 1):
for j in range(n - 1):
for index, line in enumerate(board[j, i, :4]):
try:
# Check if the lines are drawn
if line[0] != 0:
draw_line(i, j, index, line[1])
except TypeError:
# line = [drawn, colour]
board[j, i, index] = (0, 0)
def colour_box():
for i in range(m - 1):
for j in range(n - 1):
# Check if the box is full
full = board[j, i][4]
if full:
pygame.draw.rect(screen, COLOURS[full * 2],
(i * WIDTH // m + OFFSET + THICC - 1, j * HEIGHT // n + OFFSET + THICC - 1,
WIDTH // m - THICC,
HEIGHT // n - THICC))
def draw_text(size, text, colour, x, y):
font = pygame.font.SysFont('Comic Sans MS', size)
text_surface = font.render(text, 1, colour)
text_rect = text_surface.get_rect(center=(x, y))
screen.blit(text_surface, text_rect)
def reset():
global board, turn, playing, winner
winner = WHITE
board = np.zeros((n - 1, m - 1, 5), dtype=object)
draw_board()
playing = True
SCORES[1] = 0
SCORES[-1] = 0
turn = -1
# Convert board
draw_board()
playing = True
winner = WHITE
while True:
screen.fill(winner)
mouse_pos = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
quit()
if event.type == pygame.MOUSEBUTTONDOWN:
assert math.fabs(turn) == 1
if event.button == pygame.BUTTON_LEFT:
if place_line(board, mouse_pos, turn) and not check_full(board, turn):
turn *= -1
if event.button == pygame.BUTTON_RIGHT:
reset()
if playing:
place_line(board, mouse_pos, turn * 2, True)
draw_board()
colour_box()
draw_dots()
draw_text(45, f'RED {SCORES[1]}', RED, WIDTH // 2, HEIGHT - 40)
draw_text(45, f'BLUE {SCORES[-1]}', BLUE, WIDTH // 2, HEIGHT - 10)
if check_board_full(board):
playing = False
if SCORES[-1] > SCORES[1]:
winner = COLOURS[-1]
elif SCORES[-1] == SCORES[1]:
winner = WHITE
else:
winner = COLOURS[1]
pygame.display.flip() | 0.291586 | 0.257785 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateGA(base.UpdateCommand):
"""Updates the definition of a logs-based metric."""
detailed_help = {
'DESCRIPTION': """\
Updates the description or the filter expression of an existing
logs-based metric.
""",
'EXAMPLES': """\
To update the description of a metric called high_severity_count, run:
$ {command} high_severity_count --description="Count of high-severity log entries."
To update the filter expression of the metric, run:
$ {command} high_severity_count --log-filter="severity >= WARNING"
Detailed information about filters can be found at:
[](https://cloud.google.com/logging/docs/view/advanced_filters)
""",
}
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'metric_name', help='The name of the log-based metric to update.')
config_group = parser.add_argument_group(
help='Data about the metric to update.',
required=True
)
config_group.add_argument(
'--description',
help=('A new description for the metric. '
'If omitted, the description is not changed.'))
config_group.add_argument(
'--log-filter',
help=('A new filter string for the metric. '
'If omitted, the filter is not changed.'))
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to
this command invocation.
Returns:
The updated metric.
"""
# Calling the API's Update method on a non-existing metric creates it.
# Make sure the metric exists so we don't accidentally create it.
metric = util.GetClient().projects_metrics.Get(
util.GetMessages().LoggingProjectsMetricsGetRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name)))
updated_metric = util.UpdateLogMetric(
metric,
description=args.description,
log_filter=args.log_filter)
result = util.GetClient().projects_metrics.Update(
util.GetMessages().LoggingProjectsMetricsUpdateRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name),
logMetric=updated_metric))
log.UpdatedResource(args.metric_name)
return result
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class UpdateBeta(base.UpdateCommand):
"""Updates the definition of a logs-based metric."""
detailed_help = {
'DESCRIPTION': """\
Updates the description or the filter expression of an existing
logs-based metric.
""",
'EXAMPLES': """\
To update the description of a metric called high_severity_count, run:
$ {command} high_severity_count --description="Count of high-severity log entries."
To update the filter expression of the metric, run:
$ {command} high_severity_count --log-filter="severity >= WARNING"
Detailed information about filters can be found at:
[](https://cloud.google.com/logging/docs/view/advanced_filters)
For advanced features such as user-defined labels and distribution
metrics, update using a config file:
$ {command} high_severity_count --config-from-file=$PATH_TO_FILE
The config file should be in YAML format. Detailed information about
how to configure metrics can be found at: [](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric).
Any top-level fields in the LogMetric definition that aren't specified
in the config file will not be updated in the metric.
""",
}
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'metric_name', help='The name of the log-based metric to update.')
config_group = parser.add_argument_group(
help='Data about the metric to update.',
mutex=True,
required=True)
legacy_mode_group = config_group.add_argument_group(
help=('Arguments to specify information about simple counter logs-'
'based metrics.'))
legacy_mode_group.add_argument(
'--description', required=False,
help=('A new description for the metric. '
'If omitted, the description is not changed.'))
legacy_mode_group.add_argument(
'--log-filter', required=False,
help=('A new filter string for the metric. '
'If omitted, the filter is not changed.'))
config_group.add_argument('--config-from-file',
help=('A path to a YAML file specifying the '
'updates to be made to the logs-based '
'metric.'),
type=arg_parsers.FileContents())
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to
this command invocation.
Returns:
The updated metric.
"""
# Calling the API's Update method on a non-existing metric creates it.
# Make sure the metric exists so we don't accidentally create it.
metric = util.GetClient().projects_metrics.Get(
util.GetMessages().LoggingProjectsMetricsGetRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name)))
updated_metric = util.UpdateLogMetric(metric,
args.description,
args.log_filter,
args.config_from_file)
result = util.GetClient().projects_metrics.Update(
util.GetMessages().LoggingProjectsMetricsUpdateRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name),
logMetric=updated_metric))
log.UpdatedResource(args.metric_name)
return result | lib/surface/logging/metrics/update.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateGA(base.UpdateCommand):
"""Updates the definition of a logs-based metric."""
detailed_help = {
'DESCRIPTION': """\
Updates the description or the filter expression of an existing
logs-based metric.
""",
'EXAMPLES': """\
To update the description of a metric called high_severity_count, run:
$ {command} high_severity_count --description="Count of high-severity log entries."
To update the filter expression of the metric, run:
$ {command} high_severity_count --log-filter="severity >= WARNING"
Detailed information about filters can be found at:
[](https://cloud.google.com/logging/docs/view/advanced_filters)
""",
}
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'metric_name', help='The name of the log-based metric to update.')
config_group = parser.add_argument_group(
help='Data about the metric to update.',
required=True
)
config_group.add_argument(
'--description',
help=('A new description for the metric. '
'If omitted, the description is not changed.'))
config_group.add_argument(
'--log-filter',
help=('A new filter string for the metric. '
'If omitted, the filter is not changed.'))
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to
this command invocation.
Returns:
The updated metric.
"""
# Calling the API's Update method on a non-existing metric creates it.
# Make sure the metric exists so we don't accidentally create it.
metric = util.GetClient().projects_metrics.Get(
util.GetMessages().LoggingProjectsMetricsGetRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name)))
updated_metric = util.UpdateLogMetric(
metric,
description=args.description,
log_filter=args.log_filter)
result = util.GetClient().projects_metrics.Update(
util.GetMessages().LoggingProjectsMetricsUpdateRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name),
logMetric=updated_metric))
log.UpdatedResource(args.metric_name)
return result
@base.ReleaseTracks(base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA)
class UpdateBeta(base.UpdateCommand):
"""Updates the definition of a logs-based metric."""
detailed_help = {
'DESCRIPTION': """\
Updates the description or the filter expression of an existing
logs-based metric.
""",
'EXAMPLES': """\
To update the description of a metric called high_severity_count, run:
$ {command} high_severity_count --description="Count of high-severity log entries."
To update the filter expression of the metric, run:
$ {command} high_severity_count --log-filter="severity >= WARNING"
Detailed information about filters can be found at:
[](https://cloud.google.com/logging/docs/view/advanced_filters)
For advanced features such as user-defined labels and distribution
metrics, update using a config file:
$ {command} high_severity_count --config-from-file=$PATH_TO_FILE
The config file should be in YAML format. Detailed information about
how to configure metrics can be found at: [](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric).
Any top-level fields in the LogMetric definition that aren't specified
in the config file will not be updated in the metric.
""",
}
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'metric_name', help='The name of the log-based metric to update.')
config_group = parser.add_argument_group(
help='Data about the metric to update.',
mutex=True,
required=True)
legacy_mode_group = config_group.add_argument_group(
help=('Arguments to specify information about simple counter logs-'
'based metrics.'))
legacy_mode_group.add_argument(
'--description', required=False,
help=('A new description for the metric. '
'If omitted, the description is not changed.'))
legacy_mode_group.add_argument(
'--log-filter', required=False,
help=('A new filter string for the metric. '
'If omitted, the filter is not changed.'))
config_group.add_argument('--config-from-file',
help=('A path to a YAML file specifying the '
'updates to be made to the logs-based '
'metric.'),
type=arg_parsers.FileContents())
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to
this command invocation.
Returns:
The updated metric.
"""
# Calling the API's Update method on a non-existing metric creates it.
# Make sure the metric exists so we don't accidentally create it.
metric = util.GetClient().projects_metrics.Get(
util.GetMessages().LoggingProjectsMetricsGetRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name)))
updated_metric = util.UpdateLogMetric(metric,
args.description,
args.log_filter,
args.config_from_file)
result = util.GetClient().projects_metrics.Update(
util.GetMessages().LoggingProjectsMetricsUpdateRequest(
metricName=util.CreateResourceName(
util.GetCurrentProjectParent(), 'metrics', args.metric_name),
logMetric=updated_metric))
log.UpdatedResource(args.metric_name)
return result | 0.903249 | 0.210746 |
import torch
from ..net import zero_gather
class FusedExpertsNetwork(torch.nn.Module):
    """Batched two-layer FFN (fc1 -> activation -> fc2) for all local MoE experts.

    Every local expert's weights are stacked into single batched parameters so
    one batched matmul in :meth:`forward` evaluates all experts at once.
    """

    def __init__(self, hidden_size_per_expert, activation_fn=None, activation_fn_with_self=None, output_dim=None):
        super().__init__()
        import os  # BUGFIX: the original reached os through the incidental `torch.os` alias
        # Debug switch: SKIP_EXPERT=1 turns the expert network into an identity.
        self.skip_expert = (int(os.environ.get('SKIP_EXPERT', '0')) != 0)
        self.hidden_size_per_expert = hidden_size_per_expert
        self.output_dim = output_dim
        if activation_fn_with_self is not None:
            assert activation_fn is None, "Option `activation_fn_with_self` has been specified, please keep exactly one of them."
            activation_fn = lambda x: activation_fn_with_self(x, self)
        if activation_fn is None:
            # BUGFIX: `F` is not imported in this module; use the fully
            # qualified functional API so the default activation works.
            activation_fn = lambda x: torch.nn.functional.relu(x)
        self.activation_fn = activation_fn

    def update(self, ctx):
        """(Re)allocate batched expert parameters from the dispatch context.

        `ctx` supplies `model_dim`, `num_local_experts` and `sharded_count`;
        the hidden dimension is split evenly across shards.
        """
        if ctx.sharded_count > 1:
            assert self.hidden_size_per_expert % ctx.sharded_count == 0, f"Can't evenly divide hidden_size_per_expert ({self.hidden_size_per_expert}) to {ctx.sharded_count} slices."
        hidden_size = self.hidden_size_per_expert // ctx.sharded_count
        model_dim = ctx.model_dim
        local_experts = ctx.num_local_experts
        self.output_dim = self.output_dim or model_dim
        fc1_weight = torch.empty(1, local_experts, hidden_size, model_dim)
        fc2_weight = torch.empty(1, local_experts, hidden_size, self.output_dim)
        fc1_bias = torch.empty(1, local_experts, 1, hidden_size)
        # The fc2 bias is ceil-divided across shards; forward() re-gathers it.
        fc2_bias = torch.empty(1, local_experts, 1, (self.output_dim + ctx.sharded_count - 1) // ctx.sharded_count)
        for i in range(local_experts):
            # nn.Linear is used purely for its default weight initialization.
            fc1 = torch.nn.Linear(model_dim, hidden_size)
            fc2 = torch.nn.Linear(hidden_size, self.output_dim)
            fc1_weight[0, i, :, :], fc1_bias[0, i, :, :] = fc1.weight, fc1.bias
            fc2_weight[0, i, :, :], fc2_bias[0, i, :, :] = fc2.weight.t(), fc2.bias[:fc2_bias.size(-1)]
        self.register_parameter(name='batched_fc1_w', param=torch.nn.Parameter(fc1_weight.squeeze(0)))
        self.register_parameter(name='batched_fc2_w', param=torch.nn.Parameter(fc2_weight.squeeze(0)))
        self.register_parameter(name='batched_fc1_bias', param=torch.nn.Parameter(fc1_bias.squeeze(0)))
        self.register_parameter(name='batched_fc2_bias', param=torch.nn.Parameter(fc2_bias.squeeze(0)))

    def extra_repr(self):
        return 'model_dim=%d, hidden_size=%d, output_dim=%d, local_experts=%d' % (
            self.batched_fc1_w.size(2), self.batched_fc1_w.size(1), self.batched_fc2_w.size(2), self.batched_fc1_w.size(0)
        )

    def forward(self, x, ctx):
        """Apply fc1 -> activation -> fc2 per local expert on input `x`."""
        if self.skip_expert:
            return x
        batched_fc1_w = self.batched_fc1_w
        batched_fc2_w = self.batched_fc2_w
        batched_fc1_bias = self.batched_fc1_bias
        batched_fc2_bias = self.batched_fc2_bias
        if ctx.ffn_zero_group is not None:
            # ZeRO-style sharding: gather full weights unless model parallelism
            # keeps the computation itself sharded.
            if not ctx.use_model_parallel:
                batched_fc1_w = zero_gather(batched_fc1_w, group=ctx.ffn_zero_group).view(1, -1, ctx.model_dim)
                batched_fc2_w = zero_gather(batched_fc2_w, group=ctx.ffn_zero_group).view(1, -1, self.output_dim)
                batched_fc1_bias = zero_gather(batched_fc1_bias, group=ctx.ffn_zero_group).view(1, 1, -1)
            batched_fc2_bias = zero_gather(batched_fc2_bias, group=ctx.ffn_zero_group)
            batched_fc2_bias = batched_fc2_bias.view(self.batched_fc2_bias.size(0), self.batched_fc2_bias.size(1), -1)
            if batched_fc2_bias.size(-1) != self.output_dim:
                # Drop the ceil-division padding introduced in update().
                batched_fc2_bias = batched_fc2_bias[:, :, :self.output_dim]
            if ctx.use_model_parallel:
                # Otherwise the bias would be added once per shard after reduction.
                batched_fc2_bias = torch.mul(batched_fc2_bias, 1.0 / ctx.sharded_count)
        y = torch.add(torch.matmul(x, batched_fc1_w.permute(0, 2, 1)), batched_fc1_bias)
        y = self.activation_fn(y)
        y = torch.add(torch.matmul(y, batched_fc2_w), batched_fc2_bias)
        return y

    def to(self, *args, **kwargs):
        # BUGFIX: the previous implementation referenced `self.fc1_weight` etc.,
        # attributes that are never created (the parameters are registered as
        # `batched_fc1_w`, ...), so any `.to()` call raised AttributeError.
        # Module.to already moves every registered parameter, so delegating
        # to super() is both sufficient and correct.
        return super().to(*args, **kwargs)
ExpertModule = FusedExpertsNetwork | tutel/experts/ffn.py |
import torch
from ..net import zero_gather
class FusedExpertsNetwork(torch.nn.Module):
    """Batched two-layer FFN (fc1 -> activation -> fc2) for all local MoE experts.

    Every local expert's weights are stacked into single batched parameters so
    one batched matmul in ``forward`` evaluates all experts at once.
    """

    def __init__(self, hidden_size_per_expert, activation_fn=None, activation_fn_with_self=None, output_dim=None):
        super().__init__()
        # NOTE(review): `torch.os` only works because torch happens to import
        # `os` at module level -- plain `os.environ` would be clearer.
        self.skip_expert = (int(torch.os.environ.get('SKIP_EXPERT', '0')) != 0)
        self.hidden_size_per_expert = hidden_size_per_expert
        self.output_dim = output_dim
        if activation_fn_with_self is not None:
            assert activation_fn is None, "Option `activation_fn_with_self` has been specified, please keep exactly one of them."
            activation_fn = lambda x: activation_fn_with_self(x, self)
        if activation_fn is None:
            # NOTE(review): `F` is not imported anywhere visible in this module;
            # confirm `torch.nn.functional as F` exists at file top, otherwise
            # the default activation raises NameError at call time.
            activation_fn = lambda x: F.relu(x)
        self.activation_fn = activation_fn

    def update(self, ctx):
        """(Re)allocate batched expert parameters from the dispatch context.

        `ctx` supplies `model_dim`, `num_local_experts` and `sharded_count`;
        the hidden dimension is split evenly across shards.
        """
        if ctx.sharded_count > 1:
            assert self.hidden_size_per_expert % ctx.sharded_count == 0, f"Can't evenly divide hidden_size_per_expert ({self.hidden_size_per_expert}) to {ctx.sharded_count} slices."
        hidden_size = self.hidden_size_per_expert // ctx.sharded_count
        model_dim = ctx.model_dim
        local_experts = ctx.num_local_experts
        self.output_dim = self.output_dim or model_dim
        fc1_weight = torch.empty(1, local_experts, hidden_size, model_dim)
        fc2_weight = torch.empty(1, local_experts, hidden_size, self.output_dim)
        fc1_bias = torch.empty(1, local_experts, 1, hidden_size)
        # The fc2 bias is ceil-divided across shards; forward() re-gathers it.
        fc2_bias = torch.empty(1, local_experts, 1, (self.output_dim + ctx.sharded_count - 1) // ctx.sharded_count)
        for i in range(local_experts):
            # nn.Linear is used purely for its default weight initialization.
            fc1 = torch.nn.Linear(model_dim, hidden_size)
            fc2 = torch.nn.Linear(hidden_size, self.output_dim)
            fc1_weight[0, i, :, :], fc1_bias[0, i, :, :] = fc1.weight, fc1.bias
            fc2_weight[0, i, :, :], fc2_bias[0, i, :, :] = fc2.weight.t(), fc2.bias[:fc2_bias.size(-1)]
        self.register_parameter(name='batched_fc1_w', param=torch.nn.Parameter(fc1_weight.squeeze(0)))
        self.register_parameter(name='batched_fc2_w', param=torch.nn.Parameter(fc2_weight.squeeze(0)))
        self.register_parameter(name='batched_fc1_bias', param=torch.nn.Parameter(fc1_bias.squeeze(0)))
        self.register_parameter(name='batched_fc2_bias', param=torch.nn.Parameter(fc2_bias.squeeze(0)))

    def extra_repr(self):
        return 'model_dim=%d, hidden_size=%d, output_dim=%d, local_experts=%d' % (
            self.batched_fc1_w.size(2), self.batched_fc1_w.size(1), self.batched_fc2_w.size(2), self.batched_fc1_w.size(0)
        )

    def forward(self, x, ctx):
        """Apply fc1 -> activation -> fc2 per local expert on input `x`."""
        if self.skip_expert:
            return x
        batched_fc1_w = self.batched_fc1_w
        batched_fc2_w = self.batched_fc2_w
        batched_fc1_bias = self.batched_fc1_bias
        batched_fc2_bias = self.batched_fc2_bias
        if ctx.ffn_zero_group is not None:
            # ZeRO-style sharding: gather full weights unless model parallelism
            # keeps the computation itself sharded.
            if not ctx.use_model_parallel:
                batched_fc1_w = zero_gather(batched_fc1_w, group=ctx.ffn_zero_group).view(1, -1, ctx.model_dim)
                batched_fc2_w = zero_gather(batched_fc2_w, group=ctx.ffn_zero_group).view(1, -1, self.output_dim)
                batched_fc1_bias = zero_gather(batched_fc1_bias, group=ctx.ffn_zero_group).view(1, 1, -1)
            batched_fc2_bias = zero_gather(batched_fc2_bias, group=ctx.ffn_zero_group)
            batched_fc2_bias = batched_fc2_bias.view(self.batched_fc2_bias.size(0), self.batched_fc2_bias.size(1), -1)
            if batched_fc2_bias.size(-1) != self.output_dim:
                # Drop the ceil-division padding introduced in update().
                batched_fc2_bias = batched_fc2_bias[:, :, :self.output_dim]
            if ctx.use_model_parallel:
                # Otherwise the bias would be added once per shard after reduction.
                batched_fc2_bias = torch.mul(batched_fc2_bias, 1.0 / ctx.sharded_count)
        y = torch.add(torch.matmul(x, batched_fc1_w.permute(0, 2, 1)), batched_fc1_bias)
        y = self.activation_fn(y)
        y = torch.add(torch.matmul(y, batched_fc2_w), batched_fc2_bias)
        return y

    def to(self, *args, **kwargs):
        # NOTE(review): `fc1_weight`/`fc2_weight`/`fc1_bias`/`fc2_bias` are
        # never created on this module (the parameters are `batched_*`), so any
        # `.to()` call raises AttributeError -- looks like stale/dead code.
        self = super().to(*args, **kwargs)
        self.fc1_weight = self.fc1_weight.to(*args, **kwargs)
        self.fc2_weight = self.fc2_weight.to(*args, **kwargs)
        self.fc1_bias = self.fc1_bias.to(*args, **kwargs)
        self.fc2_bias = self.fc2_bias.to(*args, **kwargs)
        return self
ExpertModule = FusedExpertsNetwork | 0.890604 | 0.571826 |
from __future__ import print_function
import hashlib
import hmac
import os
import struct
import sys
import time
import ldap
from ldap.cidict import cidict as CIDict
from ldap.ldapobject import LDAPObject
# Smoke-test hook: when invoked with `--check`, just report that the script
# can start and exit successfully without contacting any LDAP server.
if len(sys.argv) > 1 and sys.argv[1] == "--check":
    raise SystemExit(0)
def get_digits(h, digits):
    """RFC 4226 dynamic truncation of a 20-byte HMAC-SHA1 digest.

    The low nibble of the final byte selects a 4-byte window; the window is
    read big-endian with the sign bit cleared, reduced modulo 10**digits and
    returned as zero-padded ASCII bytes.
    """
    start = h[19] & 0x0F
    value = int.from_bytes(bytes(h[start:start + 4]), "big") & 0x7FFFFFFF
    return str(value % 10 ** digits).zfill(digits).encode()

def get_hotp_token(secret, interval_no):
    """Return the 6-digit HOTP value (as bytes) for the given counter."""
    counter = int(interval_no).to_bytes(8, "big")
    digest = hmac.new(secret, counter, hashlib.sha1).digest()
    return get_digits(bytearray(digest), 6)
def get_interval(period=30):
    """Return the index of the current TOTP time step of length `period` seconds."""
    now = time.time()
    return int(now // period)
def get_token_for(connection, dn, typ="totp"):
    """Follow a user entry's oath<TYP>Token link and return the token entry.

    Reads the entry at `dn`, resolves its token DN from the
    ``oath<typ>token`` attribute, and returns ``(token_dn, attrs)`` where
    `attrs` is a case-insensitive dict of the token entry's attributes.
    """
    user_dn, user_attrs = connection.search_s(dn, ldap.SCOPE_BASE)[0]
    token_dn = CIDict(user_attrs)['oath' + typ + 'token'][0].decode()
    entry_dn, entry_attrs = connection.search_s(token_dn, ldap.SCOPE_BASE)[0]
    return entry_dn, CIDict(entry_attrs)
def main():
    """Exercise slapd's TOTP overlay end to end.

    Binds as the manager, reads Babs's TOTP token and parameters, then checks
    that: stale tokens are rejected, a token is single-use, the retired-step
    counter is recorded, tokens are shared between accounts pointing at the
    same secret, and a token is retired even when presented with a wrong
    password. Raises SystemExit with a message on any failed expectation.
    """
    uri = os.environ["URI1"]
    managerdn = os.environ['MANAGERDN']
    # NOTE(review): '<PASSWORD>' looks like a redacted environment variable
    # name from source scrubbing -- confirm the real key before relying on it.
    passwd = os.environ['<PASSWORD>']
    babsdn = os.environ['BABSDN']
    babspw = b"bjensen"
    bjornsdn = os.environ['BJORNSDN']
    bjornspw = b"bjorn"
    connection = LDAPObject(uri)
    start = time.time()
    connection.bind_s(managerdn, passwd)
    end = time.time()
    if end - start > 1:
        # Token validity is time based; a slow link would make results flaky.
        print("It takes more than a second to connect and bind, "
              "skipping potentially unstable test", file=sys.stderr)
        raise SystemExit(0)
    dn, token_entry = get_token_for(connection, babsdn)
    paramsdn = token_entry['oathTOTPParams'][0].decode()
    result = connection.search_s(paramsdn, ldap.SCOPE_BASE)
    _, attrs = result[0]
    params = CIDict(attrs)
    secret = token_entry['oathSecret'][0]
    period = int(params['oathTOTPTimeStepPeriod'][0].decode())
    bind_conn = LDAPObject(uri)
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no-3)
    print("Testing old tokens are not useable")
    bind_conn.bind_s(babsdn, babspw+token)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with an old token should have failed")
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no)
    print("Testing token can only be used once")
    bind_conn.bind_s(babsdn, babspw+token)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with a reused token should have failed")
    token = get_hotp_token(secret, interval_no+1)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        raise SystemExit("Bind should have succeeded")
    dn, token_entry = get_token_for(connection, babsdn)
    last = int(token_entry['oathTOTPLastTimeStep'][0].decode())
    if last != interval_no+1:
        # BUGFIX: the SystemExit instance was previously constructed but never
        # raised, so a wrong counter value went completely unnoticed.
        raise SystemExit("Unexpected counter value %d (expected %d)" %
                         (last, interval_no+1))
    print("Resetting counter and testing secret sharing between accounts")
    connection.modify_s(dn, [(ldap.MOD_REPLACE, 'oathTOTPLastTimeStep', [])])
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no)
    try:
        bind_conn.bind_s(bjornsdn, bjornspw+token)
    except ldap.INVALID_CREDENTIALS:
        raise SystemExit("Bind should have succeeded")
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with a reused token should have failed")
    print("Testing token is retired even with a wrong password")
    connection.modify_s(dn, [(ldap.MOD_REPLACE, 'oathTOTPLastTimeStep', [])])
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no)
    try:
        bind_conn.bind_s(babsdn, b"not the password"+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with an incorrect password should have failed")
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with a reused token should have failed")
    token = get_hotp_token(secret, interval_no+1)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        raise SystemExit("Bind should have succeeded")
if __name__ == "__main__":
sys.exit(main()) | tests/scripts/test081-totp.py |
from __future__ import print_function
import hashlib
import hmac
import os
import struct
import sys
import time
import ldap
from ldap.cidict import cidict as CIDict
from ldap.ldapobject import LDAPObject
# Smoke-test hook: when invoked with `--check`, just report that the script
# can start and exit successfully without contacting any LDAP server.
if len(sys.argv) > 1 and sys.argv[1] == "--check":
    raise SystemExit(0)
def get_digits(h, digits):
    """RFC 4226 dynamic truncation of a 20-byte HMAC-SHA1 digest.

    The low nibble of the final byte selects a 4-byte window; the window is
    read big-endian with the sign bit cleared, reduced modulo 10**digits and
    returned as zero-padded ASCII bytes.
    """
    start = h[19] & 0x0F
    value = int.from_bytes(bytes(h[start:start + 4]), "big") & 0x7FFFFFFF
    return str(value % 10 ** digits).zfill(digits).encode()

def get_hotp_token(secret, interval_no):
    """Return the 6-digit HOTP value (as bytes) for the given counter."""
    counter = int(interval_no).to_bytes(8, "big")
    digest = hmac.new(secret, counter, hashlib.sha1).digest()
    return get_digits(bytearray(digest), 6)
def get_interval(period=30):
    """Return the index of the current TOTP time step of length `period` seconds."""
    now = time.time()
    return int(now // period)
def get_token_for(connection, dn, typ="totp"):
    """Follow a user entry's oath<TYP>Token link and return the token entry.

    Reads the entry at `dn`, resolves its token DN from the
    ``oath<typ>token`` attribute, and returns ``(token_dn, attrs)`` where
    `attrs` is a case-insensitive dict of the token entry's attributes.
    """
    user_dn, user_attrs = connection.search_s(dn, ldap.SCOPE_BASE)[0]
    token_dn = CIDict(user_attrs)['oath' + typ + 'token'][0].decode()
    entry_dn, entry_attrs = connection.search_s(token_dn, ldap.SCOPE_BASE)[0]
    return entry_dn, CIDict(entry_attrs)
def main():
    """Exercise slapd's TOTP overlay end to end.

    Binds as the manager, reads Babs's TOTP token and parameters, then checks
    that: stale tokens are rejected, a token is single-use, the retired-step
    counter is recorded, tokens are shared between accounts pointing at the
    same secret, and a token is retired even when presented with a wrong
    password. Raises SystemExit with a message on any failed expectation.
    """
    uri = os.environ["URI1"]
    managerdn = os.environ['MANAGERDN']
    # NOTE(review): '<PASSWORD>' looks like a redacted environment variable
    # name from source scrubbing -- confirm the real key before relying on it.
    passwd = os.environ['<PASSWORD>']
    babsdn = os.environ['BABSDN']
    babspw = b"bjensen"
    bjornsdn = os.environ['BJORNSDN']
    bjornspw = b"bjorn"
    connection = LDAPObject(uri)
    start = time.time()
    connection.bind_s(managerdn, passwd)
    end = time.time()
    if end - start > 1:
        # Token validity is time based; a slow link would make results flaky.
        print("It takes more than a second to connect and bind, "
              "skipping potentially unstable test", file=sys.stderr)
        raise SystemExit(0)
    dn, token_entry = get_token_for(connection, babsdn)
    paramsdn = token_entry['oathTOTPParams'][0].decode()
    result = connection.search_s(paramsdn, ldap.SCOPE_BASE)
    _, attrs = result[0]
    params = CIDict(attrs)
    secret = token_entry['oathSecret'][0]
    period = int(params['oathTOTPTimeStepPeriod'][0].decode())
    bind_conn = LDAPObject(uri)
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no-3)
    print("Testing old tokens are not useable")
    bind_conn.bind_s(babsdn, babspw+token)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with an old token should have failed")
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no)
    print("Testing token can only be used once")
    bind_conn.bind_s(babsdn, babspw+token)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with a reused token should have failed")
    token = get_hotp_token(secret, interval_no+1)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        raise SystemExit("Bind should have succeeded")
    dn, token_entry = get_token_for(connection, babsdn)
    last = int(token_entry['oathTOTPLastTimeStep'][0].decode())
    if last != interval_no+1:
        # BUGFIX: the SystemExit instance was previously constructed but never
        # raised, so a wrong counter value went completely unnoticed.
        raise SystemExit("Unexpected counter value %d (expected %d)" %
                         (last, interval_no+1))
    print("Resetting counter and testing secret sharing between accounts")
    connection.modify_s(dn, [(ldap.MOD_REPLACE, 'oathTOTPLastTimeStep', [])])
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no)
    try:
        bind_conn.bind_s(bjornsdn, bjornspw+token)
    except ldap.INVALID_CREDENTIALS:
        raise SystemExit("Bind should have succeeded")
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with a reused token should have failed")
    print("Testing token is retired even with a wrong password")
    connection.modify_s(dn, [(ldap.MOD_REPLACE, 'oathTOTPLastTimeStep', [])])
    interval_no = get_interval(period)
    token = get_hotp_token(secret, interval_no)
    try:
        bind_conn.bind_s(babsdn, b"not the password"+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with an incorrect password should have failed")
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        pass
    else:
        raise SystemExit("Bind with a reused token should have failed")
    token = get_hotp_token(secret, interval_no+1)
    try:
        bind_conn.bind_s(babsdn, babspw+token)
    except ldap.INVALID_CREDENTIALS:
        raise SystemExit("Bind should have succeeded")
if __name__ == "__main__":
sys.exit(main()) | 0.38318 | 0.172625 |
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import CCompilerError
from distutils.errors import CompileError
from distutils.errors import DistutilsExecError
from distutils.errors import DistutilsPlatformError
def read(*names, **kwargs):
    """Return the text of a file located relative to this setup.py.

    Path segments in `names` are joined onto the directory containing this
    file; the encoding may be overridden with an `encoding` keyword
    (default UTF-8).

    BUGFIX: the file handle is now closed deterministically via a context
    manager instead of being leaked to the garbage collector.
    """
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ) as fh:
        return fh.read()
class optional_build_ext(build_ext):
    '''Allow the building of C extensions to fail.

    Wraps the standard build_ext command so that platform or compiler
    problems degrade to a printed warning instead of aborting installation;
    the package is then installed without its optimized extensions.
    '''

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError as e:
            self._unavailable(e)
            self.extensions = []  # avoid copying missing files (it would fail).

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, CompileError, DistutilsExecError) as e:
            self._unavailable(e)
            self.extensions = []  # avoid copying missing files (it would fail).

    def _unavailable(self, e):
        # Print a loud banner instead of raising, so installation continues.
        print('*' * 80)
        print('''WARNING:
An optional code optimization (C extension) could not be compiled.
Optimizations for this package will not be available!
''')
        print('CAUSE:')
        print('')
        print(' ' + repr(e))
        print('*' * 80)
setup(
    name='knitlib',
    version='0.0.1',
    license='GPLv3',
    description='A library designed to support varied knitting machines.',
    # Long description: README followed by the CHANGELOG with Sphinx
    # cross-reference roles rewritten to plain literals.
    long_description='%s\n%s' % (read('README.rst'), re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
    # NOTE(review): author/email fields contain anonymization placeholders --
    # restore the real values before publishing.
    author='<NAME> , <NAME>',
    author_email='<EMAIL> , <EMAIL>',
    url='https://github.com/fashiontec/knitlib',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # NOTE(review): the classifier claims BSD while `license` says GPLv3,
        # and "5 - Production/Stable" is unusual for 0.0.1 -- confirm both.
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        'click',
        'fysom',
        'pyserial',
        'enum34',
        'Pillow',
        'jsonschema',
        'fs'
    ],
    extras_require={
        # eg: 'rst': ['docutils>=0.11'],
    },
    entry_points={
        'console_scripts': [
            'knitlib = knitlib.__main__:main',
        ]
    },
    # Build every C file found under src/, tolerating compiler failure via
    # the optional_build_ext command above.
    cmdclass={'build_ext': optional_build_ext},
    ext_modules=[
        Extension(
            splitext(relpath(path, 'src').replace(os.sep, '.'))[0],
            sources=[path],
            include_dirs=[dirname(path)]
        )
        for root, _, _ in os.walk('src')
        for path in glob(join(root, '*.c'))
    ]
)
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
from distutils.core import Extension
from distutils.errors import CCompilerError
from distutils.errors import CompileError
from distutils.errors import DistutilsExecError
from distutils.errors import DistutilsPlatformError
def read(*names, **kwargs):
    """Return the text of a file located relative to this setup.py.

    Path segments in `names` are joined onto the directory containing this
    file; the encoding may be overridden with an `encoding` keyword
    (default UTF-8).

    BUGFIX: the file handle is now closed deterministically via a context
    manager instead of being leaked to the garbage collector.
    """
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ) as fh:
        return fh.read()
class optional_build_ext(build_ext):
    '''Allow the building of C extensions to fail.

    Wraps the standard build_ext command so that platform or compiler
    problems degrade to a printed warning instead of aborting installation;
    the package is then installed without its optimized extensions.
    '''

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError as e:
            self._unavailable(e)
            self.extensions = []  # avoid copying missing files (it would fail).

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, CompileError, DistutilsExecError) as e:
            self._unavailable(e)
            self.extensions = []  # avoid copying missing files (it would fail).

    def _unavailable(self, e):
        # Print a loud banner instead of raising, so installation continues.
        print('*' * 80)
        print('''WARNING:
An optional code optimization (C extension) could not be compiled.
Optimizations for this package will not be available!
''')
        print('CAUSE:')
        print('')
        print(' ' + repr(e))
        print('*' * 80)
setup(
    name='knitlib',
    version='0.0.1',
    license='GPLv3',
    description='A library designed to support varied knitting machines.',
    # Long description: README followed by the CHANGELOG with Sphinx
    # cross-reference roles rewritten to plain literals.
    long_description='%s\n%s' % (read('README.rst'), re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
    # NOTE(review): author/email fields contain anonymization placeholders --
    # restore the real values before publishing.
    author='<NAME> , <NAME>',
    author_email='<EMAIL> , <EMAIL>',
    url='https://github.com/fashiontec/knitlib',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # NOTE(review): the classifier claims BSD while `license` says GPLv3,
        # and "5 - Production/Stable" is unusual for 0.0.1 -- confirm both.
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        'click',
        'fysom',
        'pyserial',
        'enum34',
        'Pillow',
        'jsonschema',
        'fs'
    ],
    extras_require={
        # eg: 'rst': ['docutils>=0.11'],
    },
    entry_points={
        'console_scripts': [
            'knitlib = knitlib.__main__:main',
        ]
    },
    # Build every C file found under src/, tolerating compiler failure via
    # the optional_build_ext command above.
    cmdclass={'build_ext': optional_build_ext},
    ext_modules=[
        Extension(
            splitext(relpath(path, 'src').replace(os.sep, '.'))[0],
            sources=[path],
            include_dirs=[dirname(path)]
        )
        for root, _, _ in os.walk('src')
        for path in glob(join(root, '*.c'))
    ]
)
import __future__
import ast
import dis
import sys
import unittest
from compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_NEWLOCALS,
CO_NOFREE,
CO_OPTIMIZED,
)
from dis import opmap, opname
from unittest import TestCase
from .common import CompilerTest
class FlagTests(CompilerTest):
    """Check the CO_* flag combinations the compiler places on code objects.

    Each test compiles (or runs) a small snippet and asserts the exact set
    of co_flags on the resulting function/class/comprehension code object.
    """

    def test_future_no_longer_relevant(self):
        # print_function is always on under py3, so no future flag is recorded.
        f = self.run_code(
            """
from __future__ import print_function
def f(): pass"""
        )["f"]
        self.assertEqual(f.__code__.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_future_gen_stop(self):
        # generator_stop is the default in modern Python: no extra flag.
        f = self.run_code(
            """
from __future__ import generator_stop
def f(): pass"""
        )["f"]
        expected = CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS
        self.assertEqual(f.__code__.co_flags, expected)

    def test_future_barry_as_bdfl(self):
        # The barry_as_FLUFL easter egg still sets its dedicated future flag.
        f = self.run_code(
            """
from __future__ import barry_as_FLUFL
def f(): pass"""
        )["f"]
        self.assertEqual(
            f.__code__.co_flags,
            __future__.CO_FUTURE_BARRY_AS_BDFL
            | CO_NOFREE
            | CO_OPTIMIZED
            | CO_NEWLOCALS,
        )

    def test_braces(self):
        # `from __future__ import braces` is a deliberate SyntaxError.
        with self.assertRaisesRegex(SyntaxError, "not a chance"):
            f = self.run_code(
                """
from __future__ import braces
def f(): pass"""
            )

    def test_gen_func(self):
        f = self.run_code("def f(): yield")["f"]
        self.assertEqual(
            f.__code__.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR
        )

    def test_async_gen_func(self):
        # Both `yield` and `await` => async generator, not a plain coroutine.
        f = self.run_code(
            """
async def f():
    yield
    await foo"""
        )["f"]
        self.assertEqual(
            f.__code__.co_flags,
            CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_ASYNC_GENERATOR,
        )

    def test_gen_func_yield_from(self):
        f = self.run_code("def f(): yield from (1, 2, 3)")["f"]
        self.assertEqual(
            f.__code__.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR
        )

    def test_gen_exp(self):
        # A generator expression compiles to its own generator code object.
        f = self.compile("x = (x for x in (1, 2, 3))")
        code = self.find_code(f)
        self.assertEqual(
            code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR
        )

    def test_list_comp(self):
        f = self.compile("x = [x for x in (1, 2, 3)]")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_dict_comp(self):
        f = self.compile("x = {x:x for x in (1, 2, 3)}")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_set_comp(self):
        f = self.compile("x = {x for x in (1, 2, 3)}")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_class(self):
        # Class bodies are neither optimized nor given new locals.
        f = self.compile("class C: pass")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE)

    def test_coroutine(self):
        f = self.compile("async def f(): pass")
        code = self.find_code(f)
        self.assertEqual(
            code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_COROUTINE
        )

    def test_coroutine_await(self):
        f = self.compile("async def f(): await foo")
        code = self.find_code(f)
        self.assertEqual(
            code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_COROUTINE
        )

    def test_free_vars(self):
        # Closing over `x` clears CO_NOFREE and marks the inner code CO_NESTED.
        f = self.compile(
            """
def g():
    x = 2
    def f():
        return x"""
        )
        code = self.find_code(self.find_code(f))
        self.assertEqual(code.co_flags, CO_NESTED | CO_OPTIMIZED | CO_NEWLOCALS)
if __name__ == "__main__":
unittest.main() | Lib/test/test_compiler/test_flags.py | import __future__
import ast
import dis
import sys
import unittest
from compiler.consts import (
CO_ASYNC_GENERATOR,
CO_COROUTINE,
CO_GENERATOR,
CO_NESTED,
CO_NEWLOCALS,
CO_NOFREE,
CO_OPTIMIZED,
)
from dis import opmap, opname
from unittest import TestCase
from .common import CompilerTest
class FlagTests(CompilerTest):
    """Check the CO_* flag combinations the compiler places on code objects.

    Each test compiles (or runs) a small snippet and asserts the exact set
    of co_flags on the resulting function/class/comprehension code object.
    """

    def test_future_no_longer_relevant(self):
        # print_function is always on under py3, so no future flag is recorded.
        f = self.run_code(
            """
from __future__ import print_function
def f(): pass"""
        )["f"]
        self.assertEqual(f.__code__.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_future_gen_stop(self):
        # generator_stop is the default in modern Python: no extra flag.
        f = self.run_code(
            """
from __future__ import generator_stop
def f(): pass"""
        )["f"]
        expected = CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS
        self.assertEqual(f.__code__.co_flags, expected)

    def test_future_barry_as_bdfl(self):
        # The barry_as_FLUFL easter egg still sets its dedicated future flag.
        f = self.run_code(
            """
from __future__ import barry_as_FLUFL
def f(): pass"""
        )["f"]
        self.assertEqual(
            f.__code__.co_flags,
            __future__.CO_FUTURE_BARRY_AS_BDFL
            | CO_NOFREE
            | CO_OPTIMIZED
            | CO_NEWLOCALS,
        )

    def test_braces(self):
        # `from __future__ import braces` is a deliberate SyntaxError.
        with self.assertRaisesRegex(SyntaxError, "not a chance"):
            f = self.run_code(
                """
from __future__ import braces
def f(): pass"""
            )

    def test_gen_func(self):
        f = self.run_code("def f(): yield")["f"]
        self.assertEqual(
            f.__code__.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR
        )

    def test_async_gen_func(self):
        # Both `yield` and `await` => async generator, not a plain coroutine.
        f = self.run_code(
            """
async def f():
    yield
    await foo"""
        )["f"]
        self.assertEqual(
            f.__code__.co_flags,
            CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_ASYNC_GENERATOR,
        )

    def test_gen_func_yield_from(self):
        f = self.run_code("def f(): yield from (1, 2, 3)")["f"]
        self.assertEqual(
            f.__code__.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR
        )

    def test_gen_exp(self):
        # A generator expression compiles to its own generator code object.
        f = self.compile("x = (x for x in (1, 2, 3))")
        code = self.find_code(f)
        self.assertEqual(
            code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_GENERATOR
        )

    def test_list_comp(self):
        f = self.compile("x = [x for x in (1, 2, 3)]")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_dict_comp(self):
        f = self.compile("x = {x:x for x in (1, 2, 3)}")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_set_comp(self):
        f = self.compile("x = {x for x in (1, 2, 3)}")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS)

    def test_class(self):
        # Class bodies are neither optimized nor given new locals.
        f = self.compile("class C: pass")
        code = self.find_code(f)
        self.assertEqual(code.co_flags, CO_NOFREE)

    def test_coroutine(self):
        f = self.compile("async def f(): pass")
        code = self.find_code(f)
        self.assertEqual(
            code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_COROUTINE
        )

    def test_coroutine_await(self):
        f = self.compile("async def f(): await foo")
        code = self.find_code(f)
        self.assertEqual(
            code.co_flags, CO_NOFREE | CO_OPTIMIZED | CO_NEWLOCALS | CO_COROUTINE
        )

    def test_free_vars(self):
        # Closing over `x` clears CO_NOFREE and marks the inner code CO_NESTED.
        f = self.compile(
            """
def g():
    x = 2
    def f():
        return x"""
        )
        code = self.find_code(self.find_code(f))
        self.assertEqual(code.co_flags, CO_NESTED | CO_OPTIMIZED | CO_NEWLOCALS)
if __name__ == "__main__":
unittest.main() | 0.414188 | 0.16807 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
class JointsMSELoss(nn.Module):
    """Mean-squared-error loss over per-joint heatmaps.

    Optionally scales each joint's prediction and target by a per-sample,
    per-joint weight before the MSE is taken; the result is averaged over
    joints.
    """

    def __init__(self, use_target_weight):
        super(JointsMSELoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')
        self.use_target_weight = use_target_weight

    def forward(self, output, target, target_weight):
        batch, joints = output.size(0), output.size(1)
        # Flatten each joint's heatmap to a vector: (N, joints, H*W).
        pred_maps = output.reshape(batch, joints, -1)
        gt_maps = target.reshape(batch, joints, -1)
        total = 0
        for j in range(joints):
            pred = pred_maps[:, j].squeeze()
            gt = gt_maps[:, j].squeeze()
            if self.use_target_weight:
                w = target_weight[:, j]
                total = total + 0.5 * self.criterion(pred.mul(w), gt.mul(w))
            else:
                total = total + 0.5 * self.criterion(pred, gt)
        return total / joints
class JointsOHKMMSELoss(nn.Module):
    """MSE heatmap loss with online hard-keypoint mining (OHKM).

    Per-joint MSE losses are computed per sample; only the `topk` hardest
    joints of each sample contribute to the final scalar.
    """

    def __init__(self, use_target_weight, topk=8):
        super(JointsOHKMMSELoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='none')
        self.use_target_weight = use_target_weight
        self.topk = topk

    def ohkm(self, loss):
        """Average the top-k hardest joint losses over the batch."""
        total = 0.
        for sample_loss in loss:
            _, hard_idx = torch.topk(sample_loss, k=self.topk, dim=0, sorted=False)
            hardest = torch.gather(sample_loss, 0, hard_idx)
            total += torch.sum(hardest) / self.topk
        return total / loss.size(0)

    def forward(self, output, target, target_weight):
        batch, joints = output.size(0), output.size(1)
        # Flatten each joint's heatmap to a vector: (N, joints, H*W).
        pred_maps = output.reshape(batch, joints, -1)
        gt_maps = target.reshape(batch, joints, -1)
        per_joint = []
        for j in range(joints):
            pred = pred_maps[:, j].squeeze()
            gt = gt_maps[:, j].squeeze()
            if self.use_target_weight:
                w = target_weight[:, j]
                per_joint.append(0.5 * self.criterion(pred.mul(w), gt.mul(w)))
            else:
                per_joint.append(0.5 * self.criterion(pred, gt))
        # Collapse pixel dims -> one scalar loss per (sample, joint).
        stacked = torch.cat([l.mean(dim=1).unsqueeze(dim=1) for l in per_joint], dim=1)
        return self.ohkm(stacked)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
class JointsMSELoss(nn.Module):
    """Mean-squared-error loss over per-joint heatmaps.

    Optionally scales each joint's prediction and target by a per-sample,
    per-joint weight before the MSE is taken; the result is averaged over
    joints.
    """

    def __init__(self, use_target_weight):
        super(JointsMSELoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='mean')
        self.use_target_weight = use_target_weight

    def forward(self, output, target, target_weight):
        batch, joints = output.size(0), output.size(1)
        # Flatten each joint's heatmap to a vector: (N, joints, H*W).
        pred_maps = output.reshape(batch, joints, -1)
        gt_maps = target.reshape(batch, joints, -1)
        total = 0
        for j in range(joints):
            pred = pred_maps[:, j].squeeze()
            gt = gt_maps[:, j].squeeze()
            if self.use_target_weight:
                w = target_weight[:, j]
                total = total + 0.5 * self.criterion(pred.mul(w), gt.mul(w))
            else:
                total = total + 0.5 * self.criterion(pred, gt)
        return total / joints
class JointsOHKMMSELoss(nn.Module):
    """MSE heatmap loss with online hard-keypoint mining (OHKM).

    Per-joint MSE losses are computed per sample; only the `topk` hardest
    joints of each sample contribute to the final scalar.
    """

    def __init__(self, use_target_weight, topk=8):
        super(JointsOHKMMSELoss, self).__init__()
        self.criterion = nn.MSELoss(reduction='none')
        self.use_target_weight = use_target_weight
        self.topk = topk

    def ohkm(self, loss):
        """Average the top-k hardest joint losses over the batch."""
        total = 0.
        for sample_loss in loss:
            _, hard_idx = torch.topk(sample_loss, k=self.topk, dim=0, sorted=False)
            hardest = torch.gather(sample_loss, 0, hard_idx)
            total += torch.sum(hardest) / self.topk
        return total / loss.size(0)

    def forward(self, output, target, target_weight):
        batch, joints = output.size(0), output.size(1)
        # Flatten each joint's heatmap to a vector: (N, joints, H*W).
        pred_maps = output.reshape(batch, joints, -1)
        gt_maps = target.reshape(batch, joints, -1)
        per_joint = []
        for j in range(joints):
            pred = pred_maps[:, j].squeeze()
            gt = gt_maps[:, j].squeeze()
            if self.use_target_weight:
                w = target_weight[:, j]
                per_joint.append(0.5 * self.criterion(pred.mul(w), gt.mul(w)))
            else:
                per_joint.append(0.5 * self.criterion(pred, gt))
        # Collapse pixel dims -> one scalar loss per (sample, joint).
        stacked = torch.cat([l.mean(dim=1).unsqueeze(dim=1) for l in per_joint], dim=1)
        return self.ohkm(stacked)
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayPcreditHuabeiAuthPageSignModel(object):
    """Model for the alipay.pcredit.huabei.auth.page.sign request payload.
    All fields are optional payload attributes. `to_alipay_dict` /
    `from_alipay_dict` convert between model instances and plain dicts.
    """
    # Payload field names, in the order the generated SDK declares them.
    # This tuple drives __init__ and both (de)serialization helpers below.
    _FIELD_NAMES = (
        'auth_scene',
        'external_logon_id',
        'freeze_amount',
        'order_title',
        'out_request_no',
        'out_sign_no',
        'seller_id',
        'timeout_express',
    )
    def __init__(self):
        # Every field starts unset (None) and is omitted when serializing.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)
    @property
    def auth_scene(self):
        return self._auth_scene
    @auth_scene.setter
    def auth_scene(self, value):
        self._auth_scene = value
    @property
    def external_logon_id(self):
        return self._external_logon_id
    @external_logon_id.setter
    def external_logon_id(self, value):
        self._external_logon_id = value
    @property
    def freeze_amount(self):
        return self._freeze_amount
    @freeze_amount.setter
    def freeze_amount(self, value):
        self._freeze_amount = value
    @property
    def order_title(self):
        return self._order_title
    @order_title.setter
    def order_title(self, value):
        self._order_title = value
    @property
    def out_request_no(self):
        return self._out_request_no
    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value
    @property
    def out_sign_no(self):
        return self._out_sign_no
    @out_sign_no.setter
    def out_sign_no(self, value):
        self._out_sign_no = value
    @property
    def seller_id(self):
        return self._seller_id
    @seller_id.setter
    def seller_id(self, value):
        self._seller_id = value
    @property
    def timeout_express(self):
        return self._timeout_express
    @timeout_express.setter
    def timeout_express(self, value):
        self._timeout_express = value
    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            # Falsy values (None, '', 0) are omitted, matching the generated
            # SDK's truthiness check.
            if not value:
                continue
            # Nested models serialize themselves via to_alipay_dict().
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict `d`; returns None when `d` is empty."""
        if not d:
            return None
        o = AlipayPcreditHuabeiAuthPageSignModel()
        for name in AlipayPcreditHuabeiAuthPageSignModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
from alipay.aop.api.constant.ParamConstants import *
class AlipayPcreditHuabeiAuthPageSignModel(object):
    """Model for the alipay.pcredit.huabei.auth.page.sign request payload.
    All fields are optional payload attributes. `to_alipay_dict` /
    `from_alipay_dict` convert between model instances and plain dicts.
    """
    # Payload field names, in the order the generated SDK declares them.
    # This tuple drives __init__ and both (de)serialization helpers below.
    _FIELD_NAMES = (
        'auth_scene',
        'external_logon_id',
        'freeze_amount',
        'order_title',
        'out_request_no',
        'out_sign_no',
        'seller_id',
        'timeout_express',
    )
    def __init__(self):
        # Every field starts unset (None) and is omitted when serializing.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)
    @property
    def auth_scene(self):
        return self._auth_scene
    @auth_scene.setter
    def auth_scene(self, value):
        self._auth_scene = value
    @property
    def external_logon_id(self):
        return self._external_logon_id
    @external_logon_id.setter
    def external_logon_id(self, value):
        self._external_logon_id = value
    @property
    def freeze_amount(self):
        return self._freeze_amount
    @freeze_amount.setter
    def freeze_amount(self, value):
        self._freeze_amount = value
    @property
    def order_title(self):
        return self._order_title
    @order_title.setter
    def order_title(self, value):
        self._order_title = value
    @property
    def out_request_no(self):
        return self._out_request_no
    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value
    @property
    def out_sign_no(self):
        return self._out_sign_no
    @out_sign_no.setter
    def out_sign_no(self, value):
        self._out_sign_no = value
    @property
    def seller_id(self):
        return self._seller_id
    @seller_id.setter
    def seller_id(self, value):
        self._seller_id = value
    @property
    def timeout_express(self):
        return self._timeout_express
    @timeout_express.setter
    def timeout_express(self, value):
        self._timeout_express = value
    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            # Falsy values (None, '', 0) are omitted, matching the generated
            # SDK's truthiness check.
            if not value:
                continue
            # Nested models serialize themselves via to_alipay_dict().
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from dict `d`; returns None when `d` is empty."""
        if not d:
            return None
        o = AlipayPcreditHuabeiAuthPageSignModel()
        for name in AlipayPcreditHuabeiAuthPageSignModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
"""Segmentation input and model functions for serving/inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from dataloader import mode_keys
from modeling import factory
from serving import inputs
from hyperparameters import params_dict
def serving_input_fn(batch_size, desired_image_size, stride):
  """Input function for SavedModels and TF serving.
  Args:
    batch_size: The batch size.
    desired_image_size: The tuple/list of two integers, specifying the desired
      image size.
    stride: an integer, the stride of the backbone network. The processed image
      will be (internally) padded such that each side is the multiple of this
      number.
  Returns:
    A `tf.estimator.export.ServingInputReceiver` for a SavedModel.
  """
  image_bytes_placeholder, features = inputs.image_bytes_input(
      batch_size, desired_image_size, stride)
  # Extra serving-time inputs: a per-request key echoed back in the output,
  # and score thresholds used downstream to zero out low-confidence pixels.
  key_placeholder = tf.placeholder_with_default(
      ['default'], shape=[None], name='key')
  threshold_placeholder = tf.placeholder(dtype=tf.float32, shape=[None])
  features.update({
      'key': key_placeholder,
      'score_thresholds': threshold_placeholder,
  })
  return tf.estimator.export.ServingInputReceiver(
      features=features,
      receiver_tensors={
          'image_bytes': image_bytes_placeholder,
          'key': key_placeholder,
          'score_thresholds': threshold_placeholder,
      })
def serving_model_fn_builder(export_tpu_model, output_image_info):
  """Serving model_fn builder.
  Args:
    export_tpu_model: bool, whether to export a TPU or CPU/GPU model.
    output_image_info: bool, whether output the image_info node.
  Returns:
    A function that returns (TPU)EstimatorSpec for PREDICT mode.
  """
  def _serving_model_fn(features, labels, mode, params):
    """Builds the serving model_fn."""
    del labels  # unused.
    if mode != tf.estimator.ModeKeys.PREDICT:
      raise ValueError('To build the serving model_fn, set '
                       'mode = `tf.estimator.ModeKeys.PREDICT`')
    model_params = params_dict.ParamsDict(params)
    images = features['images']
    # Static (scaled + padded) input resolution from the serving input fn.
    _, height, width, _ = images.get_shape().as_list()
    model_fn = factory.model_generator(model_params)
    outputs = model_fn.build_outputs(
        features['images'], labels=None, mode=mode_keys.PREDICT)
    # Upsample the segmentation logits to the padded input resolution.
    logits = tf.image.resize_bilinear(
        outputs['logits'], tf.shape(images)[1:3], align_corners=False)
    # NOTE: The above image size is scaled and padded. We will first crop
    # out the scaled image to remove padding and then re-scale back to the
    # original image size.
    # NOTE(review): the tf.squeeze calls below drop the batch dimension, so
    # this post-processing appears to assume batch_size == 1 -- confirm
    # before exporting a batched model.
    original_image_size = tf.squeeze(features['image_info'][:, 0:1, :])
    original_height = original_image_size[0]
    original_width = original_image_size[1]
    # image_info row 2 is read as the (height, width) scale factors.
    scaling = tf.squeeze(features['image_info'][:, 2:3, :])
    scaled_height = original_height * scaling[0]
    scaled_width = original_width * scaling[1]
    # Crop starts at (0, 0); padding is assumed to sit on the bottom/right.
    offset_height = tf.zeros_like(height, dtype=tf.int32)
    offset_width = tf.zeros_like(width, dtype=tf.int32)
    logits = tf.image.crop_to_bounding_box(
        logits, offset_height, offset_width,
        tf.cast(scaled_height, dtype=tf.int32),
        tf.cast(scaled_width, dtype=tf.int32))
    logits = tf.image.resize_bilinear(
        logits,
        tf.cast(original_image_size, dtype=tf.int32),
        align_corners=False)
    probabilities = tf.nn.softmax(logits)
    score_threshold_placeholder = features['score_thresholds']
    key_placeholder = features['key']
    # Append singleton dims so the per-request threshold broadcasts against
    # the probability tensor.
    score_threshold_pred_expanded = score_threshold_placeholder
    for _ in range(0, logits.shape.ndims - 1):
      score_threshold_pred_expanded = tf.expand_dims(
          score_threshold_pred_expanded, -1)
    # Zero out sub-threshold class probabilities, keep the best remaining
    # score per pixel, and scale it into a uint8 [0, 255] grayscale image.
    scores = tf.where(probabilities > score_threshold_pred_expanded,
                      probabilities, tf.zeros_like(probabilities))
    scores = tf.reduce_max(scores, 3)
    scores = tf.expand_dims(scores, -1)
    scores = tf.cast(tf.minimum(scores * 255.0, 255), tf.uint8)
    # Per-pixel argmax class id (uint8 cast below assumes < 256 classes).
    categories = tf.to_int32(tf.expand_dims(tf.argmax(probabilities, 3), -1))
    # Generate images for scores and categories.
    score_bytes = tf.map_fn(
        tf.image.encode_png, scores, back_prop=False, dtype=tf.string)
    category_bytes = tf.map_fn(
        tf.image.encode_png,
        tf.cast(categories, tf.uint8),
        back_prop=False,
        dtype=tf.string)
    predictions = {}
    predictions['category_bytes'] = tf.identity(
        category_bytes, name='category_bytes')
    predictions['score_bytes'] = tf.identity(score_bytes, name='score_bytes')
    predictions['key'] = tf.identity(key_placeholder, name='key')
    if output_image_info:
      predictions['image_info'] = tf.identity(
          features['image_info'], name='image_info')
    if export_tpu_model:
      return tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode, predictions=predictions)
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
  return _serving_model_fn
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from dataloader import mode_keys
from modeling import factory
from serving import inputs
from hyperparameters import params_dict
def serving_input_fn(batch_size, desired_image_size, stride):
  """Input function for SavedModels and TF serving.
  Args:
    batch_size: The batch size.
    desired_image_size: The tuple/list of two integers, specifying the desired
      image size.
    stride: an integer, the stride of the backbone network. The processed image
      will be (internally) padded such that each side is the multiple of this
      number.
  Returns:
    A `tf.estimator.export.ServingInputReceiver` for a SavedModel.
  """
  image_bytes_placeholder, features = inputs.image_bytes_input(
      batch_size, desired_image_size, stride)
  # Extra serving-time inputs: a per-request key echoed back in the output,
  # and score thresholds used downstream to zero out low-confidence pixels.
  key_placeholder = tf.placeholder_with_default(
      ['default'], shape=[None], name='key')
  threshold_placeholder = tf.placeholder(dtype=tf.float32, shape=[None])
  features.update({
      'key': key_placeholder,
      'score_thresholds': threshold_placeholder,
  })
  return tf.estimator.export.ServingInputReceiver(
      features=features,
      receiver_tensors={
          'image_bytes': image_bytes_placeholder,
          'key': key_placeholder,
          'score_thresholds': threshold_placeholder,
      })
def serving_model_fn_builder(export_tpu_model, output_image_info):
  """Serving model_fn builder.
  Args:
    export_tpu_model: bool, whether to export a TPU or CPU/GPU model.
    output_image_info: bool, whether output the image_info node.
  Returns:
    A function that returns (TPU)EstimatorSpec for PREDICT mode.
  """
  def _serving_model_fn(features, labels, mode, params):
    """Builds the serving model_fn."""
    del labels  # unused.
    if mode != tf.estimator.ModeKeys.PREDICT:
      raise ValueError('To build the serving model_fn, set '
                       'mode = `tf.estimator.ModeKeys.PREDICT`')
    model_params = params_dict.ParamsDict(params)
    images = features['images']
    # Static (scaled + padded) input resolution from the serving input fn.
    _, height, width, _ = images.get_shape().as_list()
    model_fn = factory.model_generator(model_params)
    outputs = model_fn.build_outputs(
        features['images'], labels=None, mode=mode_keys.PREDICT)
    # Upsample the segmentation logits to the padded input resolution.
    logits = tf.image.resize_bilinear(
        outputs['logits'], tf.shape(images)[1:3], align_corners=False)
    # NOTE: The above image size is scaled and padded. We will first crop
    # out the scaled image to remove padding and then re-scale back to the
    # original image size.
    # NOTE(review): the tf.squeeze calls below drop the batch dimension, so
    # this post-processing appears to assume batch_size == 1 -- confirm
    # before exporting a batched model.
    original_image_size = tf.squeeze(features['image_info'][:, 0:1, :])
    original_height = original_image_size[0]
    original_width = original_image_size[1]
    # image_info row 2 is read as the (height, width) scale factors.
    scaling = tf.squeeze(features['image_info'][:, 2:3, :])
    scaled_height = original_height * scaling[0]
    scaled_width = original_width * scaling[1]
    # Crop starts at (0, 0); padding is assumed to sit on the bottom/right.
    offset_height = tf.zeros_like(height, dtype=tf.int32)
    offset_width = tf.zeros_like(width, dtype=tf.int32)
    logits = tf.image.crop_to_bounding_box(
        logits, offset_height, offset_width,
        tf.cast(scaled_height, dtype=tf.int32),
        tf.cast(scaled_width, dtype=tf.int32))
    logits = tf.image.resize_bilinear(
        logits,
        tf.cast(original_image_size, dtype=tf.int32),
        align_corners=False)
    probabilities = tf.nn.softmax(logits)
    score_threshold_placeholder = features['score_thresholds']
    key_placeholder = features['key']
    # Append singleton dims so the per-request threshold broadcasts against
    # the probability tensor.
    score_threshold_pred_expanded = score_threshold_placeholder
    for _ in range(0, logits.shape.ndims - 1):
      score_threshold_pred_expanded = tf.expand_dims(
          score_threshold_pred_expanded, -1)
    # Zero out sub-threshold class probabilities, keep the best remaining
    # score per pixel, and scale it into a uint8 [0, 255] grayscale image.
    scores = tf.where(probabilities > score_threshold_pred_expanded,
                      probabilities, tf.zeros_like(probabilities))
    scores = tf.reduce_max(scores, 3)
    scores = tf.expand_dims(scores, -1)
    scores = tf.cast(tf.minimum(scores * 255.0, 255), tf.uint8)
    # Per-pixel argmax class id (uint8 cast below assumes < 256 classes).
    categories = tf.to_int32(tf.expand_dims(tf.argmax(probabilities, 3), -1))
    # Generate images for scores and categories.
    score_bytes = tf.map_fn(
        tf.image.encode_png, scores, back_prop=False, dtype=tf.string)
    category_bytes = tf.map_fn(
        tf.image.encode_png,
        tf.cast(categories, tf.uint8),
        back_prop=False,
        dtype=tf.string)
    predictions = {}
    predictions['category_bytes'] = tf.identity(
        category_bytes, name='category_bytes')
    predictions['score_bytes'] = tf.identity(score_bytes, name='score_bytes')
    predictions['key'] = tf.identity(key_placeholder, name='key')
    if output_image_info:
      predictions['image_info'] = tf.identity(
          features['image_info'], name='image_info')
    if export_tpu_model:
      return tf.estimator.tpu.TPUEstimatorSpec(
          mode=mode, predictions=predictions)
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
  return _serving_model_fn
import sys
import unittest
from kanaconv.converter import KanaConv
from kanaconv.constants import MACRON_STYLE, CIRCUMFLEX_STYLE
from .assets import (
tests_apostrophe, tests_preprocessing, tests_rare_exc, tests_word_border,
tests_long_vowels, tests_xvowels, tests_xtsu_chi, tests_freq1000,
tests_circumflex, tests_circumflex_uppercase, tests_long_vowels_uppercase
)
# Disables the subtest functionality if we're on Python 2.
PYTHON_2 = sys.version_info < (3, 0)
class TestConverter(unittest.TestCase):
    '''
    Conversion tests for the KanaConv class.
    Each entry in a test table is a (kana input, expected romaji) pair;
    every check is a plain string comparison between the converter's
    output and the expected value, covering all implemented conversion
    rules and the rare edge cases.
    Run this using ./setup.py test
    '''
    def setUp(self):
        '''
        Create a fresh KanaConv instance for each test.
        '''
        self.conv = KanaConv()
    def _run_tests(self, tests, vowel_style=MACRON_STYLE, uppercase=False):
        '''
        Assert that every (input, expected) pair in `tests` converts
        correctly under the given vowel style and casing.
        '''
        self.conv.set_vowel_style(vowel_style)
        self.conv.set_uppercase(uppercase)
        for word, expected in tests:
            result = self.conv.to_romaji(word)
            if PYTHON_2:
                # Python 2's unittest lacks subTest(); assert directly.
                self.assertEqual(result, expected)
            else:
                with self.subTest(word=word):
                    self.assertEqual(result, expected)
    def test_apostrophe(self):
        self._run_tests(tests_apostrophe)
    def test_word_border(self):
        self._run_tests(tests_word_border)
    def test_long_vowels(self):
        self._run_tests(tests_long_vowels)
    def test_xvowels(self):
        self._run_tests(tests_xvowels)
    def test_xtsu_chi(self):
        self._run_tests(tests_xtsu_chi)
    def test_rare_exc(self):
        self._run_tests(tests_rare_exc)
    def test_preprocessing(self):
        self._run_tests(tests_preprocessing)
    def test_freq1000(self):
        self._run_tests(tests_freq1000)
    def test_uppercase(self):
        self._run_tests(tests_long_vowels_uppercase, uppercase=True)
    def test_circumflex(self):
        self._run_tests(tests_circumflex, vowel_style=CIRCUMFLEX_STYLE)
        self._run_tests(
            tests_circumflex_uppercase,
            vowel_style=CIRCUMFLEX_STYLE,
            uppercase=True
        )
# Allow running this test module directly with the default unittest runner.
if __name__ == '__main__':
    unittest.main()
import sys
import unittest
from kanaconv.converter import KanaConv
from kanaconv.constants import MACRON_STYLE, CIRCUMFLEX_STYLE
from .assets import (
tests_apostrophe, tests_preprocessing, tests_rare_exc, tests_word_border,
tests_long_vowels, tests_xvowels, tests_xtsu_chi, tests_freq1000,
tests_circumflex, tests_circumflex_uppercase, tests_long_vowels_uppercase
)
# Disables the subtest functionality if we're on Python 2.
PYTHON_2 = sys.version_info < (3, 0)
class TestConverter(unittest.TestCase):
    '''
    Conversion tests for the KanaConv class.
    Each entry in a test table is a (kana input, expected romaji) pair;
    every check is a plain string comparison between the converter's
    output and the expected value, covering all implemented conversion
    rules and the rare edge cases.
    Run this using ./setup.py test
    '''
    def setUp(self):
        '''
        Create a fresh KanaConv instance for each test.
        '''
        self.conv = KanaConv()
    def _run_tests(self, tests, vowel_style=MACRON_STYLE, uppercase=False):
        '''
        Assert that every (input, expected) pair in `tests` converts
        correctly under the given vowel style and casing.
        '''
        self.conv.set_vowel_style(vowel_style)
        self.conv.set_uppercase(uppercase)
        for word, expected in tests:
            result = self.conv.to_romaji(word)
            if PYTHON_2:
                # Python 2's unittest lacks subTest(); assert directly.
                self.assertEqual(result, expected)
            else:
                with self.subTest(word=word):
                    self.assertEqual(result, expected)
    def test_apostrophe(self):
        self._run_tests(tests_apostrophe)
    def test_word_border(self):
        self._run_tests(tests_word_border)
    def test_long_vowels(self):
        self._run_tests(tests_long_vowels)
    def test_xvowels(self):
        self._run_tests(tests_xvowels)
    def test_xtsu_chi(self):
        self._run_tests(tests_xtsu_chi)
    def test_rare_exc(self):
        self._run_tests(tests_rare_exc)
    def test_preprocessing(self):
        self._run_tests(tests_preprocessing)
    def test_freq1000(self):
        self._run_tests(tests_freq1000)
    def test_uppercase(self):
        self._run_tests(tests_long_vowels_uppercase, uppercase=True)
    def test_circumflex(self):
        self._run_tests(tests_circumflex, vowel_style=CIRCUMFLEX_STYLE)
        self._run_tests(
            tests_circumflex_uppercase,
            vowel_style=CIRCUMFLEX_STYLE,
            uppercase=True
        )
# Allow running this test module directly with the default unittest runner.
if __name__ == '__main__':
    unittest.main()
import unittest
import tests.test_utils as test_utils
from VisualBERT.mmf.utils.build import build_model
from VisualBERT.mmf.utils.configuration import Configuration
from VisualBERT.mmf.utils.env import setup_imports
# Vocabulary sizes of the transformer bases exercised below; each is passed
# as `vocab_size` to the torchscript comparison helpers.
BERT_VOCAB_SIZE = 30255
ROBERTA_VOCAB_SIZE = 50265
XLM_ROBERTA_VOCAB_SIZE = 250002
class TestMMFTransformerTorchscript(unittest.TestCase):
    """Torchscript export/comparison tests for the mmf_transformer model."""
    def setUp(self):
        test_utils.setup_proxy()
        setup_imports()
        self.model_name = "mmf_transformer"
        args = test_utils.dummy_args(model=self.model_name)
        self.config = Configuration(args).get_config()
        model_config = self.config.model_config[self.model_name]
        model_config.model = self.model_name
        self.finetune_model = build_model(model_config)
    def _rebuild_with_base(self, transformer_base):
        """Return a freshly built model using the given transformer base."""
        self.config.model_config[self.model_name][
            "transformer_base"
        ] = transformer_base
        return build_model(self.config.model_config[self.model_name])
    def test_load_save_finetune_model(self):
        self.assertTrue(test_utils.verify_torchscript_models(self.finetune_model))
    def test_finetune_bert_base(self):
        model = self.finetune_model.eval()
        self.assertTrue(
            test_utils.compare_torchscript_transformer_models(
                model, vocab_size=BERT_VOCAB_SIZE
            )
        )
    def test_finetune_roberta_base(self):
        model = self._rebuild_with_base("roberta-base")
        model.eval()
        self.assertTrue(
            test_utils.compare_torchscript_transformer_models(
                model, vocab_size=ROBERTA_VOCAB_SIZE
            )
        )
    @test_utils.skip_if_no_network
    def test_finetune_xlmr_base(self):
        model = self._rebuild_with_base("xlm-roberta-base")
        model.eval()
        self.assertTrue(
            test_utils.compare_torchscript_transformer_models(
                model, vocab_size=XLM_ROBERTA_VOCAB_SIZE
            )
        )
import unittest
import tests.test_utils as test_utils
from VisualBERT.mmf.utils.build import build_model
from VisualBERT.mmf.utils.configuration import Configuration
from VisualBERT.mmf.utils.env import setup_imports
# Vocabulary sizes of the transformer bases exercised below; each is passed
# as `vocab_size` to the torchscript comparison helpers.
BERT_VOCAB_SIZE = 30255
ROBERTA_VOCAB_SIZE = 50265
XLM_ROBERTA_VOCAB_SIZE = 250002
class TestMMFTransformerTorchscript(unittest.TestCase):
    """Torchscript export/comparison tests for the mmf_transformer model."""
    def setUp(self):
        test_utils.setup_proxy()
        setup_imports()
        self.model_name = "mmf_transformer"
        args = test_utils.dummy_args(model=self.model_name)
        self.config = Configuration(args).get_config()
        model_config = self.config.model_config[self.model_name]
        model_config.model = self.model_name
        self.finetune_model = build_model(model_config)
    def _rebuild_with_base(self, transformer_base):
        """Return a freshly built model using the given transformer base."""
        self.config.model_config[self.model_name][
            "transformer_base"
        ] = transformer_base
        return build_model(self.config.model_config[self.model_name])
    def test_load_save_finetune_model(self):
        self.assertTrue(test_utils.verify_torchscript_models(self.finetune_model))
    def test_finetune_bert_base(self):
        model = self.finetune_model.eval()
        self.assertTrue(
            test_utils.compare_torchscript_transformer_models(
                model, vocab_size=BERT_VOCAB_SIZE
            )
        )
    def test_finetune_roberta_base(self):
        model = self._rebuild_with_base("roberta-base")
        model.eval()
        self.assertTrue(
            test_utils.compare_torchscript_transformer_models(
                model, vocab_size=ROBERTA_VOCAB_SIZE
            )
        )
    @test_utils.skip_if_no_network
    def test_finetune_xlmr_base(self):
        model = self._rebuild_with_base("xlm-roberta-base")
        model.eval()
        self.assertTrue(
            test_utils.compare_torchscript_transformer_models(
                model, vocab_size=XLM_ROBERTA_VOCAB_SIZE
            )
        )
from apitools.base.protorpclite import messages as _messages
package = 'clouduseraccounts'
class AuthorizedKeysView(_messages.Message):
  """A list of authorized public keys for a user account.
  Fields:
    keys: [Output Only] The list of authorized public keys in SSH format.
    sudoer: [Output Only] Whether the user has the ability to elevate on the
      instance that requested the authorized keys.
  """
  # Both fields are output-only: populated by the service, ignored on input.
  keys = _messages.StringField(1, repeated=True)
  sudoer = _messages.BooleanField(2)
class ClouduseraccountsGlobalAccountsOperationsDeleteRequest(_messages.Message):
  """A ClouduseraccountsGlobalAccountsOperationsDeleteRequest object.
  Fields:
    operation: Name of the Operations resource to delete.
    project: Project ID for this request.
  """
  # Both fields are required; presumably URL parameters of the API call.
  operation = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGlobalAccountsOperationsDeleteResponse(_messages.Message):
  """An empty ClouduseraccountsGlobalAccountsOperationsDelete response."""
  # Intentionally declares no fields: a successful delete has no payload.
class ClouduseraccountsGlobalAccountsOperationsGetRequest(_messages.Message):
  """A ClouduseraccountsGlobalAccountsOperationsGetRequest object.
  Fields:
    operation: Name of the Operations resource to return.
    project: Project ID for this request.
  """
  # Both fields are required; presumably URL parameters of the API call.
  operation = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGlobalAccountsOperationsListRequest(_messages.Message):
  """A ClouduseraccountsGlobalAccountsOperationsListRequest object.
  Fields:
    filter: Filter expression of the form "field_name comparison_string
      literal_string", where comparison_string is eq or ne and string
      literals are RE2 regular expressions that must match the entire
      field. Nested fields (e.g. scheduling.automaticRestart) are
      supported; multiple parenthesized expressions are ANDed together.
    maxResults: Maximum number of results per page. When more results are
      available, a nextPageToken is returned for use in subsequent list
      requests.
    orderBy: Sort order for results. Default is alphanumerical by name;
      "creationTimestamp desc" sorts newest first. Only those two orders
      are supported.
    pageToken: Page token (the nextPageToken returned by a previous list
      request).
    project: Project ID for this request.
  """
  filter = _messages.StringField(1)
  maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
  orderBy = _messages.StringField(3)
  pageToken = _messages.StringField(4)
  project = _messages.StringField(5, required=True)
class ClouduseraccountsGroupsAddMemberRequest(_messages.Message):
  """A ClouduseraccountsGroupsAddMemberRequest object.
  Fields:
    groupName: Name of the group for this request.
    groupsAddMemberRequest: A GroupsAddMemberRequest resource to be passed as
      the request body.
    project: Project ID for this request.
  """
  groupName = _messages.StringField(1, required=True)
  # Nested message resource carrying the request body.
  groupsAddMemberRequest = _messages.MessageField('GroupsAddMemberRequest', 2)
  project = _messages.StringField(3, required=True)
class ClouduseraccountsGroupsDeleteRequest(_messages.Message):
  """A ClouduseraccountsGroupsDeleteRequest object.
  Fields:
    groupName: Name of the Group resource to delete.
    project: Project ID for this request.
  """
  # Both fields are required; presumably URL parameters of the API call.
  groupName = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGroupsGetRequest(_messages.Message):
  """A ClouduseraccountsGroupsGetRequest object.
  Fields:
    groupName: Name of the Group resource to return.
    project: Project ID for this request.
  """
  # Both fields are required; presumably URL parameters of the API call.
  groupName = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGroupsInsertRequest(_messages.Message):
  """A ClouduseraccountsGroupsInsertRequest object.
  Fields:
    group: A Group resource to be passed as the request body.
    project: Project ID for this request.
  """
  # Nested Group message carries the request body.
  group = _messages.MessageField('Group', 1)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGroupsListRequest(_messages.Message):
  """A ClouduseraccountsGroupsListRequest object.
  Fields:
    filter: Filter expression of the form "field_name comparison_string
      literal_string", where comparison_string is eq or ne and string
      literals are RE2 regular expressions that must match the entire
      field. Nested fields (e.g. scheduling.automaticRestart) are
      supported; multiple parenthesized expressions are ANDed together.
    maxResults: Maximum number of results per page. When more results are
      available, a nextPageToken is returned for use in subsequent list
      requests.
    orderBy: Sort order for results. Default is alphanumerical by name;
      "creationTimestamp desc" sorts newest first. Only those two orders
      are supported.
    pageToken: Page token (the nextPageToken returned by a previous list
      request).
    project: Project ID for this request.
  """
  filter = _messages.StringField(1)
  maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
  orderBy = _messages.StringField(3)
  pageToken = _messages.StringField(4)
  project = _messages.StringField(5, required=True)
class ClouduseraccountsGroupsRemoveMemberRequest(_messages.Message):
  """A ClouduseraccountsGroupsRemoveMemberRequest object.
  Fields:
    groupName: Name of the group for this request.
    groupsRemoveMemberRequest: A GroupsRemoveMemberRequest resource to be
      passed as the request body.
    project: Project ID for this request.
  """
  groupName = _messages.StringField(1, required=True)
  # Nested message resource carrying the request body.
  groupsRemoveMemberRequest = _messages.MessageField('GroupsRemoveMemberRequest', 2)
  project = _messages.StringField(3, required=True)
class ClouduseraccountsLinuxGetAuthorizedKeysViewRequest(_messages.Message):
  """A ClouduseraccountsLinuxGetAuthorizedKeysViewRequest object.
  Fields:
    instance: The fully-qualified URL of the virtual machine requesting the
      view.
    login: Whether the view was requested as part of a user-initiated login.
    project: Project ID for this request.
    user: The user account for which you want to get a list of authorized
      public keys.
    zone: Name of the zone for this request.
  """
  # All parameters except the optional 'login' flag are required.
  instance = _messages.StringField(1, required=True)
  login = _messages.BooleanField(2)
  project = _messages.StringField(3, required=True)
  user = _messages.StringField(4, required=True)
  zone = _messages.StringField(5, required=True)
class ClouduseraccountsLinuxGetLinuxAccountViewsRequest(_messages.Message):
  """A ClouduseraccountsLinuxGetLinuxAccountViewsRequest object.
  Fields:
    filter: Sets a filter expression for filtering listed resources, in the
      form filter={expression}. Your {expression} must be in the format:
      field_name comparison_string literal_string. The field_name is the name
      of the field you want to compare. Only atomic field types are supported
      (string, number, boolean). The comparison_string must be either eq
      (equals) or ne (not equals). The literal_string is the string value to
      filter to. The literal value must be valid for the type of field you are
      filtering by (string, number, boolean). For string fields, the literal
      value is interpreted as a regular expression using RE2 syntax. The
      literal value must match the entire field. For example, to filter for
      instances that do not have a name of example-instance, you would use
      filter=name ne example-instance. You can filter on nested fields. For
      example, you could filter on instances that have set the
      scheduling.automaticRestart field to true. Use filtering on nested
      fields to take advantage of labels to organize and search for results
      based on label values. To filter on multiple expressions, provide each
      separate expression within parentheses. For example,
      (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
      expressions are treated as AND expressions, meaning that resources must
      match all expressions to pass the filters.
    instance: The fully-qualified URL of the virtual machine requesting the
      views.
    maxResults: The maximum number of results per page that should be
      returned. If the number of available results is larger than maxResults,
      Compute Engine returns a nextPageToken that can be used to get the next
      page of results in subsequent list requests.
    orderBy: Sorts list results by a certain order. By default, results are
      returned in alphanumerical order based on the resource name. You can
      also sort results in descending order based on the creation timestamp
      using orderBy="creationTimestamp desc". This sorts results based on the
      creationTimestamp field in reverse chronological order (newest result
      first). Use this to sort resources like operations so that the newest
      operation is returned first. Currently, only sorting by name or
      creationTimestamp desc is supported.
    pageToken: Specifies a page token to use. Set pageToken to the
      nextPageToken returned by a previous list request to get the next page
      of results.
    project: Project ID for this request.
    zone: Name of the zone for this request.
  """
  # 'filter' intentionally shadows the builtin: the attribute name must match
  # the API query-parameter name.
  filter = _messages.StringField(1)
  instance = _messages.StringField(2, required=True)
  # Page size defaults to 500 results per page.
  maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32, default=500)
  orderBy = _messages.StringField(4)
  pageToken = _messages.StringField(5)
  project = _messages.StringField(6, required=True)
  zone = _messages.StringField(7, required=True)
class ClouduseraccountsUsersAddPublicKeyRequest(_messages.Message):
  """A ClouduseraccountsUsersAddPublicKeyRequest object.
  Fields:
    project: Project ID for this request.
    publicKey: A PublicKey resource to be passed as the request body.
    user: Name of the user for this request.
  """
  # project and user are required path/query parameters; publicKey is the
  # request body.
  project = _messages.StringField(1, required=True)
  publicKey = _messages.MessageField('PublicKey', 2)
  user = _messages.StringField(3, required=True)
class ClouduseraccountsUsersDeleteRequest(_messages.Message):
  """A ClouduseraccountsUsersDeleteRequest object.
  Fields:
    project: Project ID for this request.
    user: Name of the user resource to delete.
  """
  # Both parameters are required.
  project = _messages.StringField(1, required=True)
  user = _messages.StringField(2, required=True)
class ClouduseraccountsUsersGetRequest(_messages.Message):
  """A ClouduseraccountsUsersGetRequest object.
  Fields:
    project: Project ID for this request.
    user: Name of the user resource to return.
  """
  # Both parameters are required.
  project = _messages.StringField(1, required=True)
  user = _messages.StringField(2, required=True)
class ClouduseraccountsUsersInsertRequest(_messages.Message):
  """A ClouduseraccountsUsersInsertRequest object.
  Fields:
    project: Project ID for this request.
    user: A User resource to be passed as the request body.
  """
  # Unlike the get/delete requests, 'user' here is the User resource body,
  # not a name string.
  project = _messages.StringField(1, required=True)
  user = _messages.MessageField('User', 2)
class ClouduseraccountsUsersListRequest(_messages.Message):
  """A ClouduseraccountsUsersListRequest object.
  Fields:
    filter: Sets a filter expression for filtering listed resources, in the
      form filter={expression}. Your {expression} must be in the format:
      field_name comparison_string literal_string. The field_name is the name
      of the field you want to compare. Only atomic field types are supported
      (string, number, boolean). The comparison_string must be either eq
      (equals) or ne (not equals). The literal_string is the string value to
      filter to. The literal value must be valid for the type of field you are
      filtering by (string, number, boolean). For string fields, the literal
      value is interpreted as a regular expression using RE2 syntax. The
      literal value must match the entire field. For example, to filter for
      instances that do not have a name of example-instance, you would use
      filter=name ne example-instance. You can filter on nested fields. For
      example, you could filter on instances that have set the
      scheduling.automaticRestart field to true. Use filtering on nested
      fields to take advantage of labels to organize and search for results
      based on label values. To filter on multiple expressions, provide each
      separate expression within parentheses. For example,
      (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
      expressions are treated as AND expressions, meaning that resources must
      match all expressions to pass the filters.
    maxResults: The maximum number of results per page that should be
      returned. If the number of available results is larger than maxResults,
      Compute Engine returns a nextPageToken that can be used to get the next
      page of results in subsequent list requests.
    orderBy: Sorts list results by a certain order. By default, results are
      returned in alphanumerical order based on the resource name. You can
      also sort results in descending order based on the creation timestamp
      using orderBy="creationTimestamp desc". This sorts results based on the
      creationTimestamp field in reverse chronological order (newest result
      first). Use this to sort resources like operations so that the newest
      operation is returned first. Currently, only sorting by name or
      creationTimestamp desc is supported.
    pageToken: Specifies a page token to use. Set pageToken to the
      nextPageToken returned by a previous list request to get the next page
      of results.
    project: Project ID for this request.
  """
  # 'filter' intentionally shadows the builtin: the attribute name must match
  # the API query-parameter name.
  filter = _messages.StringField(1)
  # Page size defaults to 500 results per page.
  maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
  orderBy = _messages.StringField(3)
  pageToken = _messages.StringField(4)
  project = _messages.StringField(5, required=True)
class ClouduseraccountsUsersRemovePublicKeyRequest(_messages.Message):
  """A ClouduseraccountsUsersRemovePublicKeyRequest object.
  Fields:
    fingerprint: The fingerprint of the public key to delete. Public keys are
      identified by their fingerprint, which is defined by RFC4716 to be the
      MD5 digest of the public key.
    project: Project ID for this request.
    user: Name of the user for this request.
  """
  # All three parameters are required; keys are addressed by RFC4716
  # fingerprint rather than by key text.
  fingerprint = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
  user = _messages.StringField(3, required=True)
class Group(_messages.Message):
  """A Group resource.
  Fields:
    creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
      format.
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    id: [Output Only] Unique identifier for the resource; defined by the
      server.
    kind: [Output Only] Type of the resource. Always clouduseraccounts#group
      for groups.
    members: [Output Only] A list of URLs to User resources who belong to the
      group. Users may only be members of groups in the same project.
    name: Name of the resource; provided by the client when the resource is
      created.
    selfLink: [Output Only] Server defined URL for the resource.
  """
  creationTimestamp = _messages.StringField(1)
  description = _messages.StringField(2)
  # 'id' intentionally shadows the builtin: it mirrors the API field name.
  id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
  kind = _messages.StringField(4, default=u'clouduseraccounts#group')
  members = _messages.StringField(5, repeated=True)
  name = _messages.StringField(6)
  selfLink = _messages.StringField(7)
class GroupList(_messages.Message):
  """A GroupList object.
  Fields:
    id: [Output Only] Unique identifier for the resource; defined by the
      server.
    items: [Output Only] A list of Group resources.
    kind: [Output Only] Type of resource. Always clouduseraccounts#groupList
      for lists of groups.
    nextPageToken: [Output Only] A token used to continue a truncated list
      request.
    selfLink: [Output Only] Server defined URL for this resource.
  """
  # Paged list envelope: pass nextPageToken back as pageToken to continue.
  id = _messages.StringField(1)
  items = _messages.MessageField('Group', 2, repeated=True)
  kind = _messages.StringField(3, default=u'clouduseraccounts#groupList')
  nextPageToken = _messages.StringField(4)
  selfLink = _messages.StringField(5)
class GroupsAddMemberRequest(_messages.Message):
  """A GroupsAddMemberRequest object.
  Fields:
    users: Fully-qualified URLs of the User resources to add.
  """
  # Request body for the groups addMember call.
  users = _messages.StringField(1, repeated=True)
class GroupsRemoveMemberRequest(_messages.Message):
  """A GroupsRemoveMemberRequest object.
  Fields:
    users: Fully-qualified URLs of the User resources to remove.
  """
  # Request body for the groups removeMember call.
  users = _messages.StringField(1, repeated=True)
class LinuxAccountViews(_messages.Message):
  """A list of all Linux accounts for this project. This API is only used by
  Compute Engine virtual machines to get information about user accounts for a
  project or instance. Linux resources are read-only views into users and
  groups managed by the Compute Engine Accounts API.
  Fields:
    groupViews: [Output Only] A list of all groups within a project.
    kind: [Output Only] Type of the resource. Always
      clouduseraccounts#linuxAccountViews for Linux resources.
    userViews: [Output Only] A list of all users within a project.
  """
  # Read-only aggregate of LinuxGroupView / LinuxUserView resources.
  groupViews = _messages.MessageField('LinuxGroupView', 1, repeated=True)
  kind = _messages.StringField(2, default=u'clouduseraccounts#linuxAccountViews')
  userViews = _messages.MessageField('LinuxUserView', 3, repeated=True)
class LinuxGetAuthorizedKeysViewResponse(_messages.Message):
  """A LinuxGetAuthorizedKeysViewResponse object.
  Fields:
    resource: [Output Only] A list of authorized public keys for a user.
  """
  # Single-field envelope wrapping an AuthorizedKeysView.
  resource = _messages.MessageField('AuthorizedKeysView', 1)
class LinuxGetLinuxAccountViewsResponse(_messages.Message):
  """A LinuxGetLinuxAccountViewsResponse object.
  Fields:
    resource: [Output Only] A list of authorized user accounts and groups.
  """
  # Single-field envelope wrapping a LinuxAccountViews.
  resource = _messages.MessageField('LinuxAccountViews', 1)
class LinuxGroupView(_messages.Message):
  """A detailed view of a Linux group.
  Fields:
    gid: [Output Only] The Group ID.
    groupName: [Output Only] Group name.
    members: [Output Only] List of user accounts that belong to the group.
  """
  # Read-only group entry, analogous to an /etc/group record.
  gid = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
  groupName = _messages.StringField(2)
  members = _messages.StringField(3, repeated=True)
class LinuxUserView(_messages.Message):
  """A detailed view of a Linux user account.
  Fields:
    gecos: [Output Only] The GECOS (user information) entry for this account.
    gid: [Output Only] User's default group ID.
    homeDirectory: [Output Only] The path to the home directory for this
      account.
    shell: [Output Only] The path to the login shell for this account.
    uid: [Output Only] User ID.
    username: [Output Only] The username of the account.
  """
  # Read-only user entry, analogous to an /etc/passwd record.
  gecos = _messages.StringField(1)
  gid = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
  homeDirectory = _messages.StringField(3)
  shell = _messages.StringField(4)
  uid = _messages.IntegerField(5, variant=_messages.Variant.UINT32)
  username = _messages.StringField(6)
class Operation(_messages.Message):
  """An Operation resource, used to manage asynchronous API requests.
  Enums:
    StatusValueValuesEnum: [Output Only] The status of the operation, which
      can be one of the following: PENDING, RUNNING, or DONE.
  Messages:
    ErrorValue: [Output Only] If errors are generated during processing of the
      operation, this field will be populated.
    WarningsValueListEntry: A WarningsValueListEntry object.
  Fields:
    clientOperationId: [Output Only] Reserved for future use.
    creationTimestamp: [Deprecated] This field is deprecated.
    description: [Output Only] A textual description of the operation, which
      is set when the operation is created.
    endTime: [Output Only] The time that this operation was completed. This
      value is in RFC3339 text format.
    error: [Output Only] If errors are generated during processing of the
      operation, this field will be populated.
    httpErrorMessage: [Output Only] If the operation fails, this field
      contains the HTTP error message that was returned, such as NOT FOUND.
    httpErrorStatusCode: [Output Only] If the operation fails, this field
      contains the HTTP error status code that was returned. For example, a
      404 means the resource was not found.
    id: [Output Only] The unique identifier for the resource. This identifier
      is defined by the server.
    insertTime: [Output Only] The time that this operation was requested. This
      value is in RFC3339 text format.
    kind: [Output Only] Type of the resource. Always compute#operation for
      Operation resources.
    name: [Output Only] Name of the resource.
    operationType: [Output Only] The type of operation, such as insert,
      update, or delete, and so on.
    progress: [Output Only] An optional progress indicator that ranges from 0
      to 100. There is no requirement that this be linear or support any
      granularity of operations. This should not be used to guess when the
      operation will be complete. This number should monotonically increase as
      the operation progresses.
    region: [Output Only] The URL of the region where the operation resides.
      Only available when performing regional operations.
    selfLink: [Output Only] Server-defined URL for the resource.
    startTime: [Output Only] The time that this operation was started by the
      server. This value is in RFC3339 text format.
    status: [Output Only] The status of the operation, which can be one of the
      following: PENDING, RUNNING, or DONE.
    statusMessage: [Output Only] An optional textual description of the
      current status of the operation.
    targetId: [Output Only] The unique target ID, which identifies a specific
      incarnation of the target resource.
    targetLink: [Output Only] The URL of the resource that the operation
      modifies. For operations related to creating a snapshot, this points to
      the persistent disk that the snapshot was created from.
    user: [Output Only] User who requested the operation, for example:
      <EMAIL>.
    warnings: [Output Only] If warning messages are generated during
      processing of the operation, this field will be populated.
    zone: [Output Only] The URL of the zone where the operation resides. Only
      available when performing per-zone operations.
  """
  class StatusValueValuesEnum(_messages.Enum):
    """[Output Only] The status of the operation, which can be one of the
    following: PENDING, RUNNING, or DONE.
    Values:
      DONE: <no description>
      PENDING: <no description>
      RUNNING: <no description>
    """
    DONE = 0
    PENDING = 1
    RUNNING = 2
  class ErrorValue(_messages.Message):
    """[Output Only] If errors are generated during processing of the
    operation, this field will be populated.
    Messages:
      ErrorsValueListEntry: A ErrorsValueListEntry object.
    Fields:
      errors: [Output Only] The array of errors encountered while processing
        this operation.
    """
    class ErrorsValueListEntry(_messages.Message):
      """A ErrorsValueListEntry object.
      Fields:
        code: [Output Only] The error type identifier for this error.
        location: [Output Only] Indicates the field in the request that caused
          the error. This property is optional.
        message: [Output Only] An optional, human-readable error message.
      """
      code = _messages.StringField(1)
      location = _messages.StringField(2)
      message = _messages.StringField(3)
    errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
  class WarningsValueListEntry(_messages.Message):
    """A WarningsValueListEntry object.
    Enums:
      CodeValueValuesEnum: [Output Only] A warning code, if applicable. For
        example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no
        results in the response.
    Messages:
      DataValueListEntry: A DataValueListEntry object.
    Fields:
      code: [Output Only] A warning code, if applicable. For example, Compute
        Engine returns NO_RESULTS_ON_PAGE if there are no results in the
        response.
      data: [Output Only] Metadata about this warning in key: value format.
        For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
      message: [Output Only] A human-readable description of the warning code.
    """
    class CodeValueValuesEnum(_messages.Enum):
      """[Output Only] A warning code, if applicable. For example, Compute
      Engine returns NO_RESULTS_ON_PAGE if there are no results in the
      response.
      Values:
        CLEANUP_FAILED: <no description>
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        FIELD_VALUE_OVERRIDEN: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NOT_CRITICAL_ERROR: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING: <no description>
        RESOURCE_NOT_DELETED: <no description>
        SINGLE_INSTANCE_PROPERTY_TEMPLATE: <no description>
        UNREACHABLE: <no description>
      """
      CLEANUP_FAILED = 0
      DEPRECATED_RESOURCE_USED = 1
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 2
      # NOTE(review): "OVERRIDEN" misspelling is part of the generated API
      # surface; renaming it would break serialization/callers.
      FIELD_VALUE_OVERRIDEN = 3
      INJECTED_KERNELS_DEPRECATED = 4
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 5
      NEXT_HOP_CANNOT_IP_FORWARD = 6
      NEXT_HOP_INSTANCE_NOT_FOUND = 7
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 8
      NEXT_HOP_NOT_RUNNING = 9
      NOT_CRITICAL_ERROR = 10
      NO_RESULTS_ON_PAGE = 11
      REQUIRED_TOS_AGREEMENT = 12
      RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING = 13
      RESOURCE_NOT_DELETED = 14
      SINGLE_INSTANCE_PROPERTY_TEMPLATE = 15
      UNREACHABLE = 16
    class DataValueListEntry(_messages.Message):
      """A DataValueListEntry object.
      Fields:
        key: [Output Only] A key that provides more detail on the warning
          being returned. For example, for warnings where there are no results
          in a list request for a particular zone, this key might be scope and
          the key value might be the zone name. Other examples might be a key
          indicating a deprecated resource and a suggested replacement, or a
          warning about invalid network settings (for example, if an instance
          attempts to perform IP forwarding but is not enabled for IP
          forwarding).
        value: [Output Only] A warning data value corresponding to the key.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    code = _messages.EnumField('CodeValueValuesEnum', 1)
    data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = _messages.StringField(3)
  clientOperationId = _messages.StringField(1)
  creationTimestamp = _messages.StringField(2)
  description = _messages.StringField(3)
  endTime = _messages.StringField(4)
  error = _messages.MessageField('ErrorValue', 5)
  httpErrorMessage = _messages.StringField(6)
  httpErrorStatusCode = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  # 'id' intentionally shadows the builtin: it mirrors the API field name.
  id = _messages.IntegerField(8, variant=_messages.Variant.UINT64)
  insertTime = _messages.StringField(9)
  # NOTE(review): the docstring above says the kind is always
  # "compute#operation", but the generated default here is
  # 'clouduseraccounts#operation' -- upstream discovery-doc inconsistency;
  # the default is authoritative for this API.
  kind = _messages.StringField(10, default=u'clouduseraccounts#operation')
  name = _messages.StringField(11)
  operationType = _messages.StringField(12)
  progress = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  region = _messages.StringField(14)
  selfLink = _messages.StringField(15)
  startTime = _messages.StringField(16)
  status = _messages.EnumField('StatusValueValuesEnum', 17)
  statusMessage = _messages.StringField(18)
  targetId = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
  targetLink = _messages.StringField(20)
  user = _messages.StringField(21)
  warnings = _messages.MessageField('WarningsValueListEntry', 22, repeated=True)
  zone = _messages.StringField(23)
class OperationList(_messages.Message):
  """Contains a list of Operation resources.
  Fields:
    id: [Output Only] The unique identifier for the resource. This identifier
      is defined by the server.
    items: [Output Only] A list of Operation resources.
    kind: [Output Only] Type of resource. Always compute#operations for
      Operations resource.
    nextPageToken: [Output Only] This token allows you to get the next page of
      results for list requests. If the number of results is larger than
      maxResults, use the nextPageToken as a value for the query parameter
      pageToken in the next list request. Subsequent list requests will have
      their own nextPageToken to continue paging through the results.
    selfLink: [Output Only] Server-defined URL for this resource.
  """
  id = _messages.StringField(1)
  items = _messages.MessageField('Operation', 2, repeated=True)
  # NOTE(review): docstring says "compute#operations" but the generated
  # default is 'clouduseraccounts#operationList' -- upstream discovery-doc
  # inconsistency; the default is authoritative for this API.
  kind = _messages.StringField(3, default=u'clouduseraccounts#operationList')
  nextPageToken = _messages.StringField(4)
  selfLink = _messages.StringField(5)
class PublicKey(_messages.Message):
  """A public key for authenticating to guests.
  Fields:
    creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
      format.
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    expirationTimestamp: Optional expiration timestamp. If provided, the
      timestamp must be in RFC3339 text format. If not provided, the public
      key never expires.
    fingerprint: [Output Only] The fingerprint of the key is defined by
      RFC4716 to be the MD5 digest of the public key.
    key: Public key text in SSH format, defined by RFC4253 section 6.6.
  """
  # fingerprint is server-computed; clients supply 'key' (and optionally
  # description/expirationTimestamp).
  creationTimestamp = _messages.StringField(1)
  description = _messages.StringField(2)
  expirationTimestamp = _messages.StringField(3)
  fingerprint = _messages.StringField(4)
  key = _messages.StringField(5)
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.
  Enums:
    AltValueValuesEnum: Data format for the response.
  Fields:
    alt: Data format for the response.
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters. Overrides userIp if both are provided.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    userIp: IP address of the site where the request originates. Use this if
      you want to enforce per-user limits.
  """
  class AltValueValuesEnum(_messages.Enum):
    """Data format for the response.
    Values:
      json: Responses with Content-Type of application/json
    """
    # Lowercase enum member matches the literal 'alt=json' query value.
    json = 0
  # Defaults: JSON responses, pretty-printed.
  alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
  fields = _messages.StringField(2)
  key = _messages.StringField(3)
  oauth_token = _messages.StringField(4)
  prettyPrint = _messages.BooleanField(5, default=True)
  quotaUser = _messages.StringField(6)
  trace = _messages.StringField(7)
  userIp = _messages.StringField(8)
class User(_messages.Message):
  """A User resource.
  Fields:
    creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
      format.
    description: An optional textual description of the resource; provided by
      the client when the resource is created.
    groups: [Output Only] A list of URLs to Group resources who contain the
      user. Users are only members of groups in the same project.
    id: [Output Only] Unique identifier for the resource; defined by the
      server.
    kind: [Output Only] Type of the resource. Always clouduseraccounts#user
      for users.
    name: Name of the resource; provided by the client when the resource is
      created.
    owner: Email address of account's owner. This account will be validated to
      make sure it exists. The email can belong to any domain, but it must be
      tied to a Google account.
    publicKeys: [Output Only] Public keys that this user may use to login.
    selfLink: [Output Only] Server defined URL for the resource.
  """
  creationTimestamp = _messages.StringField(1)
  description = _messages.StringField(2)
  groups = _messages.StringField(3, repeated=True)
  # 'id' intentionally shadows the builtin: it mirrors the API field name.
  id = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
  kind = _messages.StringField(5, default=u'clouduseraccounts#user')
  name = _messages.StringField(6)
  owner = _messages.StringField(7)
  publicKeys = _messages.MessageField('PublicKey', 8, repeated=True)
  selfLink = _messages.StringField(9)
class UserList(_messages.Message):
  """A UserList object.
  Fields:
    id: [Output Only] Unique identifier for the resource; defined by the
      server.
    items: [Output Only] A list of User resources.
    kind: [Output Only] Type of resource. Always clouduseraccounts#userList
      for lists of users.
    nextPageToken: [Output Only] A token used to continue a truncated list
      request.
    selfLink: [Output Only] Server defined URL for this resource.
  """
  # Paged list envelope: pass nextPageToken back as pageToken to continue.
  id = _messages.StringField(1)
  items = _messages.MessageField('User', 2, repeated=True)
  kind = _messages.StringField(3, default=u'clouduseraccounts#userList')
  nextPageToken = _messages.StringField(4)
  selfLink = _messages.StringField(5)
from apitools.base.protorpclite import messages as _messages
# API package name used by the generated client to group these messages.
package = 'clouduseraccounts'
class AuthorizedKeysView(_messages.Message):
  """A list of authorized public keys for a user account.
  Fields:
    keys: [Output Only] The list of authorized public keys in SSH format.
    sudoer: [Output Only] Whether the user has the ability to elevate on the
      instance that requested the authorized keys.
  """
  # Read-only view; both fields are server-populated.
  keys = _messages.StringField(1, repeated=True)
  sudoer = _messages.BooleanField(2)
class ClouduseraccountsGlobalAccountsOperationsDeleteRequest(_messages.Message):
  """A ClouduseraccountsGlobalAccountsOperationsDeleteRequest object.
  Fields:
    operation: Name of the Operations resource to delete.
    project: Project ID for this request.
  """
  # Both parameters are required.
  operation = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGlobalAccountsOperationsDeleteResponse(_messages.Message):
  """An empty ClouduseraccountsGlobalAccountsOperationsDelete response."""
  # Intentionally has no fields: the delete call returns an empty body.
class ClouduseraccountsGlobalAccountsOperationsGetRequest(_messages.Message):
  """A ClouduseraccountsGlobalAccountsOperationsGetRequest object.
  Fields:
    operation: Name of the Operations resource to return.
    project: Project ID for this request.
  """
  # Both parameters are required.
  operation = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGlobalAccountsOperationsListRequest(_messages.Message):
  """A ClouduseraccountsGlobalAccountsOperationsListRequest object.
  Fields:
    filter: Sets a filter expression for filtering listed resources, in the
      form filter={expression}. Your {expression} must be in the format:
      field_name comparison_string literal_string. The field_name is the name
      of the field you want to compare. Only atomic field types are supported
      (string, number, boolean). The comparison_string must be either eq
      (equals) or ne (not equals). The literal_string is the string value to
      filter to. The literal value must be valid for the type of field you are
      filtering by (string, number, boolean). For string fields, the literal
      value is interpreted as a regular expression using RE2 syntax. The
      literal value must match the entire field. For example, to filter for
      instances that do not have a name of example-instance, you would use
      filter=name ne example-instance. You can filter on nested fields. For
      example, you could filter on instances that have set the
      scheduling.automaticRestart field to true. Use filtering on nested
      fields to take advantage of labels to organize and search for results
      based on label values. To filter on multiple expressions, provide each
      separate expression within parentheses. For example,
      (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
      expressions are treated as AND expressions, meaning that resources must
      match all expressions to pass the filters.
    maxResults: The maximum number of results per page that should be
      returned. If the number of available results is larger than maxResults,
      Compute Engine returns a nextPageToken that can be used to get the next
      page of results in subsequent list requests.
    orderBy: Sorts list results by a certain order. By default, results are
      returned in alphanumerical order based on the resource name. You can
      also sort results in descending order based on the creation timestamp
      using orderBy="creationTimestamp desc". This sorts results based on the
      creationTimestamp field in reverse chronological order (newest result
      first). Use this to sort resources like operations so that the newest
      operation is returned first. Currently, only sorting by name or
      creationTimestamp desc is supported.
    pageToken: Specifies a page token to use. Set pageToken to the
      nextPageToken returned by a previous list request to get the next page
      of results.
    project: Project ID for this request.
  """
  # 'filter' intentionally shadows the builtin: the attribute name must match
  # the API query-parameter name.
  filter = _messages.StringField(1)
  # Page size defaults to 500 results per page.
  maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
  orderBy = _messages.StringField(3)
  pageToken = _messages.StringField(4)
  project = _messages.StringField(5, required=True)
class ClouduseraccountsGroupsAddMemberRequest(_messages.Message):
  """A ClouduseraccountsGroupsAddMemberRequest object.
  Fields:
    groupName: Name of the group for this request.
    groupsAddMemberRequest: A GroupsAddMemberRequest resource to be passed as
      the request body.
    project: Project ID for this request.
  """
  # groupName and project are mandatory request parameters;
  # groupsAddMemberRequest carries the request body.
  groupName = _messages.StringField(1, required=True)
  groupsAddMemberRequest = _messages.MessageField('GroupsAddMemberRequest', 2)
  project = _messages.StringField(3, required=True)
class ClouduseraccountsGroupsDeleteRequest(_messages.Message):
  """A ClouduseraccountsGroupsDeleteRequest object.
  Fields:
    groupName: Name of the Group resource to delete.
    project: Project ID for this request.
  """
  # Both parameters are required.
  groupName = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGroupsGetRequest(_messages.Message):
  """A ClouduseraccountsGroupsGetRequest object.
  Fields:
    groupName: Name of the Group resource to return.
    project: Project ID for this request.
  """
  # Both parameters are required.
  groupName = _messages.StringField(1, required=True)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGroupsInsertRequest(_messages.Message):
  """A ClouduseraccountsGroupsInsertRequest object.
  Fields:
    group: A Group resource to be passed as the request body.
    project: Project ID for this request.
  """
  # 'group' is the Group resource body; 'project' is a required parameter.
  group = _messages.MessageField('Group', 1)
  project = _messages.StringField(2, required=True)
class ClouduseraccountsGroupsListRequest(_messages.Message):
"""A ClouduseraccountsGroupsListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests.
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: Project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class ClouduseraccountsGroupsRemoveMemberRequest(_messages.Message):
"""A ClouduseraccountsGroupsRemoveMemberRequest object.
Fields:
groupName: Name of the group for this request.
groupsRemoveMemberRequest: A GroupsRemoveMemberRequest resource to be
passed as the request body.
project: Project ID for this request.
"""
groupName = _messages.StringField(1, required=True)
groupsRemoveMemberRequest = _messages.MessageField('GroupsRemoveMemberRequest', 2)
project = _messages.StringField(3, required=True)
class ClouduseraccountsLinuxGetAuthorizedKeysViewRequest(_messages.Message):
"""A ClouduseraccountsLinuxGetAuthorizedKeysViewRequest object.
Fields:
instance: The fully-qualified URL of the virtual machine requesting the
view.
login: Whether the view was requested as part of a user-initiated login.
project: Project ID for this request.
user: The user account for which you want to get a list of authorized
public keys.
zone: Name of the zone for this request.
"""
instance = _messages.StringField(1, required=True)
login = _messages.BooleanField(2)
project = _messages.StringField(3, required=True)
user = _messages.StringField(4, required=True)
zone = _messages.StringField(5, required=True)
class ClouduseraccountsLinuxGetLinuxAccountViewsRequest(_messages.Message):
"""A ClouduseraccountsLinuxGetLinuxAccountViewsRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
instance: The fully-qualified URL of the virtual machine requesting the
views.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests.
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: Project ID for this request.
zone: Name of the zone for this request.
"""
filter = _messages.StringField(1)
instance = _messages.StringField(2, required=True)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(4)
pageToken = _messages.StringField(5)
project = _messages.StringField(6, required=True)
zone = _messages.StringField(7, required=True)
class ClouduseraccountsUsersAddPublicKeyRequest(_messages.Message):
"""A ClouduseraccountsUsersAddPublicKeyRequest object.
Fields:
project: Project ID for this request.
publicKey: A PublicKey resource to be passed as the request body.
user: Name of the user for this request.
"""
project = _messages.StringField(1, required=True)
publicKey = _messages.MessageField('PublicKey', 2)
user = _messages.StringField(3, required=True)
class ClouduseraccountsUsersDeleteRequest(_messages.Message):
"""A ClouduseraccountsUsersDeleteRequest object.
Fields:
project: Project ID for this request.
user: Name of the user resource to delete.
"""
project = _messages.StringField(1, required=True)
user = _messages.StringField(2, required=True)
class ClouduseraccountsUsersGetRequest(_messages.Message):
"""A ClouduseraccountsUsersGetRequest object.
Fields:
project: Project ID for this request.
user: Name of the user resource to return.
"""
project = _messages.StringField(1, required=True)
user = _messages.StringField(2, required=True)
class ClouduseraccountsUsersInsertRequest(_messages.Message):
"""A ClouduseraccountsUsersInsertRequest object.
Fields:
project: Project ID for this request.
user: A User resource to be passed as the request body.
"""
project = _messages.StringField(1, required=True)
user = _messages.MessageField('User', 2)
class ClouduseraccountsUsersListRequest(_messages.Message):
"""A ClouduseraccountsUsersListRequest object.
Fields:
filter: Sets a filter expression for filtering listed resources, in the
form filter={expression}. Your {expression} must be in the format:
field_name comparison_string literal_string. The field_name is the name
of the field you want to compare. Only atomic field types are supported
(string, number, boolean). The comparison_string must be either eq
(equals) or ne (not equals). The literal_string is the string value to
filter to. The literal value must be valid for the type of field you are
filtering by (string, number, boolean). For string fields, the literal
value is interpreted as a regular expression using RE2 syntax. The
literal value must match the entire field. For example, to filter for
instances that do not have a name of example-instance, you would use
filter=name ne example-instance. You can filter on nested fields. For
example, you could filter on instances that have set the
scheduling.automaticRestart field to true. Use filtering on nested
fields to take advantage of labels to organize and search for results
based on label values. To filter on multiple expressions, provide each
separate expression within parentheses. For example,
(scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple
expressions are treated as AND expressions, meaning that resources must
match all expressions to pass the filters.
maxResults: The maximum number of results per page that should be
returned. If the number of available results is larger than maxResults,
Compute Engine returns a nextPageToken that can be used to get the next
page of results in subsequent list requests.
orderBy: Sorts list results by a certain order. By default, results are
returned in alphanumerical order based on the resource name. You can
also sort results in descending order based on the creation timestamp
using orderBy="creationTimestamp desc". This sorts results based on the
creationTimestamp field in reverse chronological order (newest result
first). Use this to sort resources like operations so that the newest
operation is returned first. Currently, only sorting by name or
creationTimestamp desc is supported.
pageToken: Specifies a page token to use. Set pageToken to the
nextPageToken returned by a previous list request to get the next page
of results.
project: Project ID for this request.
"""
filter = _messages.StringField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32, default=500)
orderBy = _messages.StringField(3)
pageToken = _messages.StringField(4)
project = _messages.StringField(5, required=True)
class ClouduseraccountsUsersRemovePublicKeyRequest(_messages.Message):
"""A ClouduseraccountsUsersRemovePublicKeyRequest object.
Fields:
fingerprint: The fingerprint of the public key to delete. Public keys are
identified by their fingerprint, which is defined by RFC4716 to be the
MD5 digest of the public key.
project: Project ID for this request.
user: Name of the user for this request.
"""
fingerprint = _messages.StringField(1, required=True)
project = _messages.StringField(2, required=True)
user = _messages.StringField(3, required=True)
class Group(_messages.Message):
"""A Group resource.
Fields:
creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
format.
description: An optional textual description of the resource; provided by
the client when the resource is created.
id: [Output Only] Unique identifier for the resource; defined by the
server.
kind: [Output Only] Type of the resource. Always clouduseraccounts#group
for groups.
members: [Output Only] A list of URLs to User resources who belong to the
group. Users may only be members of groups in the same project.
name: Name of the resource; provided by the client when the resource is
created.
selfLink: [Output Only] Server defined URL for the resource.
"""
creationTimestamp = _messages.StringField(1)
description = _messages.StringField(2)
id = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
kind = _messages.StringField(4, default=u'clouduseraccounts#group')
members = _messages.StringField(5, repeated=True)
name = _messages.StringField(6)
selfLink = _messages.StringField(7)
class GroupList(_messages.Message):
"""A GroupList object.
Fields:
id: [Output Only] Unique identifier for the resource; defined by the
server.
items: [Output Only] A list of Group resources.
kind: [Output Only] Type of resource. Always clouduseraccounts#groupList
for lists of groups.
nextPageToken: [Output Only] A token used to continue a truncated list
request.
selfLink: [Output Only] Server defined URL for this resource.
"""
id = _messages.StringField(1)
items = _messages.MessageField('Group', 2, repeated=True)
kind = _messages.StringField(3, default=u'clouduseraccounts#groupList')
nextPageToken = _messages.StringField(4)
selfLink = _messages.StringField(5)
class GroupsAddMemberRequest(_messages.Message):
"""A GroupsAddMemberRequest object.
Fields:
users: Fully-qualified URLs of the User resources to add.
"""
users = _messages.StringField(1, repeated=True)
class GroupsRemoveMemberRequest(_messages.Message):
"""A GroupsRemoveMemberRequest object.
Fields:
users: Fully-qualified URLs of the User resources to remove.
"""
users = _messages.StringField(1, repeated=True)
class LinuxAccountViews(_messages.Message):
"""A list of all Linux accounts for this project. This API is only used by
Compute Engine virtual machines to get information about user accounts for a
project or instance. Linux resources are read-only views into users and
groups managed by the Compute Engine Accounts API.
Fields:
groupViews: [Output Only] A list of all groups within a project.
kind: [Output Only] Type of the resource. Always
clouduseraccounts#linuxAccountViews for Linux resources.
userViews: [Output Only] A list of all users within a project.
"""
groupViews = _messages.MessageField('LinuxGroupView', 1, repeated=True)
kind = _messages.StringField(2, default=u'clouduseraccounts#linuxAccountViews')
userViews = _messages.MessageField('LinuxUserView', 3, repeated=True)
class LinuxGetAuthorizedKeysViewResponse(_messages.Message):
"""A LinuxGetAuthorizedKeysViewResponse object.
Fields:
resource: [Output Only] A list of authorized public keys for a user.
"""
resource = _messages.MessageField('AuthorizedKeysView', 1)
class LinuxGetLinuxAccountViewsResponse(_messages.Message):
"""A LinuxGetLinuxAccountViewsResponse object.
Fields:
resource: [Output Only] A list of authorized user accounts and groups.
"""
resource = _messages.MessageField('LinuxAccountViews', 1)
class LinuxGroupView(_messages.Message):
"""A detailed view of a Linux group.
Fields:
gid: [Output Only] The Group ID.
groupName: [Output Only] Group name.
members: [Output Only] List of user accounts that belong to the group.
"""
gid = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
groupName = _messages.StringField(2)
members = _messages.StringField(3, repeated=True)
class LinuxUserView(_messages.Message):
"""A detailed view of a Linux user account.
Fields:
gecos: [Output Only] The GECOS (user information) entry for this account.
gid: [Output Only] User's default group ID.
homeDirectory: [Output Only] The path to the home directory for this
account.
shell: [Output Only] The path to the login shell for this account.
uid: [Output Only] User ID.
username: [Output Only] The username of the account.
"""
gecos = _messages.StringField(1)
gid = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
homeDirectory = _messages.StringField(3)
shell = _messages.StringField(4)
uid = _messages.IntegerField(5, variant=_messages.Variant.UINT32)
username = _messages.StringField(6)
class Operation(_messages.Message):
"""An Operation resource, used to manage asynchronous API requests.
Enums:
StatusValueValuesEnum: [Output Only] The status of the operation, which
can be one of the following: PENDING, RUNNING, or DONE.
Messages:
ErrorValue: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
WarningsValueListEntry: A WarningsValueListEntry object.
Fields:
clientOperationId: [Output Only] Reserved for future use.
creationTimestamp: [Deprecated] This field is deprecated.
description: [Output Only] A textual description of the operation, which
is set when the operation is created.
endTime: [Output Only] The time that this operation was completed. This
value is in RFC3339 text format.
error: [Output Only] If errors are generated during processing of the
operation, this field will be populated.
httpErrorMessage: [Output Only] If the operation fails, this field
contains the HTTP error message that was returned, such as NOT FOUND.
httpErrorStatusCode: [Output Only] If the operation fails, this field
contains the HTTP error status code that was returned. For example, a
404 means the resource was not found.
id: [Output Only] The unique identifier for the resource. This identifier
is defined by the server.
insertTime: [Output Only] The time that this operation was requested. This
value is in RFC3339 text format.
kind: [Output Only] Type of the resource. Always compute#operation for
Operation resources.
name: [Output Only] Name of the resource.
operationType: [Output Only] The type of operation, such as insert,
update, or delete, and so on.
progress: [Output Only] An optional progress indicator that ranges from 0
to 100. There is no requirement that this be linear or support any
granularity of operations. This should not be used to guess when the
operation will be complete. This number should monotonically increase as
the operation progresses.
region: [Output Only] The URL of the region where the operation resides.
Only available when performing regional operations.
selfLink: [Output Only] Server-defined URL for the resource.
startTime: [Output Only] The time that this operation was started by the
server. This value is in RFC3339 text format.
status: [Output Only] The status of the operation, which can be one of the
following: PENDING, RUNNING, or DONE.
statusMessage: [Output Only] An optional textual description of the
current status of the operation.
targetId: [Output Only] The unique target ID, which identifies a specific
incarnation of the target resource.
targetLink: [Output Only] The URL of the resource that the operation
modifies. For operations related to creating a snapshot, this points to
the persistent disk that the snapshot was created from.
user: [Output Only] User who requested the operation, for example:
<EMAIL>.
warnings: [Output Only] If warning messages are generated during
processing of the operation, this field will be populated.
zone: [Output Only] The URL of the zone where the operation resides. Only
available when performing per-zone operations.
"""
class StatusValueValuesEnum(_messages.Enum):
"""[Output Only] The status of the operation, which can be one of the
following: PENDING, RUNNING, or DONE.
Values:
DONE: <no description>
PENDING: <no description>
RUNNING: <no description>
"""
DONE = 0
PENDING = 1
RUNNING = 2
class ErrorValue(_messages.Message):
"""[Output Only] If errors are generated during processing of the
operation, this field will be populated.
Messages:
ErrorsValueListEntry: A ErrorsValueListEntry object.
Fields:
errors: [Output Only] The array of errors encountered while processing
this operation.
"""
class ErrorsValueListEntry(_messages.Message):
"""A ErrorsValueListEntry object.
Fields:
code: [Output Only] The error type identifier for this error.
location: [Output Only] Indicates the field in the request that caused
the error. This property is optional.
message: [Output Only] An optional, human-readable error message.
"""
code = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)
class WarningsValueListEntry(_messages.Message):
"""A WarningsValueListEntry object.
Enums:
CodeValueValuesEnum: [Output Only] A warning code, if applicable. For
example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no
results in the response.
Messages:
DataValueListEntry: A DataValueListEntry object.
Fields:
code: [Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
data: [Output Only] Metadata about this warning in key: value format.
For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
message: [Output Only] A human-readable description of the warning code.
"""
class CodeValueValuesEnum(_messages.Enum):
"""[Output Only] A warning code, if applicable. For example, Compute
Engine returns NO_RESULTS_ON_PAGE if there are no results in the
response.
Values:
CLEANUP_FAILED: <no description>
DEPRECATED_RESOURCE_USED: <no description>
DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
FIELD_VALUE_OVERRIDEN: <no description>
INJECTED_KERNELS_DEPRECATED: <no description>
NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
NEXT_HOP_CANNOT_IP_FORWARD: <no description>
NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
NEXT_HOP_NOT_RUNNING: <no description>
NOT_CRITICAL_ERROR: <no description>
NO_RESULTS_ON_PAGE: <no description>
REQUIRED_TOS_AGREEMENT: <no description>
RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING: <no description>
RESOURCE_NOT_DELETED: <no description>
SINGLE_INSTANCE_PROPERTY_TEMPLATE: <no description>
UNREACHABLE: <no description>
"""
CLEANUP_FAILED = 0
DEPRECATED_RESOURCE_USED = 1
DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 2
FIELD_VALUE_OVERRIDEN = 3
INJECTED_KERNELS_DEPRECATED = 4
NEXT_HOP_ADDRESS_NOT_ASSIGNED = 5
NEXT_HOP_CANNOT_IP_FORWARD = 6
NEXT_HOP_INSTANCE_NOT_FOUND = 7
NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 8
NEXT_HOP_NOT_RUNNING = 9
NOT_CRITICAL_ERROR = 10
NO_RESULTS_ON_PAGE = 11
REQUIRED_TOS_AGREEMENT = 12
RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING = 13
RESOURCE_NOT_DELETED = 14
SINGLE_INSTANCE_PROPERTY_TEMPLATE = 15
UNREACHABLE = 16
class DataValueListEntry(_messages.Message):
"""A DataValueListEntry object.
Fields:
key: [Output Only] A key that provides more detail on the warning
being returned. For example, for warnings where there are no results
in a list request for a particular zone, this key might be scope and
the key value might be the zone name. Other examples might be a key
indicating a deprecated resource and a suggested replacement, or a
warning about invalid network settings (for example, if an instance
attempts to perform IP forwarding but is not enabled for IP
forwarding).
value: [Output Only] A warning data value corresponding to the key.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
code = _messages.EnumField('CodeValueValuesEnum', 1)
data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
message = _messages.StringField(3)
clientOperationId = _messages.StringField(1)
creationTimestamp = _messages.StringField(2)
description = _messages.StringField(3)
endTime = _messages.StringField(4)
error = _messages.MessageField('ErrorValue', 5)
httpErrorMessage = _messages.StringField(6)
httpErrorStatusCode = _messages.IntegerField(7, variant=_messages.Variant.INT32)
id = _messages.IntegerField(8, variant=_messages.Variant.UINT64)
insertTime = _messages.StringField(9)
kind = _messages.StringField(10, default=u'clouduseraccounts#operation')
name = _messages.StringField(11)
operationType = _messages.StringField(12)
progress = _messages.IntegerField(13, variant=_messages.Variant.INT32)
region = _messages.StringField(14)
selfLink = _messages.StringField(15)
startTime = _messages.StringField(16)
status = _messages.EnumField('StatusValueValuesEnum', 17)
statusMessage = _messages.StringField(18)
targetId = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
targetLink = _messages.StringField(20)
user = _messages.StringField(21)
warnings = _messages.MessageField('WarningsValueListEntry', 22, repeated=True)
zone = _messages.StringField(23)
class OperationList(_messages.Message):
"""Contains a list of Operation resources.
Fields:
id: [Output Only] The unique identifier for the resource. This identifier
is defined by the server.
items: [Output Only] A list of Operation resources.
kind: [Output Only] Type of resource. Always compute#operations for
Operations resource.
nextPageToken: [Output Only] This token allows you to get the next page of
results for list requests. If the number of results is larger than
maxResults, use the nextPageToken as a value for the query parameter
pageToken in the next list request. Subsequent list requests will have
their own nextPageToken to continue paging through the results.
selfLink: [Output Only] Server-defined URL for this resource.
"""
id = _messages.StringField(1)
items = _messages.MessageField('Operation', 2, repeated=True)
kind = _messages.StringField(3, default=u'clouduseraccounts#operationList')
nextPageToken = _messages.StringField(4)
selfLink = _messages.StringField(5)
class PublicKey(_messages.Message):
"""A public key for authenticating to guests.
Fields:
creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
format.
description: An optional textual description of the resource; provided by
the client when the resource is created.
expirationTimestamp: Optional expiration timestamp. If provided, the
timestamp must be in RFC3339 text format. If not provided, the public
key never expires.
fingerprint: [Output Only] The fingerprint of the key is defined by
RFC4716 to be the MD5 digest of the public key.
key: Public key text in SSH format, defined by RFC4253 section 6.6.
"""
creationTimestamp = _messages.StringField(1)
description = _messages.StringField(2)
expirationTimestamp = _messages.StringField(3)
fingerprint = _messages.StringField(4)
key = _messages.StringField(5)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class User(_messages.Message):
"""A User resource.
Fields:
creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
format.
description: An optional textual description of the resource; provided by
the client when the resource is created.
groups: [Output Only] A list of URLs to Group resources who contain the
user. Users are only members of groups in the same project.
id: [Output Only] Unique identifier for the resource; defined by the
server.
kind: [Output Only] Type of the resource. Always clouduseraccounts#user
for users.
name: Name of the resource; provided by the client when the resource is
created.
owner: Email address of account's owner. This account will be validated to
make sure it exists. The email can belong to any domain, but it must be
tied to a Google account.
publicKeys: [Output Only] Public keys that this user may use to login.
selfLink: [Output Only] Server defined URL for the resource.
"""
creationTimestamp = _messages.StringField(1)
description = _messages.StringField(2)
groups = _messages.StringField(3, repeated=True)
id = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
kind = _messages.StringField(5, default=u'clouduseraccounts#user')
name = _messages.StringField(6)
owner = _messages.StringField(7)
publicKeys = _messages.MessageField('PublicKey', 8, repeated=True)
selfLink = _messages.StringField(9)
class UserList(_messages.Message):
"""A UserList object.
Fields:
id: [Output Only] Unique identifier for the resource; defined by the
server.
items: [Output Only] A list of User resources.
kind: [Output Only] Type of resource. Always clouduseraccounts#userList
for lists of users.
nextPageToken: [Output Only] A token used to continue a truncated list
request.
selfLink: [Output Only] Server defined URL for this resource.
"""
id = _messages.StringField(1)
items = _messages.MessageField('User', 2, repeated=True)
kind = _messages.StringField(3, default=u'clouduseraccounts#userList')
nextPageToken = _messages.StringField(4)
selfLink = _messages.StringField(5) | 0.867598 | 0.308613 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 100
vocab_processor = skflow.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def cnn_model(X, y):
"""2 layer Convolutional network to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = skflow.ops.conv2d(word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convlution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
return skflow.models.logistic_regression(pool2, y)
classifier = skflow.TensorFlowEstimator(model_fn=cnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train for 1000 steps & predict on test set.
while True:
classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_cnn')
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score)) | tensorflow/examples/skflow/text_classification_cnn.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import skflow
### Training data
# Download dbpedia_csv.tar.gz from
# https://drive.google.com/folderview?id=0Bz8a_Dbh9Qhbfll6bVpmNUtUcFdjYmF2SEpmZUZUcVNiMUw1TWN6RDV3a0JHT3kxLVhVR2M
# Unpack: tar -xvf dbpedia_csv.tar.gz
train = pandas.read_csv('dbpedia_csv/train.csv', header=None)
X_train, y_train = train[2], train[0]
test = pandas.read_csv('dbpedia_csv/test.csv', header=None)
X_test, y_test = test[2], test[0]
### Process vocabulary
MAX_DOCUMENT_LENGTH = 100
vocab_processor = skflow.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(vocab_processor.fit_transform(X_train)))
X_test = np.array(list(vocab_processor.transform(X_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
### Models
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def cnn_model(X, y):
"""2 layer Convolutional network to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = skflow.ops.categorical_variable(X, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = skflow.ops.conv2d(word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convlution+Relu.
pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
return skflow.models.logistic_regression(pool2, y)
classifier = skflow.TensorFlowEstimator(model_fn=cnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train for 1000 steps & predict on test set.
while True:
classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_cnn')
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score)) | 0.851737 | 0.283564 |
from django.shortcuts import render, redirect, HttpResponse
from django.views.generic import ListView, CreateView,UpdateView,DeleteView, TemplateView, View
from apps.movimientos.models import *
from apps.movimientos.forms import *
from django.contrib.auth import authenticate, login
from django.contrib import messages
from django.urls import reverse_lazy
import datetime
def index(request):
return render(request,'index.html')
def mylogin(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request,'Datos incorrectos!!')
return render(request, 'login.html')
else:
return render(request, 'login.html')
class ReordenarCintas(View):
def get(self, request, *args, **kwargs):
ajdrs=Alojadores.objects.all()
movs=Movimiento.objects.all().order_by('-pk')
jsn ={
'msj':'llegó',
'alojadores':ajdrs,
'movs':movs
}
return render(request,'pag/c_reordenar.html',jsn)
class AlpListar(View):
def get(self, request, *args, **kwargs):
alps = Proyectos.objects.all()
jsn = {
'msj':'Lista de PROYECTOS',
'alp':alps
}
return render(request,'pag/c_lst.html',jsn)
class CinLstAlp(View):
def get(self, request, *args, **kwargs):
alp = self.request.GET.get('alp')
proy=Proyectos.objects.get(alp=alp)
procesos=Proyectos.objects.all()
for pc in procesos:
print(" > "+pc.cliente)
break
from django.db import connection, transaction
cursor = connection.cursor()
cursor.execute("CALL sp_lst_alp (%s)",[alp])
cintas = []
detalles = cursor.fetchall()
for row in detalles:
dic = dict(zip([col[0] for col in cursor.description], row))
cintas.append(dic)
cursor.close()
msj ="["+str(proy.alp)+"]:"+proy.nombre
jsn = {
'msj':msj,
'cintas':cintas,
'procesos':procesos,
}
return render(request,'pag/c_lst.html',jsn)
from django.db.models import Q
class ActulizarUbicacion(View):
def get(self, request, *args, **kwargs):
cod = self.request.GET.get('cod')
mov = self.request.GET.get('mov')
alj = self.request.GET.get('alj')
pos = self.request.GET.get('pos')
rpta = "false"
#print(" > cod:"+cod+" > mov:"+mov+" > alj:"+alj+" >pos:"+pos)
if Cinta.objects.filter(codigo=cod).exists():
cnta=Cinta.objects.get(codigo=cod)
if UbicacionCinta.objects.filter(Q(id_cinta=cnta) & Q(estado=1)).exists():
UbicacionCinta.objects.filter(id_cinta=cnta).update(estado=2)
print(" ->> Ubicación anerior descartada / se crea nueva ")
else:
print(" :. no existen ubicaciones anteriores.")
alj1=Alojadores.objects.get(pk=alj)
mov1=Movimiento.objects.get(pk=mov)
ubcnt = UbicacionCinta(id_cinta =cnta,id_alojador=alj1,id_movimiento=mov1,posicion=pos,estado=1)
ubcnt.save()
rpta="true"
return HttpResponse(rpta)
class MovimientoCreate(CreateView):
model=Movimiento
form_class=MovimientoForm
template_name='pag/m_cre.html'
success_url=reverse_lazy('c_reordenar')
class MovimientoCreate1(View):
def get(self, request, *args, **kwargs):
print(" > GET")
ida = self.request.GET.get('id_asuth')
des = self.request.GET.get('razon')
print(" -> GET: "+str(ida)+" ->"+des)
return reverse_lazy('c_reordenar')
def post(self, request, *args, **kwargs):
ida = self.request.POST.get('id_asuth')
des = self.request.POST.get('razon')
aut = AuthUser.objects.get(pk=ida)
dat=datetime.datetime.now()
hor=datetime.datetime.now()
movs=Movimiento.objects.all().order_by('-pk')
Mvnto=Movimiento(id_asuth=aut,fecha =dat,hora =hor,razon=des)
Mvnto.save()
print(" > se guardó")
ajdrs=Alojadores.objects.all()
jsn ={
'msj':'llegó',
'alojadores':ajdrs,
'movs':movs
}
return render(request,'pag/c_reordenar.html',jsn)
class CinLstAlj(View):
def get(self, request, *args, **kwargs):
ida = self.request.GET.get('ida')
print(" > "+str(ida))
from django.db import connection, transaction
cursor = connection.cursor()
cursor.execute("CALL sp_lst_cin_alsj (%s)",[ida])
lstCnts = []
detalles = cursor.fetchall()
for row in detalles:
dic = dict(zip([col[0] for col in cursor.description], row))
lstCnts.append(dic)
cursor.close()
aljs1=Alojadores.objects.all().order_by('nombre')
for a in aljs1:
print(" ->"+a)
print(" > "+str(ida))
jsn = {
'msj':'llegó',
'aljs':Alojadores.objects.all().order_by('nombre'),
'lstCnts':lstCnts
}
return render(request,'pag/c_lst_alj.html',jsn)
class AljCreate(CreateView):
model=Alojadores
form_class=AljForm
template_name='alj/a_reg.html'
success_url=reverse_lazy('a_lis')
class AljUpdate(UpdateView):
model=Alojadores
form_class=AljForm
template_name='alj/a_reg.html'
success_url=reverse_lazy('a_lis')
class AljDelete(DeleteView):
model=Alojadores
form_class=AljForm
template_name='alj/a_eli.html'
success_url=reverse_lazy('a_lis')
class AljList(ListView):
model=Alojadores
template_name='alj/a_lis.html'
paginate_by=9
class ProyCreate(CreateView):
model=Proyectos
form_class=ProyForm
template_name='proy/p_reg.html'
success_url=reverse_lazy('a_lis')
class ProyUpdate(UpdateView):
model=Proyectos
form_class=ProyForm
template_name='proy/p_reg.html'
success_url=reverse_lazy('a_lis')
class ProyDelete(DeleteView):
model=Proyectos
form_class=ProyForm
template_name='proy/p_eli.html'
success_url=reverse_lazy('a_lis')
class ProyList(ListView):
model=Proyectos
template_name='proy/p_lis.html'
paginate_by=9 | cintas01/apps/movimientos/views.py | from django.shortcuts import render, redirect, HttpResponse
from django.views.generic import ListView, CreateView,UpdateView,DeleteView, TemplateView, View
from apps.movimientos.models import *
from apps.movimientos.forms import *
from django.contrib.auth import authenticate, login
from django.contrib import messages
from django.urls import reverse_lazy
import datetime
def index(request):
return render(request,'index.html')
def mylogin(request):
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('index')
else:
messages.error(request,'Datos incorrectos!!')
return render(request, 'login.html')
else:
return render(request, 'login.html')
class ReordenarCintas(View):
def get(self, request, *args, **kwargs):
ajdrs=Alojadores.objects.all()
movs=Movimiento.objects.all().order_by('-pk')
jsn ={
'msj':'llegó',
'alojadores':ajdrs,
'movs':movs
}
return render(request,'pag/c_reordenar.html',jsn)
class AlpListar(View):
def get(self, request, *args, **kwargs):
alps = Proyectos.objects.all()
jsn = {
'msj':'Lista de PROYECTOS',
'alp':alps
}
return render(request,'pag/c_lst.html',jsn)
class CinLstAlp(View):
def get(self, request, *args, **kwargs):
alp = self.request.GET.get('alp')
proy=Proyectos.objects.get(alp=alp)
procesos=Proyectos.objects.all()
for pc in procesos:
print(" > "+pc.cliente)
break
from django.db import connection, transaction
cursor = connection.cursor()
cursor.execute("CALL sp_lst_alp (%s)",[alp])
cintas = []
detalles = cursor.fetchall()
for row in detalles:
dic = dict(zip([col[0] for col in cursor.description], row))
cintas.append(dic)
cursor.close()
msj ="["+str(proy.alp)+"]:"+proy.nombre
jsn = {
'msj':msj,
'cintas':cintas,
'procesos':procesos,
}
return render(request,'pag/c_lst.html',jsn)
from django.db.models import Q
class ActulizarUbicacion(View):
def get(self, request, *args, **kwargs):
cod = self.request.GET.get('cod')
mov = self.request.GET.get('mov')
alj = self.request.GET.get('alj')
pos = self.request.GET.get('pos')
rpta = "false"
#print(" > cod:"+cod+" > mov:"+mov+" > alj:"+alj+" >pos:"+pos)
if Cinta.objects.filter(codigo=cod).exists():
cnta=Cinta.objects.get(codigo=cod)
if UbicacionCinta.objects.filter(Q(id_cinta=cnta) & Q(estado=1)).exists():
UbicacionCinta.objects.filter(id_cinta=cnta).update(estado=2)
print(" ->> Ubicación anerior descartada / se crea nueva ")
else:
print(" :. no existen ubicaciones anteriores.")
alj1=Alojadores.objects.get(pk=alj)
mov1=Movimiento.objects.get(pk=mov)
ubcnt = UbicacionCinta(id_cinta =cnta,id_alojador=alj1,id_movimiento=mov1,posicion=pos,estado=1)
ubcnt.save()
rpta="true"
return HttpResponse(rpta)
class MovimientoCreate(CreateView):
model=Movimiento
form_class=MovimientoForm
template_name='pag/m_cre.html'
success_url=reverse_lazy('c_reordenar')
class MovimientoCreate1(View):
def get(self, request, *args, **kwargs):
print(" > GET")
ida = self.request.GET.get('id_asuth')
des = self.request.GET.get('razon')
print(" -> GET: "+str(ida)+" ->"+des)
return reverse_lazy('c_reordenar')
def post(self, request, *args, **kwargs):
ida = self.request.POST.get('id_asuth')
des = self.request.POST.get('razon')
aut = AuthUser.objects.get(pk=ida)
dat=datetime.datetime.now()
hor=datetime.datetime.now()
movs=Movimiento.objects.all().order_by('-pk')
Mvnto=Movimiento(id_asuth=aut,fecha =dat,hora =hor,razon=des)
Mvnto.save()
print(" > se guardó")
ajdrs=Alojadores.objects.all()
jsn ={
'msj':'llegó',
'alojadores':ajdrs,
'movs':movs
}
return render(request,'pag/c_reordenar.html',jsn)
class CinLstAlj(View):
def get(self, request, *args, **kwargs):
ida = self.request.GET.get('ida')
print(" > "+str(ida))
from django.db import connection, transaction
cursor = connection.cursor()
cursor.execute("CALL sp_lst_cin_alsj (%s)",[ida])
lstCnts = []
detalles = cursor.fetchall()
for row in detalles:
dic = dict(zip([col[0] for col in cursor.description], row))
lstCnts.append(dic)
cursor.close()
aljs1=Alojadores.objects.all().order_by('nombre')
for a in aljs1:
print(" ->"+a)
print(" > "+str(ida))
jsn = {
'msj':'llegó',
'aljs':Alojadores.objects.all().order_by('nombre'),
'lstCnts':lstCnts
}
return render(request,'pag/c_lst_alj.html',jsn)
class AljCreate(CreateView):
model=Alojadores
form_class=AljForm
template_name='alj/a_reg.html'
success_url=reverse_lazy('a_lis')
class AljUpdate(UpdateView):
model=Alojadores
form_class=AljForm
template_name='alj/a_reg.html'
success_url=reverse_lazy('a_lis')
class AljDelete(DeleteView):
model=Alojadores
form_class=AljForm
template_name='alj/a_eli.html'
success_url=reverse_lazy('a_lis')
class AljList(ListView):
model=Alojadores
template_name='alj/a_lis.html'
paginate_by=9
class ProyCreate(CreateView):
model=Proyectos
form_class=ProyForm
template_name='proy/p_reg.html'
success_url=reverse_lazy('a_lis')
class ProyUpdate(UpdateView):
model=Proyectos
form_class=ProyForm
template_name='proy/p_reg.html'
success_url=reverse_lazy('a_lis')
class ProyDelete(DeleteView):
model=Proyectos
form_class=ProyForm
template_name='proy/p_eli.html'
success_url=reverse_lazy('a_lis')
class ProyList(ListView):
model=Proyectos
template_name='proy/p_lis.html'
paginate_by=9 | 0.148016 | 0.056185 |
import warnings
from cms.api import add_plugin, create_page
from cms.test_utils.testcases import CMSTestCase
from .test_app.cms_plugins import TestPluginPlugin
# we're testing the plugin generation from a sample django CMS addon in
# tests/test_app
class TestPluginTestCase(CMSTestCase):
def setUp(self):
self.language = "en"
self.page = create_page(
title="page",
template="page.html",
language=self.language,
)
self.page.publish(self.language)
self.placeholder = self.page.placeholders.get(slot="content")
self.superuser = self.get_superuser()
def tearDown(self):
self.page.delete()
self.superuser.delete()
def test_plugin_rendering(self):
request_url = self.page.get_absolute_url(self.language) + "?toolbar_off=true"
add_plugin(
placeholder=self.placeholder,
plugin_type=TestPluginPlugin.__name__,
language=self.language,
attributes1={"data-tracking": "google"},
attributes2={"class": "some new classes"},
)
self.page.publish(self.language)
with self.login_user_context(self.superuser):
response = self.client.get(request_url)
self.assertContains(response, "data-tracking")
self.assertContains(response, "google")
self.assertContains(response, "class")
self.assertContains(response, "some new classes")
def test_plugin_form(self):
request_url = self.get_add_plugin_uri(
placeholder=self.placeholder,
plugin_type=TestPluginPlugin.__name__,
language=self.language,
)
data = {
"label": "test",
"attributes_key[attributes1]": "data-tracking",
"attributes_value[attributes1]": "google",
"attributes_key[attributes2]": "class",
"attributes_value[attributes2]": "some new classes"
}
# test actual form rendering
with self.login_user_context(self.superuser), warnings.catch_warnings():
# hide the "DontUsePageAttributeWarning" warning when using
# `get_add_plugin_uri` to get cleaner test results
warnings.simplefilter("ignore")
response = self.client.post(request_url, data)
self.assertEquals(response.status_code, 200)
self.assertContains(response, '<div class="success">')
# test error for excluded keys
data = {
"label": "test",
"attributes_key[attributes2]": "style",
"attributes_value[attributes2]": "this fails"
}
# test actual form rendering
with self.login_user_context(self.superuser), warnings.catch_warnings():
# hide the "DontUsePageAttributeWarning" warning when using
# `get_add_plugin_uri` to get cleaner test results
warnings.simplefilter("ignore")
response = self.client.post(request_url, data)
self.assertContains(
response,
""style" is excluded by configuration and cannot be "
"used as a key.",
)
# test error if an invalid option is probided
data = {
"label": "test",
"attributes_key[attributes2]": "data test",
"attributes_value[attributes2]": "hello world"
}
# test actual form rendering
with self.login_user_context(self.superuser), warnings.catch_warnings():
# hide the "DontUsePageAttributeWarning" warning when using
# `get_add_plugin_uri` to get cleaner test results
warnings.simplefilter("ignore")
response = self.client.post(request_url, data)
self.assertContains(
response,
""data test" is not a valid key. Keys must start with "
"at least one letter and consist only of the letters, numbers, "
"underscores or hyphens.",
) | tests/test_plugins.py | import warnings
from cms.api import add_plugin, create_page
from cms.test_utils.testcases import CMSTestCase
from .test_app.cms_plugins import TestPluginPlugin
# we're testing the plugin generation from a sample django CMS addon in
# tests/test_app
class TestPluginTestCase(CMSTestCase):
def setUp(self):
self.language = "en"
self.page = create_page(
title="page",
template="page.html",
language=self.language,
)
self.page.publish(self.language)
self.placeholder = self.page.placeholders.get(slot="content")
self.superuser = self.get_superuser()
def tearDown(self):
self.page.delete()
self.superuser.delete()
def test_plugin_rendering(self):
request_url = self.page.get_absolute_url(self.language) + "?toolbar_off=true"
add_plugin(
placeholder=self.placeholder,
plugin_type=TestPluginPlugin.__name__,
language=self.language,
attributes1={"data-tracking": "google"},
attributes2={"class": "some new classes"},
)
self.page.publish(self.language)
with self.login_user_context(self.superuser):
response = self.client.get(request_url)
self.assertContains(response, "data-tracking")
self.assertContains(response, "google")
self.assertContains(response, "class")
self.assertContains(response, "some new classes")
def test_plugin_form(self):
request_url = self.get_add_plugin_uri(
placeholder=self.placeholder,
plugin_type=TestPluginPlugin.__name__,
language=self.language,
)
data = {
"label": "test",
"attributes_key[attributes1]": "data-tracking",
"attributes_value[attributes1]": "google",
"attributes_key[attributes2]": "class",
"attributes_value[attributes2]": "some new classes"
}
# test actual form rendering
with self.login_user_context(self.superuser), warnings.catch_warnings():
# hide the "DontUsePageAttributeWarning" warning when using
# `get_add_plugin_uri` to get cleaner test results
warnings.simplefilter("ignore")
response = self.client.post(request_url, data)
self.assertEquals(response.status_code, 200)
self.assertContains(response, '<div class="success">')
# test error for excluded keys
data = {
"label": "test",
"attributes_key[attributes2]": "style",
"attributes_value[attributes2]": "this fails"
}
# test actual form rendering
with self.login_user_context(self.superuser), warnings.catch_warnings():
# hide the "DontUsePageAttributeWarning" warning when using
# `get_add_plugin_uri` to get cleaner test results
warnings.simplefilter("ignore")
response = self.client.post(request_url, data)
self.assertContains(
response,
""style" is excluded by configuration and cannot be "
"used as a key.",
)
# test error if an invalid option is probided
data = {
"label": "test",
"attributes_key[attributes2]": "data test",
"attributes_value[attributes2]": "hello world"
}
# test actual form rendering
with self.login_user_context(self.superuser), warnings.catch_warnings():
# hide the "DontUsePageAttributeWarning" warning when using
# `get_add_plugin_uri` to get cleaner test results
warnings.simplefilter("ignore")
response = self.client.post(request_url, data)
self.assertContains(
response,
""data test" is not a valid key. Keys must start with "
"at least one letter and consist only of the letters, numbers, "
"underscores or hyphens.",
) | 0.724091 | 0.27125 |
import tensorflow as tf
from tensorflow.contrib import slim
from builders import frontend_builder
import numpy as np
import os, sys
# define a initializer
initializer = slim.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
# Use bilinear interpolation to adjust images to a fixed size.
def Upsampling(inputs,scale):
return tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*scale, tf.shape(inputs)[2]*scale])
def ConvBlock(inputs, n_filters, kernel_size=[3, 3], strides=1):
"""
Basic conv block for Encoder-Decoder
Apply successivly Convolution, BatchNormalization, ReLU nonlinearity
"""
net = slim.conv2d(inputs, n_filters, kernel_size, stride=[strides, strides], activation_fn=None, normalizer_fn=None,
weights_initializer=initializer)
net = tf.nn.relu(slim.batch_norm(net, fused=True))
return net
# ARM for attention mechanism
def AttentionRefinementModule(inputs, n_filters):
inputs = slim.conv2d(inputs, n_filters, [3, 3], activation_fn=None, weights_initializer=initializer)
inputs = tf.nn.relu(slim.batch_norm(inputs, fused=True))
# Global average pooling
net = tf.reduce_mean(inputs, [1, 2], keep_dims=True)
net = slim.conv2d(net, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = tf.multiply(inputs, net)
return net
# Feature fusion in the final stage
def FeatureFusionModule(input_1, input_2, input_3, n_filters):
inputs = tf.concat([input_1, input_2], axis=-1)
inputs = tf.concat([inputs, input_3], axis=-1)
inputs = ConvBlock(inputs, n_filters=n_filters, kernel_size=[3, 3])
# Global average pooling
net = tf.reduce_mean(inputs, [1, 2], keep_dims=True)
net = slim.conv2d(net, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = slim.conv2d(net, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = tf.multiply(inputs, net)
net = tf.add(inputs, net)
return net
# key component in this network
def AttentionAndFeatureFussion(input_1, input_2, n_filters):
net = ConvBlock(input_1, n_filters, kernel_size=[3, 3], strides=2)
net_global = tf.reduce_mean(net, [1, 2], keep_dims=True)
net_global = slim.conv2d(net_global, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net_global = slim.batch_norm(net_global, fused=True)
net_global = tf.nn.relu(net_global)
net_attention = tf.multiply(net_global, net)
input_2 = ConvBlock(input_2, n_filters, kernel_size=[1, 1])
net = tf.add(net_attention, input_2)
net = ConvBlock(net, n_filters, kernel_size=[1, 1], strides=1)
return net
# build the depth-wise AAFF2
def build_bisenet4(inputs, num_classes, preset_model='DepthwiseAAFF2', frontend="xception", weight_decay=1e-5, is_training=True, pretrained_dir="models"):
### The spatial path
### The number of feature maps for each convolution is not specified in the paper
### It was chosen here to be equal to the number of feature maps of a classification
### model at each corresponding stage
# depth-wise convolution
point_filter1 = tf.get_variable(name="point_filter1", shape=(1, 1, 64, 128), initializer=initializer)
point_filter2 = tf.get_variable(name="point_filter2", shape=(1, 1, 128, 256), initializer=initializer)
filter1 = tf.get_variable(name="filter1", shape=(3, 3, 64, 1), initializer=initializer)
filter2 = tf.get_variable(name="filter2", shape=(3, 3, 128, 1), initializer=initializer)
# spatial path
spatial_net = ConvBlock(inputs, n_filters=64, kernel_size=[3, 3], strides=2)
spatial_net = tf.nn.separable_conv2d(input=spatial_net, depthwise_filter=filter1, pointwise_filter=point_filter1, strides=[1,2,2,1], rate=[1,1], padding='SAME')
spatial_net = tf.nn.separable_conv2d(input=spatial_net, depthwise_filter=filter2, pointwise_filter=point_filter2, strides=[1,2,2,1], rate=[1,1], padding='SAME')
spatial_net = ConvBlock(spatial_net, n_filters=32, kernel_size=[1, 1])
### Context path
logits, end_points, frontend_scope, init_fn = frontend_builder.build_frontend(inputs, frontend, pretrained_dir=pretrained_dir, is_training=is_training)
size = tf.shape(end_points['pool5'])[1:3]
global_channels = tf.reduce_mean(end_points['pool5'], [1, 2], keep_dims=True)
global_channels = slim.conv2d(global_channels, 128, 1, [1, 1], activation_fn=None)
global_channels = tf.nn.relu(slim.batch_norm(global_channels, fused=True))
global_channels = tf.image.resize_bilinear(global_channels, size=size)
net_1 = AttentionAndFeatureFussion(end_points['pool3'], end_points['pool4'], 128)
net_2 = AttentionAndFeatureFussion(net_1, end_points['pool5'], 128)
net_2 = tf.add(global_channels, net_2)
net_2 = Upsampling(net_2, scale=2)
net_1_2 = tf.add(net_1, net_2)
net_1_2 = Upsampling(net_2, scale=2)
net_1_2 = ConvBlock(net_1_2, n_filters=64, kernel_size=[1, 1])
net_1_2_3 = tf.add(net_1_2, end_points['pool3'])
net_1_2_3 = ConvBlock(net_1_2_3, n_filters=128, kernel_size=[1, 1], strides=1)
context_path_left = AttentionRefinementModule(net_1_2_3, n_filters=128)
net_3 = AttentionAndFeatureFussion(end_points['pool3'], end_points['pool4'], 128)
net_4 = AttentionAndFeatureFussion(net_3, end_points['pool5'], 128)
net_4 = tf.add(global_channels, net_4)
net_4 = Upsampling(net_4, scale=2)
net_3_4 = tf.add(net_3, net_4)
net_3_4 = Upsampling(net_3_4, scale=2)
net_3_4 = ConvBlock(net_3_4, n_filters=64, kernel_size=[1, 1])
net_3_4_5 = tf.add(net_3_4, end_points['pool3'])
net_3_4_5 = ConvBlock(net_3_4_5, n_filters=128, kernel_size=[1, 1], strides=1)
context_path_right = AttentionRefinementModule(net_3_4_5, n_filters=128)
### Combining the paths
net = FeatureFusionModule(input_1=context_path_left, input_2=context_path_right, input_3=spatial_net, n_filters=256)
net = ConvBlock(net, n_filters=64, kernel_size=[3, 3])
### Final upscaling and finish
net = Upsampling(net, scale=2)
net = slim.conv2d(net, 64, [3, 3], rate=2, activation_fn=tf.nn.relu, biases_initializer=None,
normalizer_fn=slim.batch_norm, weights_initializer=initializer)
net = Upsampling(net, 4)
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits', weights_initializer=initializer)
return net, init_fn | models/DepthwiseAAFF2.py |
import tensorflow as tf
from tensorflow.contrib import slim
from builders import frontend_builder
import numpy as np
import os, sys
# define a initializer
initializer = slim.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)
# Use bilinear interpolation to adjust images to a fixed size.
def Upsampling(inputs,scale):
return tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*scale, tf.shape(inputs)[2]*scale])
def ConvBlock(inputs, n_filters, kernel_size=[3, 3], strides=1):
"""
Basic conv block for Encoder-Decoder
Apply successivly Convolution, BatchNormalization, ReLU nonlinearity
"""
net = slim.conv2d(inputs, n_filters, kernel_size, stride=[strides, strides], activation_fn=None, normalizer_fn=None,
weights_initializer=initializer)
net = tf.nn.relu(slim.batch_norm(net, fused=True))
return net
# ARM for attention mechanism
def AttentionRefinementModule(inputs, n_filters):
inputs = slim.conv2d(inputs, n_filters, [3, 3], activation_fn=None, weights_initializer=initializer)
inputs = tf.nn.relu(slim.batch_norm(inputs, fused=True))
# Global average pooling
net = tf.reduce_mean(inputs, [1, 2], keep_dims=True)
net = slim.conv2d(net, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = tf.multiply(inputs, net)
return net
# Feature fusion in the final stage
def FeatureFusionModule(input_1, input_2, input_3, n_filters):
inputs = tf.concat([input_1, input_2], axis=-1)
inputs = tf.concat([inputs, input_3], axis=-1)
inputs = ConvBlock(inputs, n_filters=n_filters, kernel_size=[3, 3])
# Global average pooling
net = tf.reduce_mean(inputs, [1, 2], keep_dims=True)
net = slim.conv2d(net, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = slim.conv2d(net, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = tf.multiply(inputs, net)
net = tf.add(inputs, net)
return net
# key component in this network
def AttentionAndFeatureFussion(input_1, input_2, n_filters):
net = ConvBlock(input_1, n_filters, kernel_size=[3, 3], strides=2)
net_global = tf.reduce_mean(net, [1, 2], keep_dims=True)
net_global = slim.conv2d(net_global, n_filters, kernel_size=[1, 1], weights_initializer=initializer)
net_global = slim.batch_norm(net_global, fused=True)
net_global = tf.nn.relu(net_global)
net_attention = tf.multiply(net_global, net)
input_2 = ConvBlock(input_2, n_filters, kernel_size=[1, 1])
net = tf.add(net_attention, input_2)
net = ConvBlock(net, n_filters, kernel_size=[1, 1], strides=1)
return net
# build the depth-wise AAFF2
def build_bisenet4(inputs, num_classes, preset_model='DepthwiseAAFF2', frontend="xception", weight_decay=1e-5, is_training=True, pretrained_dir="models"):
### The spatial path
### The number of feature maps for each convolution is not specified in the paper
### It was chosen here to be equal to the number of feature maps of a classification
### model at each corresponding stage
# depth-wise convolution
point_filter1 = tf.get_variable(name="point_filter1", shape=(1, 1, 64, 128), initializer=initializer)
point_filter2 = tf.get_variable(name="point_filter2", shape=(1, 1, 128, 256), initializer=initializer)
filter1 = tf.get_variable(name="filter1", shape=(3, 3, 64, 1), initializer=initializer)
filter2 = tf.get_variable(name="filter2", shape=(3, 3, 128, 1), initializer=initializer)
# spatial path
spatial_net = ConvBlock(inputs, n_filters=64, kernel_size=[3, 3], strides=2)
spatial_net = tf.nn.separable_conv2d(input=spatial_net, depthwise_filter=filter1, pointwise_filter=point_filter1, strides=[1,2,2,1], rate=[1,1], padding='SAME')
spatial_net = tf.nn.separable_conv2d(input=spatial_net, depthwise_filter=filter2, pointwise_filter=point_filter2, strides=[1,2,2,1], rate=[1,1], padding='SAME')
spatial_net = ConvBlock(spatial_net, n_filters=32, kernel_size=[1, 1])
### Context path
logits, end_points, frontend_scope, init_fn = frontend_builder.build_frontend(inputs, frontend, pretrained_dir=pretrained_dir, is_training=is_training)
size = tf.shape(end_points['pool5'])[1:3]
global_channels = tf.reduce_mean(end_points['pool5'], [1, 2], keep_dims=True)
global_channels = slim.conv2d(global_channels, 128, 1, [1, 1], activation_fn=None)
global_channels = tf.nn.relu(slim.batch_norm(global_channels, fused=True))
global_channels = tf.image.resize_bilinear(global_channels, size=size)
net_1 = AttentionAndFeatureFussion(end_points['pool3'], end_points['pool4'], 128)
net_2 = AttentionAndFeatureFussion(net_1, end_points['pool5'], 128)
net_2 = tf.add(global_channels, net_2)
net_2 = Upsampling(net_2, scale=2)
net_1_2 = tf.add(net_1, net_2)
net_1_2 = Upsampling(net_2, scale=2)
net_1_2 = ConvBlock(net_1_2, n_filters=64, kernel_size=[1, 1])
net_1_2_3 = tf.add(net_1_2, end_points['pool3'])
net_1_2_3 = ConvBlock(net_1_2_3, n_filters=128, kernel_size=[1, 1], strides=1)
context_path_left = AttentionRefinementModule(net_1_2_3, n_filters=128)
net_3 = AttentionAndFeatureFussion(end_points['pool3'], end_points['pool4'], 128)
net_4 = AttentionAndFeatureFussion(net_3, end_points['pool5'], 128)
net_4 = tf.add(global_channels, net_4)
net_4 = Upsampling(net_4, scale=2)
net_3_4 = tf.add(net_3, net_4)
net_3_4 = Upsampling(net_3_4, scale=2)
net_3_4 = ConvBlock(net_3_4, n_filters=64, kernel_size=[1, 1])
net_3_4_5 = tf.add(net_3_4, end_points['pool3'])
net_3_4_5 = ConvBlock(net_3_4_5, n_filters=128, kernel_size=[1, 1], strides=1)
context_path_right = AttentionRefinementModule(net_3_4_5, n_filters=128)
### Combining the paths
net = FeatureFusionModule(input_1=context_path_left, input_2=context_path_right, input_3=spatial_net, n_filters=256)
net = ConvBlock(net, n_filters=64, kernel_size=[3, 3])
### Final upscaling and finish
net = Upsampling(net, scale=2)
net = slim.conv2d(net, 64, [3, 3], rate=2, activation_fn=tf.nn.relu, biases_initializer=None,
normalizer_fn=slim.batch_norm, weights_initializer=initializer)
net = Upsampling(net, 4)
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits', weights_initializer=initializer)
return net, init_fn | 0.813868 | 0.554229 |
from __future__ import absolute_import
from webtest import TestApp as Client
import bowerstatic
import json
import mock
import os
import pytest
def test_injector_specific_path():
    """Including an explicit file path injects a script tag into <head>."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery/dist/jquery.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_specific_path_wrong_file():
    """Including a nonexistent file in a known component raises Error."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        include = components.includer(environ)
        with pytest.raises(bowerstatic.Error):
            include('jquery/nonexistent.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    client.get('/')
def test_injector_does_not_fail_for_401_responses_with_no_content_type():
    """The injector tolerates a response that has no usable content type."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        # A real 401 cannot be used: webtest only accepts 200 or 3xx. That
        # is fine, as all this test needs is a missing content type.
        start_response('302', [('Content-Type', None)])
        include = components.includer(environ)
        with pytest.raises(bowerstatic.Error):
            include('jquery/nonexistent.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    # webtest, in contrast to pyramid, validates response headers and breaks
    # on non-string values, so its header check is disabled here.
    with mock.patch('webtest.lint.check_headers'):
        client.get('/')
def test_injector_specific_path_wrong_file_then_added(tmpdir):
    """A file that was missing at first is served once it appears on disk."""
    bower_components_dir = tmpdir.mkdir('bower_components')
    component_dir = bower_components_dir.mkdir('component')
    component_dir.join('.bower.json').write(json.dumps({
        'name': 'component',
        'version': '2.1',
        'main': 'main.js'
    }))
    component_dir.join('main.js').write('/* this is main.js */')

    bower = bowerstatic.Bower()
    components = bower.components('components', bower_components_dir.strpath)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('component/notyet.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    with pytest.raises(bowerstatic.Error):
        client.get('/')

    # Create the previously nonexistent file; the next request must succeed.
    component_dir.join('notyet.js').write('/* this is notyet.js */')
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/component/2.1/notyet.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_wrong_component():
    """Including a path into an unknown component raises Error."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        include = components.includer(environ)
        with pytest.raises(bowerstatic.Error):
            include('nonexistent/nonexistent.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    client.get('/')
@pytest.mark.xfail
def test_injector_wrong_component_then_added(tmpdir):
    """A component installed after a failed request should then be served.

    Marked xfail: picking up a newly installed component does not work yet.
    """
    bower_components_dir = tmpdir.mkdir('bower_components')
    bower = bowerstatic.Bower()
    components = bower.components('components', bower_components_dir.strpath)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('component/main.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    with pytest.raises(bowerstatic.Error):
        client.get('/')

    # Install the component after the failed request.
    component_dir = bower_components_dir.mkdir('component')
    component_dir.join('.bower.json').write(json.dumps({
        'name': 'component',
        'version': '2.1',
        'main': 'main.js'
    }))
    component_dir.join('main.js').write('/* this is main.js */')

    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/component/2.1/main.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_specific_resource():
    """A Resource object obtained up front can be passed to include()."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)
    jquery = components.resource('jquery/dist/jquery.js')

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)(jquery)
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_path():
    """Including just a component name pulls in that component's main file."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_main_missing():
    """A component without a main entry injects nothing at all."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('missing_main')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    # Without a main there is nothing to include, so the page is untouched.
    assert resp.body == (
        b'<html><head></head><body>Hello!</body></html>')
def test_injector_endpoint_depends_on_main_missing():
    """Depending on a main-less component still includes the dependent's own main."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('depends_on_missing_main')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    # The dependency has no main and so contributes nothing; only the
    # component's own resource is injected.
    assert resp.body == (
        b'<html><head><script type="text/javascript" '
        b'src="/bowerstatic/components/depends_on_missing_main/'
        b'2.1.1/resource.js"></script></head><body>Hello!</body></html>')
def test_injector_missing_version_bower_components():
    """A component whose .bower.json lacks a version still gets a versioned URL."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('missing-version-in-dot-bower-json')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    # The component resolves to version 1.0 even though its .bower.json
    # does not record a version itself.
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/missing-version-in-dot-bower-json/'
        b'1.0/example.js"></script>'
        b'</head><body>Hello!</body></html>')
def test_injector_endpoint_multiple_mains():
    """A component listing several mains injects one tag per main, in order."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('multi_main')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/multi_main/2.1.1/dist/jquery.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/multi_main/2.1.1/dist/another.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_depends_on_multiple_mains():
    """All mains of a dependency are injected before the dependent's own main."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('depends_on_multi_main')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/multi_main/2.1.1/dist/jquery.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/multi_main/2.1.1/dist/another.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/depends_on_multi_main/'
        b'2.1.1/dist/resource.js"></script>'
        b'</head><body>Hello!</body></html>')
def test_injector_endpoint_resource():
    """A Resource object made from a bare component name includes its main."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)
    jquery = components.resource('jquery')

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)(jquery)
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_dependencies():
    """Bower dependencies are injected ahead of the requested component."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery-ui')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery-ui/'
        b'1.10.4/ui/jquery-ui.js">'
        b'</script>'
        b'</head><body>Hello!</body></html>')
def test_injector_endpoint_dependencies_with_explicit_resource_objects():
    """Dependencies are also resolved when including an explicit Resource."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)
    jquery_ui = components.resource('jquery-ui')

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)(jquery_ui)
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery-ui/'
        b'1.10.4/ui/jquery-ui.js">'
        b'</script>'
        b'</head><body>Hello!</body></html>')
def test_injector_normal_dependencies():
    """Dependencies declared on a resource (by path) are injected first."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)
    components.resource(
        'jquery-ui/ui/minified/jquery-ui.min.js',
        dependencies=['jquery/dist/jquery.min.js'])

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery-ui/ui/minified/jquery-ui.min.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.min.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery-ui/'
        b'1.10.4/ui/minified/jquery-ui.min.js">'
        b'</script>'
        b'</head><body>Hello!</body></html>')
def test_injector_normal_dependencies_explicit_resource_objects():
    """Dependencies declared as Resource objects are injected first."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)
    jquery_min = components.resource(
        'jquery/dist/jquery.min.js')
    jquery_ui_min = components.resource(
        'jquery-ui/ui/minified/jquery-ui.min.js',
        dependencies=[jquery_min])

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)(jquery_ui_min)
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.min.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery-ui/'
        b'1.10.4/ui/minified/jquery-ui.min.js">'
        b'</script>'
        b'</head><body>Hello!</body></html>')
def test_injector_no_inclusions():
    """When nothing is included the response body passes through unchanged."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == b'<html><head></head><body>Hello!</body></html>'
def test_injector_multiple_identical_inclusions():
    """Including the same component twice results in a single script tag."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        include = components.includer(environ)
        include('jquery')
        include('jquery')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script></head><body>Hello!</body></html>')
def test_injector_multiple_identical_inclusions_through_dependencies():
    """Shared transitive dependencies are deduplicated in the output."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        include = components.includer(environ)
        # Both endpoints pull in jquery-ui (and thus jquery) a second time.
        include('jquery-ui')
        include('jquery-ui-bootstrap')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</script>\n'
        b'<script type="text/javascript" '
        b'src="/bowerstatic/components/jquery-ui/1.10.4/ui/jquery-ui.js">'
        b'</script>\n'
        b'<link rel="stylesheet" type="text/css" '
        b'href="/bowerstatic/components/jquery-ui-bootstrap/0.2.5/'
        b'jquery.ui.theme.css">'
        b'</head><body>Hello!</body></html>')
def test_injector_no_head_to_inject():
    """Without a <head> element there is nowhere to inject, so body is unchanged."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery/dist/jquery.js')
        return [b'<html><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == b'<html><body>Hello!</body></html>'
def test_injector_not_html_no_effect():
    """Non-HTML responses are left completely untouched by the injector."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        components.includer(environ)('jquery/dist/jquery.js')
        return [b'Hello!']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == b'Hello!'
def test_injector_PUT_no_effect():
    """The injector only rewrites GET responses; PUT passes through as-is."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery/dist/jquery.js')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.put('/')
    assert resp.body == b'<html><head></head><body>Hello!</body></html>'
def test_custom_renderer():
    """A renderer registered for an extension is used for matching includes."""
    bower = bowerstatic.Bower()

    def render_foo(resource):
        return '<foo>%s</foo>' % resource.url()

    bower.register_renderer('.foo', render_foo)
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery/dist/resource.foo')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head><foo>/bowerstatic/components/jquery/2.1.1/dist/'
        b'resource.foo</foo>'
        b'</head><body>Hello!</body></html>')
def test_missing_renderer():
    """Including a file with an extension lacking a renderer raises Error."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery/dist/resource.foo')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    with pytest.raises(bowerstatic.Error):
        client.get('/')
def test_injector_main_unknown_extension():
    """Unknown-extension entries in a multi-entry main are skipped silently."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('unknown_ext_in_main')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head><script type="text/javascript" '
        b'src="/bowerstatic/components/unknown_ext_in_main/2.1.1/'
        b'dist/jquery.js"></script></head><body>Hello!</body></html>')
def test_injector_custom_renderer_string_format():
    """A format string passed to include() overrides the default renderer."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery', '<link src="{url}">')
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<link src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</head><body>Hello!</body></html>')
def test_injector_custom_renderer_callable():
    """A callable passed to include() overrides the default renderer."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def custom_renderer(resource):
        return '<link src="%s">' % resource.url()

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)('jquery', custom_renderer)
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head>'
        b'<link src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
        b'</head><body>Hello!</body></html>')
def test_injector_inline_renderer():
    """The bundled inline-JS renderer embeds the file content in a script tag."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
        components.includer(environ)(
            'jquery', bowerstatic.renderer.render_inline_js)
        return [b'<html><head></head><body>Hello!</body></html>']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (
        b'<html><head><script type="text/javascript">/* jquery.js 2.1.1 */\n'
        b'</script></head><body>Hello!</body></html>')
def test_injector_no_content_type_set():
    """A response with no Content-Type header at all passes through unchanged."""
    bower = bowerstatic.Bower()
    base = os.path.join(os.path.dirname(__file__), 'bower_components')
    components = bower.components('components', base)

    def app(environ, start_response):
        start_response('200 OK', [])
        components.includer(environ)('jquery/dist/jquery.js')
        return [b'SOME-BINARY-OR-NOT-HTML-DATA']

    client = Client(bower.injector(app))
    resp = client.get('/')
    assert resp.body == (b'SOME-BINARY-OR-NOT-HTML-DATA')
from webtest import TestApp as Client
import bowerstatic
import json
import mock
import os
import pytest
def test_injector_specific_path():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery/dist/jquery.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_specific_path_wrong_file():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
with pytest.raises(bowerstatic.Error):
include('jquery/nonexistent.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
c.get('/')
def test_injector_does_not_fail_for_401_responses_with_no_content_type():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
# Can not use 401 here as webtest only accepts 200 or 3xx, which is ok
# as we want to test the behaviour if no content type is given
start_response('302', [('Content-Type', None)])
include = components.includer(environ)
with pytest.raises(bowerstatic.Error):
include('jquery/nonexistent.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
# webtest checks, in contracy to pyramid, the headers and breaks if one of
# them is not a string.
with mock.patch('webtest.lint.check_headers'):
c.get('/')
def test_injector_specific_path_wrong_file_then_added(tmpdir):
bower_components_dir = tmpdir.mkdir('bower_components')
component_dir = bower_components_dir.mkdir('component')
bower_json_file = component_dir.join('.bower.json')
bower_json_file.write(json.dumps({
'name': 'component',
'version': '2.1',
'main': 'main.js'
}))
main_js_file = component_dir.join('main.js')
main_js_file.write('/* this is main.js */')
bower = bowerstatic.Bower()
components = bower.components('components', bower_components_dir.strpath)
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('component/notyet.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
with pytest.raises(bowerstatic.Error):
c.get('/')
# now we add the nonexistent file
notyet_file = component_dir.join('notyet.js')
notyet_file.write('/* this is notyet.js */')
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/component/2.1/notyet.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_wrong_component():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
with pytest.raises(bowerstatic.Error):
include('nonexistent/nonexistent.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
c.get('/')
@pytest.mark.xfail
def test_injector_wrong_component_then_added(tmpdir):
bower_components_dir = tmpdir.mkdir('bower_components')
bower = bowerstatic.Bower()
components = bower.components('components', bower_components_dir.strpath)
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('component/main.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
with pytest.raises(bowerstatic.Error):
c.get('/')
# now add the component
component_dir = bower_components_dir.mkdir('component')
bower_json_file = component_dir.join('.bower.json')
bower_json_file.write(json.dumps({
'name': 'component',
'version': '2.1',
'main': 'main.js'
}))
main_js_file = component_dir.join('main.js')
main_js_file.write('/* this is main.js */')
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/component/2.1/main.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_specific_resource():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
jquery = components.resource('jquery/dist/jquery.js')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include(jquery)
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_path():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_main_missing():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('missing_main')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
# without a main, it should just include nothing
assert response.body == (
b'<html><head></head><body>Hello!</body></html>')
def test_injector_endpoint_depends_on_main_missing():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('depends_on_missing_main')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
# without a main, it should just include nothing
assert response.body == (
b'<html><head><script type="text/javascript" '
b'src="/bowerstatic/components/depends_on_missing_main/'
b'2.1.1/resource.js"></script></head><body>Hello!</body></html>')
def test_injector_missing_version_bower_components():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('missing-version-in-dot-bower-json')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
# without a main, it should just include nothing
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/missing-version-in-dot-bower-json/'
b'1.0/example.js"></script>'
b'</head><body>Hello!</body></html>')
def test_injector_endpoint_multiple_mains():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('multi_main')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/multi_main/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/multi_main/2.1.1/dist/another.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_depends_on_multiple_mains():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('depends_on_multi_main')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/multi_main/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/multi_main/2.1.1/dist/another.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/depends_on_multi_main/'
b'2.1.1/dist/resource.js"></script>'
b'</head><body>Hello!</body></html>')
def test_injector_endpoint_resource():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
jquery = components.resource('jquery')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include(jquery)
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_endpoint_dependencies():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery-ui')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery-ui/'
b'1.10.4/ui/jquery-ui.js">'
b'</script>'
b'</head><body>Hello!</body></html>')
def test_injector_endpoint_dependencies_with_explicit_resource_objects():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
jquery_ui = components.resource('jquery-ui')
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include(jquery_ui)
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery-ui/'
b'1.10.4/ui/jquery-ui.js">'
b'</script>'
b'</head><body>Hello!</body></html>')
def test_injector_normal_dependencies():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
components.resource(
'jquery-ui/ui/minified/jquery-ui.min.js',
dependencies=['jquery/dist/jquery.min.js'])
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery-ui/ui/minified/jquery-ui.min.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.min.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery-ui/'
b'1.10.4/ui/minified/jquery-ui.min.js">'
b'</script>'
b'</head><body>Hello!</body></html>')
def test_injector_normal_dependencies_explicit_resource_objects():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
jquery_min = components.resource(
'jquery/dist/jquery.min.js')
jquery_ui_min = components.resource(
'jquery-ui/ui/minified/jquery-ui.min.js',
dependencies=[jquery_min])
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include(jquery_ui_min)
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.min.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery-ui/'
b'1.10.4/ui/minified/jquery-ui.min.js">'
b'</script>'
b'</head><body>Hello!</body></html>')
def test_injector_no_inclusions():
bower = bowerstatic.Bower()
bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == b'<html><head></head><body>Hello!</body></html>'
def test_injector_multiple_identical_inclusions():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery')
include('jquery')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script></head><body>Hello!</body></html>')
def test_injector_multiple_identical_inclusions_through_dependencies():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
# going to pull in jquery-ui and jquery twice
include('jquery-ui')
include('jquery-ui-bootstrap')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</script>\n'
b'<script type="text/javascript" '
b'src="/bowerstatic/components/jquery-ui/1.10.4/ui/jquery-ui.js">'
b'</script>\n'
b'<link rel="stylesheet" type="text/css" '
b'href="/bowerstatic/components/jquery-ui-bootstrap/0.2.5/'
b'jquery.ui.theme.css">'
b'</head><body>Hello!</body></html>')
def test_injector_no_head_to_inject():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery/dist/jquery.js')
return [b'<html><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == b'<html><body>Hello!</body></html>'
def test_injector_not_html_no_effect():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
include = components.includer(environ)
include('jquery/dist/jquery.js')
return [b'Hello!']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == b'Hello!'
def test_injector_PUT_no_effect():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery/dist/jquery.js')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.put('/')
assert response.body == b'<html><head></head><body>Hello!</body></html>'
def test_custom_renderer():
bower = bowerstatic.Bower()
def render_foo(resource):
return '<foo>%s</foo>' % resource.url()
bower.register_renderer('.foo', render_foo)
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery/dist/resource.foo')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head><foo>/bowerstatic/components/jquery/2.1.1/dist/'
b'resource.foo</foo>'
b'</head><body>Hello!</body></html>')
def test_missing_renderer():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery/dist/resource.foo')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
with pytest.raises(bowerstatic.Error):
c.get('/')
def test_injector_main_unknown_extension():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('unknown_ext_in_main')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head><script type="text/javascript" '
b'src="/bowerstatic/components/unknown_ext_in_main/2.1.1/'
b'dist/jquery.js"></script></head><body>Hello!</body></html>')
def test_injector_custom_renderer_string_format():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery', '<link src="{url}">')
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<link src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</head><body>Hello!</body></html>')
def test_injector_custom_renderer_callable():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def custom_renderer(resource):
return '<link src="%s">' % resource.url()
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery', custom_renderer)
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head>'
b'<link src="/bowerstatic/components/jquery/2.1.1/dist/jquery.js">'
b'</head><body>Hello!</body></html>')
def test_injector_inline_renderer():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/html;charset=UTF-8')])
include = components.includer(environ)
include('jquery', bowerstatic.renderer.render_inline_js)
return [b'<html><head></head><body>Hello!</body></html>']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (
b'<html><head><script type="text/javascript">/* jquery.js 2.1.1 */\n'
b'</script></head><body>Hello!</body></html>')
def test_injector_no_content_type_set():
bower = bowerstatic.Bower()
components = bower.components('components', os.path.join(
os.path.dirname(__file__), 'bower_components'))
def wsgi(environ, start_response):
start_response('200 OK', [])
include = components.includer(environ)
include('jquery/dist/jquery.js')
return [b'SOME-BINARY-OR-NOT-HTML-DATA']
injector = bower.injector(wsgi)
c = Client(injector)
response = c.get('/')
assert response.body == (b'SOME-BINARY-OR-NOT-HTML-DATA') | 0.395251 | 0.156073 |
from __future__ import print_function
import sys, logging, six
import datetime as dt
from collections import OrderedDict
from objbrowser import browse
from objbrowser.utils import logging_basic_config
from objbrowser.attribute_model import ALL_ATTR_MODELS
logger = logging.getLogger(__name__)
MY_CONSTANT = 55
YOUR_CONSTANT = MY_CONSTANT
ANOTHER_CONSTANT = MY_CONSTANT * 2
def call_viewer_test():
""" Test procedure.
"""
import types, os
from os.path import join
if 1:
# In Python 3 there is no OldStyleClass anymore. The definition below will result in a
# new style class as well.
class OldStyleClass:
""" An old style class (pre Python 2.2)
See: http://docs.python.org/2/reference/datamodel.html#new-style-and-classic-classes
"""
static_member = 'static_value'
def __init__(self, s, i):
'constructor'
self._member_str = s
self.__member_int = i
class NewStyleClass(object):
""" A new style class (Python 2.2 and later). Note it inherits 'object'.
See: http://docs.python.org/2/reference/datamodel.html#new-style-and-classic-classes
"""
static_member = 'static_value'
def __init__(self, s, i):
'constructor'
self._member_str = s
self.__member_int = i
@property
def member_int(self):
return self.__member_int
@member_int.setter
def member_int(self, value):
self.__member_int = value
def method(self):
pass
@staticmethod
def static_method(self):
pass
@classmethod
def class_method(self):
pass
old_style_object = OldStyleClass('member_value', 44)
new_style_object = NewStyleClass('member_value', -66)
# Some comments just above
# the function definition.
def my_function(param):
"demo function"
return param
_copyright = types.__builtins__['copyright']
x_plus_2 = lambda x: x+2
Int = int
a = 6
b = 'seven'
c = 8j + 3 # complex number
d = {'4': 44, 's': 11, c: None}
e = 2.718281828
f_large = 7.77e14 # different str and repr?
f_avogadro = 6.02214129e23
ellip = Ellipsis
my_slice = slice(None, 3, -1)
n = None
not_impl = NotImplemented
tup = ('this', 'is', 'a tuple')
lst = [4, '4', d, ['r', dir], main]
my_set = set([3, 4, 4, 8])
my_frozenset = frozenset([3, 4, 5, 6, 6])
dict_regular = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
dict_ordered = OrderedDict(sorted(dict_regular.items(), key=lambda t: t[1])) # sorted by value
__special_dict_item__ = """A variable that begins and end with to underscores but is a
dictionary item, opposed to an attribute. It should therefore always be displayed, even
if the 'show __special_attributes__' view option is toggled off
"""
dt_now = dt.datetime.now()
date_now = dt.date(2014, 3, 23)
date_first = date_now.min
date_last = date_now.max
t = dt.time(13, 33, 1)
try:
import numpy as np
except ImportError as ex:
logger.warning(ex)
else:
arr = np.arange(24, dtype=np.uint16).reshape(8, 3)
pi_16bit = np.float16(np.pi)
# Datetime arrays
daysInFeb2005 = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
# Structured arrays (http://docs.scipy.org/doc/numpy/user/basics.rec.html)
datatype1 = np.dtype([('name', np.str_, 16), ('grade', np.float64)])
structured_array = np.array([('Sarah', 8.0), ('John', 7.5)], dtype=datatype1)
# Structured array with sub array
# (http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
datatype2 = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
array_with_sub_array = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=datatype2)
try:
import serial
except ImportError as ex:
logger.warning(ex)
else:
# PySerial object. Does not work if the port/device is closed. I cannot fix this.
ser = serial.Serial()
# These will give error in the str() representation.
# I deliberately did not use string.encode('ascii', 'backslashreplace') to
# demonstrate the difference between str() and repr()
u1 = six.unichr(40960) + u'ab\ncd' + six.unichr(1972)
u2 = u"a\xac\u1234\u20ac\U00008000"
u3 = u'all ASCII chars'
multi_line_str = """hello\r\nworld
the\rend."""
# TODO: LOOK at iterators and generators. E.g. beautiful soup
browse(locals(), reset = False, # without obj_name
show_special_attributes = None,
show_callable_attributes = None)
if 0:
browse(globals(), name = 'globals()',
attribute_columns = ALL_ATTR_MODELS,
attribute_details = ALL_ATTR_MODELS[1:4])
def main():
""" Main program to test stand alone
"""
logging_basic_config('DEBUG')
logger.info('Started example')
exit_code = call_viewer_test()
logging.info('Done example')
sys.exit(exit_code)
if __name__ == '__main__':
main() | examples/test.py | from __future__ import print_function
import sys, logging, six
import datetime as dt
from collections import OrderedDict
from objbrowser import browse
from objbrowser.utils import logging_basic_config
from objbrowser.attribute_model import ALL_ATTR_MODELS
logger = logging.getLogger(__name__)
MY_CONSTANT = 55
YOUR_CONSTANT = MY_CONSTANT
ANOTHER_CONSTANT = MY_CONSTANT * 2
def call_viewer_test():
""" Test procedure.
"""
import types, os
from os.path import join
if 1:
# In Python 3 there is no OldStyleClass anymore. The definition below will result in a
# new style class as well.
class OldStyleClass:
""" An old style class (pre Python 2.2)
See: http://docs.python.org/2/reference/datamodel.html#new-style-and-classic-classes
"""
static_member = 'static_value'
def __init__(self, s, i):
'constructor'
self._member_str = s
self.__member_int = i
class NewStyleClass(object):
""" A new style class (Python 2.2 and later). Note it inherits 'object'.
See: http://docs.python.org/2/reference/datamodel.html#new-style-and-classic-classes
"""
static_member = 'static_value'
def __init__(self, s, i):
'constructor'
self._member_str = s
self.__member_int = i
@property
def member_int(self):
return self.__member_int
@member_int.setter
def member_int(self, value):
self.__member_int = value
def method(self):
pass
@staticmethod
def static_method(self):
pass
@classmethod
def class_method(self):
pass
old_style_object = OldStyleClass('member_value', 44)
new_style_object = NewStyleClass('member_value', -66)
# Some comments just above
# the function definition.
def my_function(param):
"demo function"
return param
_copyright = types.__builtins__['copyright']
x_plus_2 = lambda x: x+2
Int = int
a = 6
b = 'seven'
c = 8j + 3 # complex number
d = {'4': 44, 's': 11, c: None}
e = 2.718281828
f_large = 7.77e14 # different str and repr?
f_avogadro = 6.02214129e23
ellip = Ellipsis
my_slice = slice(None, 3, -1)
n = None
not_impl = NotImplemented
tup = ('this', 'is', 'a tuple')
lst = [4, '4', d, ['r', dir], main]
my_set = set([3, 4, 4, 8])
my_frozenset = frozenset([3, 4, 5, 6, 6])
dict_regular = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
dict_ordered = OrderedDict(sorted(dict_regular.items(), key=lambda t: t[1])) # sorted by value
__special_dict_item__ = """A variable that begins and end with to underscores but is a
dictionary item, opposed to an attribute. It should therefore always be displayed, even
if the 'show __special_attributes__' view option is toggled off
"""
dt_now = dt.datetime.now()
date_now = dt.date(2014, 3, 23)
date_first = date_now.min
date_last = date_now.max
t = dt.time(13, 33, 1)
try:
import numpy as np
except ImportError as ex:
logger.warning(ex)
else:
arr = np.arange(24, dtype=np.uint16).reshape(8, 3)
pi_16bit = np.float16(np.pi)
# Datetime arrays
daysInFeb2005 = np.arange('2005-02', '2005-03', dtype='datetime64[D]')
# Structured arrays (http://docs.scipy.org/doc/numpy/user/basics.rec.html)
datatype1 = np.dtype([('name', np.str_, 16), ('grade', np.float64)])
structured_array = np.array([('Sarah', 8.0), ('John', 7.5)], dtype=datatype1)
# Structured array with sub array
# (http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
datatype2 = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
array_with_sub_array = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=datatype2)
try:
import serial
except ImportError as ex:
logger.warning(ex)
else:
# PySerial object. Does not work if the port/device is closed. I cannot fix this.
ser = serial.Serial()
# These will give error in the str() representation.
# I deliberately did not use string.encode('ascii', 'backslashreplace') to
# demonstrate the difference between str() and repr()
u1 = six.unichr(40960) + u'ab\ncd' + six.unichr(1972)
u2 = u"a\xac\u1234\u20ac\U00008000"
u3 = u'all ASCII chars'
multi_line_str = """hello\r\nworld
the\rend."""
# TODO: LOOK at iterators and generators. E.g. beautiful soup
browse(locals(), reset = False, # without obj_name
show_special_attributes = None,
show_callable_attributes = None)
if 0:
browse(globals(), name = 'globals()',
attribute_columns = ALL_ATTR_MODELS,
attribute_details = ALL_ATTR_MODELS[1:4])
def main():
""" Main program to test stand alone
"""
logging_basic_config('DEBUG')
logger.info('Started example')
exit_code = call_viewer_test()
logging.info('Done example')
sys.exit(exit_code)
if __name__ == '__main__':
main() | 0.390708 | 0.138055 |
import os
import sys
import tempfile
import unittest
import subprocess
from flower.command import FlowerCommand
from tornado.options import options
from tests.unit import AsyncHTTPTestCase
class TestFlowerCommand(AsyncHTTPTestCase):
def test_port(self):
with self.mock_option('port', 5555):
command = FlowerCommand()
command.apply_options('flower', argv=['--port=123'])
self.assertEqual(123, options.port)
def test_address(self):
with self.mock_option('address', '127.0.0.1'):
command = FlowerCommand()
command.apply_options('flower', argv=['--address=foo'])
self.assertEqual('foo', options.address)
class TestConfOption(AsyncHTTPTestCase):
def test_error_conf(self):
with self.mock_option('conf', None):
command = FlowerCommand()
self.assertRaises(IOError, command.apply_options,
'flower', argv=['--conf=foo'])
self.assertRaises(IOError, command.apply_options,
'flower', argv=['--conf=/tmp/flower/foo'])
def test_default_option(self):
command = FlowerCommand()
command.apply_options('flower', argv=[])
self.assertEqual('flowerconfig.py', options.conf)
def test_empty_conf(self):
with self.mock_option('conf', None):
command = FlowerCommand()
command.apply_options('flower', argv=['--conf=/dev/null'])
self.assertEqual('/dev/null', options.conf)
def test_conf_abs(self):
with tempfile.NamedTemporaryFile() as cf:
with self.mock_option('conf', cf.name), self.mock_option('debug', False):
cf.write('debug=True\n'.encode('utf-8'))
cf.flush()
command = FlowerCommand()
command.apply_options('flower', argv=['--conf=%s' % cf.name])
self.assertEqual(cf.name, options.conf)
self.assertTrue(options.debug)
def test_conf_relative(self):
with tempfile.NamedTemporaryFile(dir='.') as cf:
with self.mock_option('conf', cf.name), self.mock_option('debug', False):
cf.write('debug=True\n'.encode('utf-8'))
cf.flush()
command = FlowerCommand()
command.apply_options('flower', argv=['--conf=%s' % os.path.basename(cf.name)])
self.assertTrue(options.debug)
@unittest.skipUnless(not sys.platform.startswith("win"), 'skip windows')
def test_all_options_documented(self):
def grep(patter, filename):
return int(subprocess.check_output(
'grep "%s" %s|wc -l' % (patter, filename), shell=True))
defined = grep('^define(', 'flower/options.py') - 4
documented = grep('^~~', 'docs/config.rst')
self.assertEqual(defined, documented,
msg='Missing option documentation. Make sure all options '
'are documented in docs/config.rst') | tests/unit/test_command.py | import os
import sys
import tempfile
import unittest
import subprocess
from flower.command import FlowerCommand
from tornado.options import options
from tests.unit import AsyncHTTPTestCase
class TestFlowerCommand(AsyncHTTPTestCase):
def test_port(self):
with self.mock_option('port', 5555):
command = FlowerCommand()
command.apply_options('flower', argv=['--port=123'])
self.assertEqual(123, options.port)
def test_address(self):
with self.mock_option('address', '127.0.0.1'):
command = FlowerCommand()
command.apply_options('flower', argv=['--address=foo'])
self.assertEqual('foo', options.address)
class TestConfOption(AsyncHTTPTestCase):
def test_error_conf(self):
with self.mock_option('conf', None):
command = FlowerCommand()
self.assertRaises(IOError, command.apply_options,
'flower', argv=['--conf=foo'])
self.assertRaises(IOError, command.apply_options,
'flower', argv=['--conf=/tmp/flower/foo'])
def test_default_option(self):
command = FlowerCommand()
command.apply_options('flower', argv=[])
self.assertEqual('flowerconfig.py', options.conf)
def test_empty_conf(self):
with self.mock_option('conf', None):
command = FlowerCommand()
command.apply_options('flower', argv=['--conf=/dev/null'])
self.assertEqual('/dev/null', options.conf)
def test_conf_abs(self):
with tempfile.NamedTemporaryFile() as cf:
with self.mock_option('conf', cf.name), self.mock_option('debug', False):
cf.write('debug=True\n'.encode('utf-8'))
cf.flush()
command = FlowerCommand()
command.apply_options('flower', argv=['--conf=%s' % cf.name])
self.assertEqual(cf.name, options.conf)
self.assertTrue(options.debug)
def test_conf_relative(self):
with tempfile.NamedTemporaryFile(dir='.') as cf:
with self.mock_option('conf', cf.name), self.mock_option('debug', False):
cf.write('debug=True\n'.encode('utf-8'))
cf.flush()
command = FlowerCommand()
command.apply_options('flower', argv=['--conf=%s' % os.path.basename(cf.name)])
self.assertTrue(options.debug)
@unittest.skipUnless(not sys.platform.startswith("win"), 'skip windows')
def test_all_options_documented(self):
def grep(patter, filename):
return int(subprocess.check_output(
'grep "%s" %s|wc -l' % (patter, filename), shell=True))
defined = grep('^define(', 'flower/options.py') - 4
documented = grep('^~~', 'docs/config.rst')
self.assertEqual(defined, documented,
msg='Missing option documentation. Make sure all options '
'are documented in docs/config.rst') | 0.337749 | 0.164987 |
import wx
import common, config
from edit_windows import ManagedBase, EditStylesMixin
from gui_mixins import BitmapMixin
from tree import Node
import new_properties as np
class EditToggleButton(ManagedBase, EditStylesMixin, BitmapMixin):
"Class to handle wxToggleButton objects"
_PROPERTIES = ["Widget", "label", "value",
"bitmap", "disabled_bitmap", "pressed_bitmap", "current_bitmap", "focus_bitmap",
"style"]
PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES
_PROPERTY_LABELS = {"value":"Clicked"}
def __init__(self, name, parent, id, label, sizer, pos):
ManagedBase.__init__(self, name, 'wxToggleButton', parent, id, sizer, pos)
EditStylesMixin.__init__(self)
# initialise instance variable
self.label = np.TextProperty(label, multiline="grow")
self.value = np.CheckBoxProperty(False, default_value=False)
# bitmaps are only for >= 3.0
self.bitmap = np.BitmapPropertyD(min_version=(3,0))
self.disabled_bitmap = np.BitmapPropertyD(min_version=(3,0))
self.pressed_bitmap = np.BitmapPropertyD(min_version=(3,0))
self.current_bitmap = np.BitmapPropertyD(min_version=(3,0))
self.focus_bitmap = np.BitmapPropertyD(min_version=(3,0))
def create_widget(self):
self.widget = wx.ToggleButton(self.parent.widget, self.id, self.label)
self.widget.SetValue(self.value)
self.widget.Bind(wx.EVT_TOGGLEBUTTON, self.on_set_focus, id=self.id)
BitmapMixin._set_preview_bitmaps(self)
def properties_changed(self, modified):
if not modified or "value" in modified and self.widget:
self.widget.SetValue(self.value)
if not modified or "label" in modified:
if self.widget:
self.widget.SetLabel(self.label)
self._set_widget_best_size()
common.app_tree.refresh(self.node, refresh_label=True)
BitmapMixin._properties_changed(self, modified)
self._set_widget_best_size()
EditStylesMixin.properties_changed(self, modified)
ManagedBase.properties_changed(self, modified)
def builder(parent, sizer, pos, number=[1]):
"factory function for EditToggleButton objects"
name = u'button_%d' % number[0]
while common.app_tree.has_name(name):
number[0] += 1
name = u'button_%d' % number[0]
with parent.frozen():
button = EditToggleButton(name, parent, wx.NewId(), name, sizer, pos)
button.properties["style"].set_to_default()
button.check_defaults()
node = Node(button)
button.node = node
if parent.widget: button.create()
common.app_tree.insert(node, sizer.node, pos-1)
def xml_builder(attrs, parent, sizer, sizeritem, pos=None):
"factory to build EditToggleButton objects from a XML file"
from xml_parse import XmlParsingError
try:
name = attrs['name']
except KeyError:
raise XmlParsingError(_("'name' attribute missing"))
if sizer is None or sizeritem is None:
raise XmlParsingError(_("sizer or sizeritem object cannot be None"))
button = EditToggleButton(name, parent, wx.NewId(), '', sizer, pos)
#sizer.set_item(button.pos, proportion=sizeritem.proportion, span=sizeritem.span, flag=sizeritem.flag, border=sizeritem.border)
node = Node(button)
button.node = node
if pos is None:
common.app_tree.add(node, sizer.node)
else:
common.app_tree.insert(node, sizer.node, pos-1)
return button
def initialize():
"initialization function for the module: returns a wxBitmapButton to be added to the main palette"
common.widgets['EditToggleButton'] = builder
common.widgets_from_xml['EditToggleButton'] = xml_builder
return common.make_object_button('EditToggleButton', 'toggle_button.xpm') | Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/widgets/toggle_button/toggle_button.py | import wx
import common, config
from edit_windows import ManagedBase, EditStylesMixin
from gui_mixins import BitmapMixin
from tree import Node
import new_properties as np
class EditToggleButton(ManagedBase, EditStylesMixin, BitmapMixin):
"Class to handle wxToggleButton objects"
_PROPERTIES = ["Widget", "label", "value",
"bitmap", "disabled_bitmap", "pressed_bitmap", "current_bitmap", "focus_bitmap",
"style"]
PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES
_PROPERTY_LABELS = {"value":"Clicked"}
def __init__(self, name, parent, id, label, sizer, pos):
ManagedBase.__init__(self, name, 'wxToggleButton', parent, id, sizer, pos)
EditStylesMixin.__init__(self)
# initialise instance variable
self.label = np.TextProperty(label, multiline="grow")
self.value = np.CheckBoxProperty(False, default_value=False)
# bitmaps are only for >= 3.0
self.bitmap = np.BitmapPropertyD(min_version=(3,0))
self.disabled_bitmap = np.BitmapPropertyD(min_version=(3,0))
self.pressed_bitmap = np.BitmapPropertyD(min_version=(3,0))
self.current_bitmap = np.BitmapPropertyD(min_version=(3,0))
self.focus_bitmap = np.BitmapPropertyD(min_version=(3,0))
def create_widget(self):
self.widget = wx.ToggleButton(self.parent.widget, self.id, self.label)
self.widget.SetValue(self.value)
self.widget.Bind(wx.EVT_TOGGLEBUTTON, self.on_set_focus, id=self.id)
BitmapMixin._set_preview_bitmaps(self)
def properties_changed(self, modified):
if not modified or "value" in modified and self.widget:
self.widget.SetValue(self.value)
if not modified or "label" in modified:
if self.widget:
self.widget.SetLabel(self.label)
self._set_widget_best_size()
common.app_tree.refresh(self.node, refresh_label=True)
BitmapMixin._properties_changed(self, modified)
self._set_widget_best_size()
EditStylesMixin.properties_changed(self, modified)
ManagedBase.properties_changed(self, modified)
def builder(parent, sizer, pos, number=[1]):
    "factory function for EditToggleButton objects"
    # NOTE: the mutable default ``number=[1]`` is deliberate -- it acts as a
    # persistent counter across calls so generated widget names stay unique.
    name = u'button_%d' % number[0]
    while common.app_tree.has_name(name):
        number[0] += 1
        name = u'button_%d' % number[0]
    # freeze the parent to avoid flicker while the new control is created
    with parent.frozen():
        button = EditToggleButton(name, parent, wx.NewId(), name, sizer, pos)
        button.properties["style"].set_to_default()
        button.check_defaults()
        node = Node(button)
        button.node = node
        # only create the preview widget if the parent is already displayed
        if parent.widget: button.create()
        common.app_tree.insert(node, sizer.node, pos-1)
def xml_builder(attrs, parent, sizer, sizeritem, pos=None):
    "factory to build EditToggleButton objects from a XML file"
    from xml_parse import XmlParsingError
    try:
        name = attrs['name']
    except KeyError:
        raise XmlParsingError(_("'name' attribute missing"))
    if sizer is None or sizeritem is None:
        raise XmlParsingError(_("sizer or sizeritem object cannot be None"))
    # the label is empty here; it is filled in later from the XML properties
    button = EditToggleButton(name, parent, wx.NewId(), '', sizer, pos)
    #sizer.set_item(button.pos, proportion=sizeritem.proportion, span=sizeritem.span, flag=sizeritem.flag, border=sizeritem.border)
    node = Node(button)
    button.node = node
    # append at the end when no explicit position is given
    if pos is None:
        common.app_tree.add(node, sizer.node)
    else:
        common.app_tree.insert(node, sizer.node, pos-1)
    return button
def initialize():
    "initialization function for the module: returns a wxBitmapButton to be added to the main palette"
    # register the GUI builder and the XML builder under the widget class name
    common.widgets['EditToggleButton'] = builder
    common.widgets_from_xml['EditToggleButton'] = xml_builder
return common.make_object_button('EditToggleButton', 'toggle_button.xpm') | 0.474388 | 0.092852 |
from specutils import SpectrumList
from specutils.io.registers import data_loader
from .dc_common import FITS_FILE_EXTS, SINGLE_SPLIT_LABEL
from ..parsing_utils import read_fileobj_or_hdulist
# Configuration for the 5-extension GALAH data products: reduced science
# counts (HDU 0) + stdev error (HDU 1), unreduced counts (HDU 2) + stdev
# error (HDU 3), and a dimensionless normalised spectrum (HDU 4).
GALAH_5EXT_CONFIG = {
    "hdus": {
        "0": {
            "purpose": "science",
            "units": {"flux_unit": "count"},
        },
        "1": {
            "purpose": "error_stdev",
            "units": {"flux_unit": "count"},
        },
        "2": {
            "purpose": "unreduced_science",
            "units": {"flux_unit": "count"},
        },
        "3": {
            "purpose": "unreduced_error_stdev",
            "units": {"flux_unit": "count"},
        },
        "4": {
            # normalised flux is dimensionless, hence the empty unit string
            "purpose": "normalised_science",
            "units": {"flux_unit": ""},
        },
    },
    # linear wavelength solution read from standard FITS WCS keywords
    "wcs": {
        "pixel_reference_point_keyword": "CRPIX1",
        "pixel_reference_point_value_keyword": "CRVAL1",
        "pixel_width_keyword": "CDELT1",
        "wavelength_unit": "Angstrom",
    },
    "units": None,  # per-HDU "units" entries are given above instead
    "all_standard_units": False,
    "all_keywords": False,
    "valid_wcs": False,
}
# Configuration for the 4-extension layout (no normalised spectrum);
# all flux HDUs share one "count" unit, given once via the "units" key.
GALAH_4EXT_CONFIG = {
    "hdus": {
        "0": {"purpose": "science"},
        "1": {"purpose": "error_stdev"},
        "2": {"purpose": "unreduced_science"},
        "3": {"purpose": "unreduced_error_stdev"},
    },
    "wcs": {
        "pixel_reference_point_keyword": "CRPIX1",
        "pixel_reference_point_value_keyword": "CRVAL1",
        "pixel_width_keyword": "CDELT1",
        "wavelength_unit": "Angstrom",
    },
    "units": {"flux_unit": "count"},
    "all_standard_units": False,
    "all_keywords": False,
    "valid_wcs": False,
}
def identify_galah(origin, *args, **kwargs):
    """
    Identify if the current file is a GALAH file.

    Returns True when the primary header's ORIGIN keyword mentions GALAH.
    """
    with read_fileobj_or_hdulist(*args, **kwargs) as hdulist:
        # BUGFIX: header.get("ORIGIN") returns None when the keyword is
        # absent, and ``"GALAH" in None`` raises TypeError; default to ""
        # so non-GALAH files are simply rejected.
        return "GALAH" in hdulist[0].header.get("ORIGIN", "")
@data_loader(
    label="GALAH", extensions=FITS_FILE_EXTS, dtype=SpectrumList,
    identifier=identify_galah, priority=10,
)
def galah_loader(filename):
    """Load a GALAH FITS file (4- or 5-extension layout) into a SpectrumList."""
    with read_fileobj_or_hdulist(filename) as hdulist:
        if len(hdulist) == 5:
            spectra = SpectrumList.read(
                hdulist, format=SINGLE_SPLIT_LABEL, **GALAH_5EXT_CONFIG
            )
            # record which layout was read so consumers can distinguish them
            spectra[0].meta["galah_hdu_format"] = 5
        elif len(hdulist) == 4:
            spectra = SpectrumList.read(
                hdulist, format=SINGLE_SPLIT_LABEL, **GALAH_4EXT_CONFIG
            )
            spectra[0].meta["galah_hdu_format"] = 4
        else:
            raise RuntimeError(
                "Unknown GALAH format, has {} extensions".format(len(hdulist))
            )
return spectra | specutils/io/default_loaders/galah.py | from specutils import SpectrumList
from specutils.io.registers import data_loader
from .dc_common import FITS_FILE_EXTS, SINGLE_SPLIT_LABEL
from ..parsing_utils import read_fileobj_or_hdulist
GALAH_5EXT_CONFIG = {
"hdus": {
"0": {
"purpose": "science",
"units": {"flux_unit": "count"},
},
"1": {
"purpose": "error_stdev",
"units": {"flux_unit": "count"},
},
"2": {
"purpose": "unreduced_science",
"units": {"flux_unit": "count"},
},
"3": {
"purpose": "unreduced_error_stdev",
"units": {"flux_unit": "count"},
},
"4": {
"purpose": "normalised_science",
"units": {"flux_unit": ""},
},
},
"wcs": {
"pixel_reference_point_keyword": "CRPIX1",
"pixel_reference_point_value_keyword": "CRVAL1",
"pixel_width_keyword": "CDELT1",
"wavelength_unit": "Angstrom",
},
"units": None,
"all_standard_units": False,
"all_keywords": False,
"valid_wcs": False,
}
GALAH_4EXT_CONFIG = {
"hdus": {
"0": {"purpose": "science"},
"1": {"purpose": "error_stdev"},
"2": {"purpose": "unreduced_science"},
"3": {"purpose": "unreduced_error_stdev"},
},
"wcs": {
"pixel_reference_point_keyword": "CRPIX1",
"pixel_reference_point_value_keyword": "CRVAL1",
"pixel_width_keyword": "CDELT1",
"wavelength_unit": "Angstrom",
},
"units": {"flux_unit": "count"},
"all_standard_units": False,
"all_keywords": False,
"valid_wcs": False,
}
def identify_galah(origin, *args, **kwargs):
    """
    Identify if the current file is a GALAH file.

    Returns True when the primary header's ORIGIN keyword mentions GALAH.
    """
    with read_fileobj_or_hdulist(*args, **kwargs) as hdulist:
        # BUGFIX: header.get("ORIGIN") returns None when the keyword is
        # absent, and ``"GALAH" in None`` raises TypeError; default to ""
        # so non-GALAH files are simply rejected.
        return "GALAH" in hdulist[0].header.get("ORIGIN", "")
@data_loader(
label="GALAH", extensions=FITS_FILE_EXTS, dtype=SpectrumList,
identifier=identify_galah, priority=10,
)
def galah_loader(filename):
with read_fileobj_or_hdulist(filename) as hdulist:
if len(hdulist) == 5:
spectra = SpectrumList.read(
hdulist, format=SINGLE_SPLIT_LABEL, **GALAH_5EXT_CONFIG
)
spectra[0].meta["galah_hdu_format"] = 5
elif len(hdulist) == 4:
spectra = SpectrumList.read(
hdulist, format=SINGLE_SPLIT_LABEL, **GALAH_4EXT_CONFIG
)
spectra[0].meta["galah_hdu_format"] = 4
else:
raise RuntimeError(
"Unknown GALAH format, has {} extensions".format(len(hdulist))
)
return spectra | 0.695648 | 0.364127 |
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import json
from link_crawler import link_crawler
from datetime import datetime, timedelta
import os
import shutil
import zlib
from hashlib import md5
import base64
# binascii helpers reached through the base64 module (py2-era idiom)
a2b_hex = base64.binascii.a2b_hex
hexlify = base64.binascii.hexlify
# md5 hex digest of a byte string; used to build cache file names
md5_str = lambda x: md5(x).hexdigest()
class DiskCache:
"""
磁盘缓存
过期检测
数据压缩
"""
def __init__(self, cache_dir='cache', expires=timedelta(days=30)):
"""
cache主目录
过期时间
是否压缩
"""
self.cache_dir = cache_dir
self.expires = expires
def __getitem__(self, url):
path = self.url_to_path(url)
if os.path.exists(path):
try:
result = json.load(open(path))
result['timestamp'] = datetime.strptime(result['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
# 过期返回错误
if self.has_expired(result['timestamp']):
print url, 'expired'
raise KeyError(url + 'has expired')
else:
result['html'] = zlib.decompress(a2b_hex(result['html']))
return result
except ValueError as e:
# 数据损坏
self.__delitem__(url)
raise KeyError(url + 'data is broken')
else:
raise KeyError(url + 'does not exist')
def __setitem__(self, url, result):
path = self.url_to_path(url)
folder = os.path.dirname(path)
if not os.path.isdir(folder):
os.makedirs(folder)
result['html'] = hexlify(zlib.compress(result['html']))
result['timestamp'] = str(result['timestamp']
)
with open(path, 'wb') as f:
json.dump(result, f)
# 下面保证result的数据没有被改变,否则会在调用的时候出错,莫名其妙的错误
result['html'] = zlib.decompress(a2b_hex(result['html']))
result['timestamp'] = datetime.strptime(result['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
def __delitem__(self, url):
path = self.url_to_path(url)
try:
os.remove(path)
except OSError:
pass
def url_to_path(self, url):
md5url = md5_str(url)
return os.path.realpath(os.path.join(self.cache_dir, md5url))
def has_expired(self, timestamp):
return datetime.utcnow() > (timestamp + self.expires)
def clear(self):
"""
删除所有cache
"""
if os.path.isdir(self.cache_dir):
shutil.rmtree(self.cache_dir)
if __name__ == '__main__':
from datetime import timedelta
link_crawler('http://example.webscraping.com/', delay=3, link_regex='/(index|view)',
max_urls=-1, cache=DiskCache(expires=timedelta(hours=1))) | disk_cache.py | import sys
reload(sys)
sys.setdefaultencoding('utf8')
import json
from link_crawler import link_crawler
from datetime import datetime, timedelta
import os
import shutil
import zlib
from hashlib import md5
import base64
a2b_hex = base64.binascii.a2b_hex
hexlify = base64.binascii.hexlify
md5_str = lambda x: md5(x).hexdigest()
class DiskCache:
"""
磁盘缓存
过期检测
数据压缩
"""
def __init__(self, cache_dir='cache', expires=timedelta(days=30)):
"""
cache主目录
过期时间
是否压缩
"""
self.cache_dir = cache_dir
self.expires = expires
def __getitem__(self, url):
path = self.url_to_path(url)
if os.path.exists(path):
try:
result = json.load(open(path))
result['timestamp'] = datetime.strptime(result['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
# 过期返回错误
if self.has_expired(result['timestamp']):
print url, 'expired'
raise KeyError(url + 'has expired')
else:
result['html'] = zlib.decompress(a2b_hex(result['html']))
return result
except ValueError as e:
# 数据损坏
self.__delitem__(url)
raise KeyError(url + 'data is broken')
else:
raise KeyError(url + 'does not exist')
def __setitem__(self, url, result):
path = self.url_to_path(url)
folder = os.path.dirname(path)
if not os.path.isdir(folder):
os.makedirs(folder)
result['html'] = hexlify(zlib.compress(result['html']))
result['timestamp'] = str(result['timestamp']
)
with open(path, 'wb') as f:
json.dump(result, f)
# 下面保证result的数据没有被改变,否则会在调用的时候出错,莫名其妙的错误
result['html'] = zlib.decompress(a2b_hex(result['html']))
result['timestamp'] = datetime.strptime(result['timestamp'], '%Y-%m-%d %H:%M:%S.%f')
def __delitem__(self, url):
path = self.url_to_path(url)
try:
os.remove(path)
except OSError:
pass
def url_to_path(self, url):
md5url = md5_str(url)
return os.path.realpath(os.path.join(self.cache_dir, md5url))
def has_expired(self, timestamp):
return datetime.utcnow() > (timestamp + self.expires)
def clear(self):
"""
删除所有cache
"""
if os.path.isdir(self.cache_dir):
shutil.rmtree(self.cache_dir)
if __name__ == '__main__':
from datetime import timedelta
link_crawler('http://example.webscraping.com/', delay=3, link_regex='/(index|view)',
max_urls=-1, cache=DiskCache(expires=timedelta(hours=1))) | 0.197367 | 0.089733 |
from django.conf.urls import url
from django.urls import path
from .views import AdventCalendarView, AdventDoorView, QuizListView, QuizView, QuizResultView
from .views.quiz import quiz_reply, QuizScoreboardView, QuizResultDeleteView
from .views.user_test import test_result, TestView
from .views.advent import participate_in_competition, AdventDoorAdminView, reset_door, register_found_santa, SantaCountListView
from .views.code_golf import CodeGolf, code_golf_score, CodeTaskListView
from .views.color_picker import submitColorChoice
from .views.place import NewestPlaceView, PlaceView, get_place_grid, submit_place, get_place_history, get_place_updates, get_place_info
urlpatterns = [
    # --- Advent calendar ------------------------------------------------
    url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/$',
        AdventDoorView.as_view(),
        name="advent_door"),
    url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/delta$',
        participate_in_competition,
        name="advent_participate"),
    url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/admin$',
        AdventDoorAdminView.as_view(),
        name="advent_admin"),
    url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/admin/reset$',
        reset_door,
        name="advent_admin_reset"),
    url(r'^julekalender/(?P<year>\d+)/$',
        AdventCalendarView.as_view(),
        name="advent_calendar"),
    # NOTE(review): this pattern has no trailing '$', so it matches any URL
    # starting with 'julenisser/' -- confirm whether that is intentional.
    url(r'^julenisser/',
        SantaCountListView.as_view(),
        name="hidden_santa"),
    url(r'^registersanta/(?P<santa_id>\d+)/(?P<redirect_url>[\w\-]+)/$',
        register_found_santa,
        name="register_santa"),
    # --- Quizzes --------------------------------------------------------
    url(r'^quiz/$',
        QuizListView.as_view(),
        name="quiz_list"),
    url(r'^quiz/(?P<pk>[0-9]+)$',
        QuizView.as_view(),
        name="quiz"),
    url(r'^quiz/(?P<pk>[0-9]+)/reply$',
        quiz_reply,
        name="quiz_reply"),
    url(r'^quiz/(?P<pk>[0-9]+)/reply/delete$',
        QuizResultDeleteView.as_view(),
        name="quiz_result_delete"),
    url(r'^quiz/resultat/(?P<pk>[0-9]+)$',
        QuizResultView.as_view(),
        name="quiz_result"),
    url(r'^quiz/highscore/(?P<pk>[0-9]+)$',
        QuizScoreboardView.as_view(),
        name="quiz_score"),
    # --- User tests -----------------------------------------------------
    url(r'^brukertest/(?P<pk>[0-9]+)$',
        TestView.as_view(),
        name="user_test"),
    url(r'^brukertest/(?P<pk>[0-9]+)/resultat$',
        test_result,
        name="test_result"),
    # --- Code golf ------------------------------------------------------
    url(r'^kodegolf/$', CodeTaskListView.as_view(), name="code_golf_menu"),
    url(r'^kodegolf/(?P<task_id>[0-9]+)$', CodeGolf.as_view(), name="code_golf"),
    url(r'^kodegolf/score/(?P<task_id>[0-9]+)$', code_golf_score, name="code_golf_score"),
    # Color picker
    url(r'^colorpicker/$', submitColorChoice, name="color_picker"),
    # --- Place (collaborative pixel canvas) -----------------------------
    path('place/', NewestPlaceView.as_view(), name="newest_place"),
    path('place/<int:pk>', PlaceView.as_view(), name="place"),
    path('place/<int:pk>/grid', get_place_grid, name="get_place_grid"),
    path('place/<int:pk>/updates', get_place_updates, name="get_place_updates"),
    path('place/<int:pk>/history', get_place_history, name="get_place_history"),
    path('place/<int:pk>/info', get_place_info, name="get_place_info"),
    path('place/<int:pk>/submit', submit_place, name="submit_place"),
] | nablapps/interactive/urls.py | from django.conf.urls import url
from django.urls import path
from .views import AdventCalendarView, AdventDoorView, QuizListView, QuizView, QuizResultView
from .views.quiz import quiz_reply, QuizScoreboardView, QuizResultDeleteView
from .views.user_test import test_result, TestView
from .views.advent import participate_in_competition, AdventDoorAdminView, reset_door, register_found_santa, SantaCountListView
from .views.code_golf import CodeGolf, code_golf_score, CodeTaskListView
from .views.color_picker import submitColorChoice
from .views.place import NewestPlaceView, PlaceView, get_place_grid, submit_place, get_place_history, get_place_updates, get_place_info
urlpatterns = [
url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/$',
AdventDoorView.as_view(),
name="advent_door"),
url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/delta$',
participate_in_competition,
name="advent_participate"),
url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/admin$',
AdventDoorAdminView.as_view(),
name="advent_admin"),
url(r'^julekalender/(?P<year>\d+)/(?P<number>\d+)/admin/reset$',
reset_door,
name="advent_admin_reset"),
url(r'^julekalender/(?P<year>\d+)/$',
AdventCalendarView.as_view(),
name="advent_calendar"),
url(r'^julenisser/',
SantaCountListView.as_view(),
name="hidden_santa"),
url(r'^registersanta/(?P<santa_id>\d+)/(?P<redirect_url>[\w\-]+)/$',
register_found_santa,
name="register_santa"),
url(r'^quiz/$',
QuizListView.as_view(),
name="quiz_list"),
url(r'^quiz/(?P<pk>[0-9]+)$',
QuizView.as_view(),
name="quiz"),
url(r'^quiz/(?P<pk>[0-9]+)/reply$',
quiz_reply,
name="quiz_reply"),
url(r'^quiz/(?P<pk>[0-9]+)/reply/delete$',
QuizResultDeleteView.as_view(),
name="quiz_result_delete"),
url(r'^quiz/resultat/(?P<pk>[0-9]+)$',
QuizResultView.as_view(),
name="quiz_result"),
url(r'^quiz/highscore/(?P<pk>[0-9]+)$',
QuizScoreboardView.as_view(),
name="quiz_score"),
url(r'^brukertest/(?P<pk>[0-9]+)$',
TestView.as_view(),
name="user_test"),
url(r'^brukertest/(?P<pk>[0-9]+)/resultat$',
test_result,
name="test_result"),
url(r'^kodegolf/$', CodeTaskListView.as_view(), name="code_golf_menu"),
url(r'^kodegolf/(?P<task_id>[0-9]+)$', CodeGolf.as_view(), name="code_golf"),
url(r'^kodegolf/score/(?P<task_id>[0-9]+)$', code_golf_score, name="code_golf_score"),
# Color picker
url(r'^colorpicker/$', submitColorChoice, name="color_picker"),
path('place/', NewestPlaceView.as_view(), name="newest_place"),
path('place/<int:pk>', PlaceView.as_view(), name="place"),
path('place/<int:pk>/grid', get_place_grid, name="get_place_grid"),
path('place/<int:pk>/updates', get_place_updates, name="get_place_updates"),
path('place/<int:pk>/history', get_place_history, name="get_place_history"),
path('place/<int:pk>/info', get_place_info, name="get_place_info"),
path('place/<int:pk>/submit', submit_place, name="submit_place"),
] | 0.271059 | 0.137938 |
import operator
import unittest
from nose.plugins.skip import SkipTest
from mongoengine import connect
from mongoengine.connection import get_db, disconnect_all
from mongoengine.mongodb_support import get_mongodb_version, MONGODB_26, MONGODB_3, MONGODB_32, MONGODB_34
from mongoengine.pymongo_support import IS_PYMONGO_3
MONGO_TEST_DB = 'mongoenginetest' # standard name for the test database
class MongoDBTestCase(unittest.TestCase):
    """Base class for tests that need a mongodb connection
    It ensures that the db is clean at the beginning and dropped at the end automatically
    """
    @classmethod
    def setUpClass(cls):
        # start from a clean slate: close any leftover connections, then
        # connect and drop any data a previous (aborted) run left behind
        disconnect_all()
        cls._connection = connect(db=MONGO_TEST_DB)
        cls._connection.drop_database(MONGO_TEST_DB)
        cls.db = get_db()
    @classmethod
    def tearDownClass(cls):
        # drop everything the tests created and release all connections
        cls._connection.drop_database(MONGO_TEST_DB)
        disconnect_all()
def get_as_pymongo(doc):
    """Fetch the raw pymongo (dict) representation of the given Document."""
    raw_queryset = doc.__class__.objects.as_pymongo()
    return raw_queryset.get(id=doc.id)
def _decorated_with_ver_requirement(func, mongo_version_req, oper):
"""Return a given function decorated with the version requirement
for a particular MongoDB version tuple.
:param mongo_version_req: The mongodb version requirement (tuple(int, int))
:param oper: The operator to apply (e.g: operator.ge)
"""
def _inner(*args, **kwargs):
mongodb_v = get_mongodb_version()
if oper(mongodb_v, mongo_version_req):
return func(*args, **kwargs)
raise SkipTest('Needs MongoDB v{}+'.format('.'.join(str(n) for n in mongo_version_req)))
_inner.__name__ = func.__name__
_inner.__doc__ = func.__doc__
return _inner
# Thin decorators that bind a concrete version + operator onto the shared
# _decorated_with_ver_requirement helper.
def requires_mongodb_gte_34(func):
    """Raise a SkipTest exception if we're working with MongoDB version
    lower than v3.4
    """
    return _decorated_with_ver_requirement(func, MONGODB_34, oper=operator.ge)
def requires_mongodb_lte_32(func):
    """Raise a SkipTest exception if we're working with MongoDB version
    greater than v3.2.
    """
    return _decorated_with_ver_requirement(func, MONGODB_32, oper=operator.le)
def requires_mongodb_gte_26(func):
    """Raise a SkipTest exception if we're working with MongoDB version
    lower than v2.6.
    """
    return _decorated_with_ver_requirement(func, MONGODB_26, oper=operator.ge)
def requires_mongodb_gte_3(func):
    """Raise a SkipTest exception if we're working with MongoDB version
    lower than v3.0.
    """
    return _decorated_with_ver_requirement(func, MONGODB_3, oper=operator.ge)
def skip_pymongo3(f):
    """Raise a SkipTest exception if we're running a test against
    PyMongo v3.x.
    """
    def _inner(*args, **kwargs):
        # the version check happens at call time, not at decoration time
        if IS_PYMONGO_3:
            raise SkipTest("Useless with PyMongo 3+")
        return f(*args, **kwargs)
    # preserve the wrapped function's identity for test discovery/reporting
    _inner.__name__ = f.__name__
    _inner.__doc__ = f.__doc__
return _inner | tests/utils.py | import operator
import unittest
from nose.plugins.skip import SkipTest
from mongoengine import connect
from mongoengine.connection import get_db, disconnect_all
from mongoengine.mongodb_support import get_mongodb_version, MONGODB_26, MONGODB_3, MONGODB_32, MONGODB_34
from mongoengine.pymongo_support import IS_PYMONGO_3
MONGO_TEST_DB = 'mongoenginetest' # standard name for the test database
class MongoDBTestCase(unittest.TestCase):
"""Base class for tests that need a mongodb connection
It ensures that the db is clean at the beginning and dropped at the end automatically
"""
@classmethod
def setUpClass(cls):
disconnect_all()
cls._connection = connect(db=MONGO_TEST_DB)
cls._connection.drop_database(MONGO_TEST_DB)
cls.db = get_db()
@classmethod
def tearDownClass(cls):
cls._connection.drop_database(MONGO_TEST_DB)
disconnect_all()
def get_as_pymongo(doc):
"""Fetch the pymongo version of a certain Document"""
return doc.__class__.objects.as_pymongo().get(id=doc.id)
def _decorated_with_ver_requirement(func, mongo_version_req, oper):
"""Return a given function decorated with the version requirement
for a particular MongoDB version tuple.
:param mongo_version_req: The mongodb version requirement (tuple(int, int))
:param oper: The operator to apply (e.g: operator.ge)
"""
def _inner(*args, **kwargs):
mongodb_v = get_mongodb_version()
if oper(mongodb_v, mongo_version_req):
return func(*args, **kwargs)
raise SkipTest('Needs MongoDB v{}+'.format('.'.join(str(n) for n in mongo_version_req)))
_inner.__name__ = func.__name__
_inner.__doc__ = func.__doc__
return _inner
def requires_mongodb_gte_34(func):
"""Raise a SkipTest exception if we're working with MongoDB version
lower than v3.4
"""
return _decorated_with_ver_requirement(func, MONGODB_34, oper=operator.ge)
def requires_mongodb_lte_32(func):
"""Raise a SkipTest exception if we're working with MongoDB version
greater than v3.2.
"""
return _decorated_with_ver_requirement(func, MONGODB_32, oper=operator.le)
def requires_mongodb_gte_26(func):
"""Raise a SkipTest exception if we're working with MongoDB version
lower than v2.6.
"""
return _decorated_with_ver_requirement(func, MONGODB_26, oper=operator.ge)
def requires_mongodb_gte_3(func):
"""Raise a SkipTest exception if we're working with MongoDB version
lower than v3.0.
"""
return _decorated_with_ver_requirement(func, MONGODB_3, oper=operator.ge)
def skip_pymongo3(f):
"""Raise a SkipTest exception if we're running a test against
PyMongo v3.x.
"""
def _inner(*args, **kwargs):
if IS_PYMONGO_3:
raise SkipTest("Useless with PyMongo 3+")
return f(*args, **kwargs)
_inner.__name__ = f.__name__
_inner.__doc__ = f.__doc__
return _inner | 0.628977 | 0.287943 |
from fastapi import APIRouter, Depends, status, Response
from typing import Optional
from pydantic import BaseModel
from core.config import (
ALLOWED_HOSTS,
PROJECT_NAME,
PROJECT_VERSION,
API_PORT,
DATABASE_NAME,
NER_LABEL_COLLECTION,
Feedback_Template_Collection,
Feedback_Suggestion_Collection,
LABEL_COLLECTION,
)
from db.mongodb import AsyncIOMotorClient, get_database
import asyncio
from typing import Any, Dict, AnyStr, List, Union
from datetime import datetime
JSONObject = Dict[AnyStr, Any]
JSONArray = List[Any]
JSONStructure = Union[JSONArray, JSONObject]
router = APIRouter()
class suggest_best_practice_by_QA_model_body(BaseModel):
    # Request body for the QA suggestion endpoint; the defaults double as
    # the example values shown in the interactive OpenAPI documentation.
    question: str = "Who is the buyer?"
    context: str = "Dan (the seller) Will be deemed to have completed its delivery obligations before 2021-7-5 if in Niall's opinion, the Jeep Car satisfies the Acceptance Criteria, and Niall (the buyer) notifies Dan in writing that it is accepting the Jeep Car."
from utils.QA_model import get_QA_model_answer
SUGGESTION_PREDICT_TAGS = ["Predict"]
@router.post("/models/QA/suggestion", tags=SUGGESTION_PREDICT_TAGS)
async def suggest_best_practice_by_QA_model(response: Response, data: suggest_best_practice_by_QA_model_body):
    """Example Questions:\n"What is the Item to be sell?",\n"Who is the buyer?",\n"Who is the seller?","What is the due date?"\n\n# Example Context: \n"Dan (the seller) Will be deemed to have completed its delivery obligations before 2021-7-5 if in Niall's opinion, the Jeep Car satisfies the Acceptance Criteria, and Niall (the buyer) notifies Dan in writing that it is accepting the Jeep Car." """
    # run the QA model over the supplied question/context pair
    prediction = get_QA_model_answer(data.question, data.context)
    response.status_code = status.HTTP_200_OK
    return {"prediction": prediction}
@router.get("/models/QA/suggestion", tags = SUGGESTION_PREDICT_TAGS)
async def get_QA_model_status(response: Response):
    # Lightweight health-check endpoint for the QA model service.
    response.status_code = status.HTTP_200_OK
    return {
        "message": "get success",
        "status": "online"
} | api/api_v1/endpoints/suggestion_predict.py | from fastapi import APIRouter, Depends, status, Response
from typing import Optional
from pydantic import BaseModel
from core.config import (
ALLOWED_HOSTS,
PROJECT_NAME,
PROJECT_VERSION,
API_PORT,
DATABASE_NAME,
NER_LABEL_COLLECTION,
Feedback_Template_Collection,
Feedback_Suggestion_Collection,
LABEL_COLLECTION,
)
from db.mongodb import AsyncIOMotorClient, get_database
import asyncio
from typing import Any, Dict, AnyStr, List, Union
from datetime import datetime
JSONObject = Dict[AnyStr, Any]
JSONArray = List[Any]
JSONStructure = Union[JSONArray, JSONObject]
router = APIRouter()
class suggest_best_practice_by_QA_model_body(BaseModel):
question: str = "Who is the buyer?"
context: str = "Dan (the seller) Will be deemed to have completed its delivery obligations before 2021-7-5 if in Niall's opinion, the Jeep Car satisfies the Acceptance Criteria, and Niall (the buyer) notifies Dan in writing that it is accepting the Jeep Car."
from utils.QA_model import get_QA_model_answer
SUGGESTION_PREDICT_TAGS = ["Predict"]
@router.post("/models/QA/suggestion", tags = SUGGESTION_PREDICT_TAGS)
async def suggest_best_practice_by_QA_model(response: Response, data: suggest_best_practice_by_QA_model_body):
"""Example Questions:\n"What is the Item to be sell?",\n"Who is the buyer?",\n"Who is the seller?","What is the due date?"\n\n# Example Context: \n"Dan (the seller) Will be deemed to have completed its delivery obligations before 2021-7-5 if in Niall's opinion, the Jeep Car satisfies the Acceptance Criteria, and Niall (the buyer) notifies Dan in writing that it is accepting the Jeep Car." """
response.status_code = status.HTTP_200_OK
return {
"prediction": get_QA_model_answer(data.question, data.context),
}
@router.get("/models/QA/suggestion", tags = SUGGESTION_PREDICT_TAGS)
async def get_QA_model_status(response: Response):
response.status_code = status.HTTP_200_OK
return {
"message": "get success",
"status": "online"
} | 0.683525 | 0.097691 |
from abc import abstractmethod
from pants.backend.native.config.environment import CppToolchain, CToolchain
from pants.backend.native.subsystems.native_build_settings import NativeBuildSettings
from pants.backend.native.subsystems.native_build_step import NativeBuildStep
from pants.backend.native.subsystems.native_toolchain import (NativeToolchain,
ToolchainVariantRequest)
from pants.backend.native.targets.native_library import NativeLibrary
from pants.backend.native.targets.packaged_native_library import PackagedNativeLibrary
from pants.build_graph.dependency_context import DependencyContext
from pants.task.task import Task
from pants.util.collections import assert_single_element
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import classproperty
from pants.util.objects import Exactly, SubclassesOf
class NativeTask(Task):
  """Shared base class for the native (C/C++) compile and link tasks."""

  @classproperty
  @abstractmethod
  def source_target_constraint(cls):
    """Return a type constraint which is used to filter "source" targets for this task.
    This is used to make it clearer which tasks act on which targets, since the compile and link
    tasks work on different target sets (just C and just C++ in the compile tasks, and both in the
    link task).
    :return: :class:`pants.util.objects.TypeConstraint`
    """

  @classproperty
  def dependent_target_constraint(cls):
    """Return a type constraint which is used to filter dependencies for a target.
    This is used to make native_deps() calculation automatic and declarative.
    :return: :class:`pants.util.objects.TypeConstraint`
    """
    return SubclassesOf(NativeLibrary)

  @classproperty
  def packaged_dependent_constraint(cls):
    """Return a type constraint which is used to filter 3rdparty dependencies for a target.
    This is used to make packaged_native_deps() automatic and declarative.
    :return: :class:`pants.util.objects.TypeConstraint`
    """
    return Exactly(PackagedNativeLibrary)
  @classmethod
  def subsystem_dependencies(cls):
    """Declare the subsystems this task needs options from."""
    return super().subsystem_dependencies() + (
      # We use a globally-scoped dependency on NativeBuildSettings because the toolchain and
      # dependency calculation need to be the same for both compile and link tasks (and subscoping
      # would break that).
      NativeBuildSettings,
      NativeToolchain.scoped(cls),
    )

  @classmethod
  def prepare(cls, options, round_manager):
    super().prepare(options, round_manager)
    # Allow the deferred_sources_mapping to take place first
    round_manager.optional_data('deferred_sources')

  @classmethod
  def implementation_version(cls):
    # bump the tuple to invalidate cached task results when this class changes
    return super().implementation_version() + [('NativeTask', 0)]
  @memoized_property
  def _native_build_settings(self):
    return NativeBuildSettings.global_instance()

  # TODO(#7183): remove this global subsystem dependency!
  @memoized_property
  def _native_build_step(self):
    return NativeBuildStep.global_instance()

  @memoized_property
  def _native_toolchain(self):
    # scoped per concrete task subclass (see subsystem_dependencies above)
    return NativeToolchain.scoped_instance(self)

  def _toolchain_variant_request(self, variant):
    # package the toolchain + requested variant for an engine product request
    return ToolchainVariantRequest(
      toolchain=self._native_toolchain,
      variant=variant)

  def get_c_toolchain_variant(self, native_library_target):
    """Resolve the CToolchain configured for the given library target."""
    return self._get_toolchain_variant(CToolchain, native_library_target)

  def get_cpp_toolchain_variant(self, native_library_target):
    """Resolve the CppToolchain configured for the given library target."""
    return self._get_toolchain_variant(CppToolchain, native_library_target)

  def _get_toolchain_variant(self, toolchain_type, native_library_target):
    selected_variant = self._native_build_step.get_toolchain_variant_for_target(
      native_library_target)
    return self._request_single(toolchain_type, self._toolchain_variant_request(selected_variant))

  @memoized_method
  def native_deps(self, target):
    """Native library dependencies of ``target`` (memoized per target)."""
    return self.strict_deps_for_target(
      target, predicate=self.dependent_target_constraint.satisfied_by)

  @memoized_method
  def packaged_native_deps(self, target):
    """Packaged (3rdparty) native dependencies of ``target`` (memoized)."""
    return self.strict_deps_for_target(
      target, predicate=self.packaged_dependent_constraint.satisfied_by)
def strict_deps_for_target(self, target, predicate=None):
"""Get the dependencies of `target` filtered by `predicate`, accounting for 'strict_deps'.
If 'strict_deps' is on, instead of using the transitive closure of dependencies, targets will
only be able to see their immediate dependencies declared in the BUILD file. The 'strict_deps'
setting is obtained from the result of `get_compile_settings()`.
NB: This includes the current target in the result.
"""
if self._native_build_settings.get_strict_deps_value_for_target(target):
strict_deps = target.strict_dependencies(DependencyContext())
if predicate:
filtered_deps = list(filter(predicate, strict_deps))
else:
filtered_deps = strict_deps
deps = [target] + filtered_deps
else:
deps = self.context.build_graph.transitive_subgraph_of_addresses(
[target.address], predicate=predicate)
# Filter out the beginning target depending on whether it matches the predicate.
# TODO: There should be a cleaner way to do this.
deps = filter(predicate, deps)
return deps
  def _add_product_at_target_base(self, product_mapping, target, value):
    # register ``value`` under the target's base directory in the mapping
    product_mapping.add(target, target.target_base).append(value)

  def _retrieve_single_product_at_target_base(self, product_mapping, target):
    # A target is expected to map to exactly one product under exactly one
    # base dir; assert_single_element raises if that invariant is violated.
    self.context.log.debug("product_mapping: {}".format(product_mapping))
    self.context.log.debug("target: {}".format(target))
    product = product_mapping.get(target)
    single_base_dir = assert_single_element(product.keys())
    single_product = assert_single_element(product[single_base_dir])
    return single_product

  # TODO(#5869): delete this when we can request Subsystems from options in tasks!
  def _request_single(self, product, subject):
    # NB: This is not supposed to be exposed to Tasks yet -- see #4769 to track the status of
    # exposing v2 products in v1 tasks.
return self.context._scheduler.product_request(product, [subject])[0] | src/python/pants/backend/native/tasks/native_task.py |
from abc import abstractmethod
from pants.backend.native.config.environment import CppToolchain, CToolchain
from pants.backend.native.subsystems.native_build_settings import NativeBuildSettings
from pants.backend.native.subsystems.native_build_step import NativeBuildStep
from pants.backend.native.subsystems.native_toolchain import (NativeToolchain,
ToolchainVariantRequest)
from pants.backend.native.targets.native_library import NativeLibrary
from pants.backend.native.targets.packaged_native_library import PackagedNativeLibrary
from pants.build_graph.dependency_context import DependencyContext
from pants.task.task import Task
from pants.util.collections import assert_single_element
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import classproperty
from pants.util.objects import Exactly, SubclassesOf
class NativeTask(Task):
    """Base class for tasks that compile or link native (C/C++) code.

    Provides shared helpers for toolchain selection, strict-deps-aware
    dependency calculation, and product-mapping access used by the concrete
    compile and link subclasses.
    """

    @classproperty
    @abstractmethod
    def source_target_constraint(cls):
        """Return a type constraint which is used to filter "source" targets for this task.

        This is used to make it clearer which tasks act on which targets, since the compile and link
        tasks work on different target sets (just C and just C++ in the compile tasks, and both in the
        link task).

        :return: :class:`pants.util.objects.TypeConstraint`
        """

    @classproperty
    def dependent_target_constraint(cls):
        """Return a type constraint which is used to filter dependencies for a target.

        This is used to make native_deps() calculation automatic and declarative.

        :return: :class:`pants.util.objects.TypeConstraint`
        """
        return SubclassesOf(NativeLibrary)

    @classproperty
    def packaged_dependent_constraint(cls):
        """Return a type constraint which is used to filter 3rdparty dependencies for a target.

        This is used to make packaged_native_deps() automatic and declarative.

        :return: :class:`pants.util.objects.TypeConstraint`
        """
        return Exactly(PackagedNativeLibrary)

    @classmethod
    def subsystem_dependencies(cls):
        return super().subsystem_dependencies() + (
            # We use a globally-scoped dependency on NativeBuildSettings because the toolchain and
            # dependency calculation need to be the same for both compile and link tasks (and subscoping
            # would break that).
            NativeBuildSettings,
            NativeToolchain.scoped(cls),
        )

    @classmethod
    def prepare(cls, options, round_manager):
        super().prepare(options, round_manager)
        # Allow the deferred_sources_mapping to take place first
        round_manager.optional_data('deferred_sources')

    @classmethod
    def implementation_version(cls):
        return super().implementation_version() + [('NativeTask', 0)]

    @memoized_property
    def _native_build_settings(self):
        return NativeBuildSettings.global_instance()

    # TODO(#7183): remove this global subsystem dependency!
    @memoized_property
    def _native_build_step(self):
        return NativeBuildStep.global_instance()

    @memoized_property
    def _native_toolchain(self):
        return NativeToolchain.scoped_instance(self)

    def _toolchain_variant_request(self, variant):
        """Wrap a toolchain variant selection in a request object for the v2 engine."""
        return ToolchainVariantRequest(
            toolchain=self._native_toolchain,
            variant=variant)

    def get_c_toolchain_variant(self, native_library_target):
        """Return the C toolchain resolved for the given target's selected variant."""
        return self._get_toolchain_variant(CToolchain, native_library_target)

    def get_cpp_toolchain_variant(self, native_library_target):
        """Return the C++ toolchain resolved for the given target's selected variant."""
        return self._get_toolchain_variant(CppToolchain, native_library_target)

    def _get_toolchain_variant(self, toolchain_type, native_library_target):
        # The variant (e.g. gnu vs llvm) may be overridden per-target.
        selected_variant = self._native_build_step.get_toolchain_variant_for_target(
            native_library_target)
        return self._request_single(toolchain_type, self._toolchain_variant_request(selected_variant))

    @memoized_method
    def native_deps(self, target):
        """Return the native library dependencies of `target` (memoized)."""
        return self.strict_deps_for_target(
            target, predicate=self.dependent_target_constraint.satisfied_by)

    @memoized_method
    def packaged_native_deps(self, target):
        """Return the packaged (3rdparty) native dependencies of `target` (memoized)."""
        return self.strict_deps_for_target(
            target, predicate=self.packaged_dependent_constraint.satisfied_by)

    def strict_deps_for_target(self, target, predicate=None):
        """Get the dependencies of `target` filtered by `predicate`, accounting for 'strict_deps'.

        If 'strict_deps' is on, instead of using the transitive closure of dependencies, targets will
        only be able to see their immediate dependencies declared in the BUILD file. The 'strict_deps'
        setting is obtained from the result of `get_compile_settings()`.

        NB: This includes the current target in the result.
        """
        if self._native_build_settings.get_strict_deps_value_for_target(target):
            strict_deps = target.strict_dependencies(DependencyContext())
            if predicate:
                filtered_deps = list(filter(predicate, strict_deps))
            else:
                filtered_deps = strict_deps
            deps = [target] + filtered_deps
        else:
            deps = self.context.build_graph.transitive_subgraph_of_addresses(
                [target.address], predicate=predicate)
            # Filter out the beginning target depending on whether it matches the predicate.
            # TODO: There should be a cleaner way to do this.
            # NB: Materialize the result as a list. Callers such as native_deps() are
            # @memoized_method, and caching a lazy `filter` iterator would hand out an
            # already-exhausted iterator on every lookup after the first.
            deps = list(filter(predicate, deps))
        return deps

    def _add_product_at_target_base(self, product_mapping, target, value):
        """Append `value` to the product list registered under `target`'s source root."""
        product_mapping.add(target, target.target_base).append(value)

    def _retrieve_single_product_at_target_base(self, product_mapping, target):
        """Return the single product registered for `target`, asserting uniqueness."""
        self.context.log.debug("product_mapping: {}".format(product_mapping))
        self.context.log.debug("target: {}".format(target))
        product = product_mapping.get(target)
        single_base_dir = assert_single_element(product.keys())
        single_product = assert_single_element(product[single_base_dir])
        return single_product

    # TODO(#5869): delete this when we can request Subsystems from options in tasks!
    def _request_single(self, product, subject):
        # NB: This is not supposed to be exposed to Tasks yet -- see #4769 to track the status of
        # exposing v2 products in v1 tasks.
        return self.context._scheduler.product_request(product, [subject])[0]
import time
from datetime import datetime
import testify as T
from osxcollector import osxcollector
def _convert_to_utc(func):
    '''Decorator that converts a local-time `datetime` argument to UTC before calling `func`.

    source: http://feihonghsu.blogspot.com/2008/02/converting-from-local-time-to-utc.html
    '''
    import functools

    # functools.wraps preserves the wrapped function's name/docstring, which keeps
    # tracebacks and test reports readable.
    @functools.wraps(func)
    def wrapper(dt):
        # mktime interprets the naive datetime as local time; utcfromtimestamp then
        # re-reads the resulting epoch seconds as UTC.
        dt_utc = datetime.utcfromtimestamp(time.mktime(dt.timetuple()))
        return func(dt_utc)
    return wrapper
@_convert_to_utc
def _datetime_to_seconds_since_2001(dt):
    """Return `dt` expressed as seconds elapsed since 2001-01-01 (Mac absolute time)."""
    elapsed = dt - osxcollector.DATETIME_2001
    return elapsed.total_seconds()
@_convert_to_utc
def _datetime_to_seconds_since_epoch(dt):
    """Return `dt` expressed as seconds elapsed since the Unix epoch."""
    elapsed = dt - osxcollector.DATETIME_1970
    return elapsed.total_seconds()
@_convert_to_utc
def _datetime_to_microseconds_since_epoch(dt):
    """Return `dt` expressed as microseconds elapsed since the Unix epoch."""
    elapsed = dt - osxcollector.DATETIME_1970
    return elapsed.total_seconds() * 1e6
@_convert_to_utc
def _datetime_to_microseconds_since_1601(dt):
    """Return `dt` expressed as microseconds elapsed since 1601-01-01 (Windows FILETIME base)."""
    elapsed = dt - osxcollector.DATETIME_1601
    return elapsed.total_seconds() * 1e6
DT_BEFORE_MIN = datetime(2002, 7, 8, 14, 28, 22)
"""Date before minimum date"""
DT_VALID = datetime(2014, 7, 8, 14, 28, 22)
"""Valid date that should not cause problems after convertion"""
DT_FUTURE = datetime(datetime.now().year + 1, 7, 8, 14, 28, 22)
"""Date in the future"""
DT_VALID_AS_STRING = '2014-07-08 14:28:22'
"""String representation of DT_VALID"""
class SecondsSince2001ToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._seconds_since_2001_to_datetime."""

    def test_seconds_since_2001_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_seconds_since_2001(DT_VALID)
        T.assert_equal(osxcollector._seconds_since_2001_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_seconds_since_2001(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._seconds_since_2001_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_seconds_since_2001(DT_FUTURE)
        T.assert_equal(osxcollector._seconds_since_2001_to_datetime(timestamp), None)
class SecondsSinceEpochToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._seconds_since_epoch_to_datetime."""

    def test_seconds_since_epoch_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_seconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._seconds_since_epoch_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_seconds_since_epoch(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._seconds_since_epoch_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_seconds_since_epoch(DT_FUTURE)
        T.assert_equal(osxcollector._seconds_since_epoch_to_datetime(timestamp), None)
class MicrosecondsSinceEpochToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._microseconds_since_epoch_to_datetime."""

    def test_microseconds_since_epoch_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_microseconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._microseconds_since_epoch_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_microseconds_since_epoch(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._microseconds_since_epoch_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_microseconds_since_epoch(DT_FUTURE)
        T.assert_equal(osxcollector._microseconds_since_epoch_to_datetime(timestamp), None)
class MicrosecondsSince1601ToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._microseconds_since_1601_to_datetime."""

    def test_microseconds_since_1601_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_microseconds_since_1601(DT_VALID)
        T.assert_equal(osxcollector._microseconds_since_1601_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_microseconds_since_1601(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._microseconds_since_1601_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_microseconds_since_1601(DT_FUTURE)
        T.assert_equal(osxcollector._microseconds_since_1601_to_datetime(timestamp), None)
class ValueToDatetimeTestCase(T.TestCase):
    """Exercises _value_to_datetime against every supported timestamp format.

    Confirms that the heuristic ordering of conversion attempts inside that
    function picks the right converter for each format.
    """

    def test_seconds_since_2001_to_datetime(self):
        value = _datetime_to_seconds_since_2001(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)

    def test_seconds_since_epoch_to_datetime(self):
        value = _datetime_to_seconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)

    def test_microseconds_since_epoch_to_datetime(self):
        value = _datetime_to_microseconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)

    def test_microseconds_since_1601_to_datetime(self):
        value = _datetime_to_microseconds_since_1601(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)
class NormalizeValueTestCase(T.TestCase):
    """Tests _normalize_val function.

    NB: Python 2 only -- relies on the `unicode` and `buffer` builtins.
    """

    def test_normalize_basestring(self):
        s = "basestring here"
        val = osxcollector._normalize_val(s)
        T.assert_equal(s, val)

    def test_normalize_unicode(self):
        # NOTE(review): this is a plain str literal, so in Python 2 '\u20AC' is six
        # literal characters, not a euro sign; a u'' prefix may have been intended -- confirm.
        u = '\u20AC'
        val = osxcollector._normalize_val(u)
        T.assert_truthy(isinstance(val, unicode))

    def test_normalize_unicode_error(self):
        # \x9f cannot be decoded as UTF-8, so the value is expected to stay a str.
        s = 'Was\x9f'
        val = osxcollector._normalize_val(s)
        T.assert_falsey(isinstance(val, unicode))

    def test_normalize_buffer_to_unicode(self):
        b = buffer("this is buffer")
        val = osxcollector._normalize_val(b)
        T.assert_truthy(isinstance(val, unicode))

    def test_normalize_datetime(self):
        """Tests whether timestamps are resolved to datetime string representation,
        based on passed key value."""
        keys_that_hint_about_being_a_date = ["start date:", "TIME FINISHED", "in UTC", "event date"]
        for key in keys_that_hint_about_being_a_date:
            sec_since_2001 = _datetime_to_seconds_since_2001(DT_VALID)
            val = osxcollector._normalize_val(sec_since_2001, key)
            T.assert_equal(DT_VALID_AS_STRING, val)
        # key contains 'date' however the value is not date
        not_a_date = "yes, it includes"
        val = osxcollector._normalize_val(not_a_date, "includes_dates")
        T.assert_equal(not_a_date, val)
from datetime import datetime
import testify as T
from osxcollector import osxcollector
def _convert_to_utc(func):
    '''Decorator that converts a local-time `datetime` argument to UTC before calling `func`.

    source: http://feihonghsu.blogspot.com/2008/02/converting-from-local-time-to-utc.html
    '''
    import functools

    # functools.wraps preserves the wrapped function's name/docstring, which keeps
    # tracebacks and test reports readable.
    @functools.wraps(func)
    def wrapper(dt):
        # mktime interprets the naive datetime as local time; utcfromtimestamp then
        # re-reads the resulting epoch seconds as UTC.
        dt_utc = datetime.utcfromtimestamp(time.mktime(dt.timetuple()))
        return func(dt_utc)
    return wrapper
@_convert_to_utc
def _datetime_to_seconds_since_2001(dt):
    """Return `dt` expressed as seconds elapsed since 2001-01-01 (Mac absolute time)."""
    elapsed = dt - osxcollector.DATETIME_2001
    return elapsed.total_seconds()
@_convert_to_utc
def _datetime_to_seconds_since_epoch(dt):
    """Return `dt` expressed as seconds elapsed since the Unix epoch."""
    elapsed = dt - osxcollector.DATETIME_1970
    return elapsed.total_seconds()
@_convert_to_utc
def _datetime_to_microseconds_since_epoch(dt):
    """Return `dt` expressed as microseconds elapsed since the Unix epoch."""
    elapsed = dt - osxcollector.DATETIME_1970
    return elapsed.total_seconds() * 1e6
@_convert_to_utc
def _datetime_to_microseconds_since_1601(dt):
    """Return `dt` expressed as microseconds elapsed since 1601-01-01 (Windows FILETIME base)."""
    elapsed = dt - osxcollector.DATETIME_1601
    return elapsed.total_seconds() * 1e6
DT_BEFORE_MIN = datetime(2002, 7, 8, 14, 28, 22)
"""Date before minimum date"""
DT_VALID = datetime(2014, 7, 8, 14, 28, 22)
"""Valid date that should not cause problems after convertion"""
DT_FUTURE = datetime(datetime.now().year + 1, 7, 8, 14, 28, 22)
"""Date in the future"""
DT_VALID_AS_STRING = '2014-07-08 14:28:22'
"""String representation of DT_VALID"""
class SecondsSince2001ToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._seconds_since_2001_to_datetime."""

    def test_seconds_since_2001_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_seconds_since_2001(DT_VALID)
        T.assert_equal(osxcollector._seconds_since_2001_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_seconds_since_2001(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._seconds_since_2001_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_seconds_since_2001(DT_FUTURE)
        T.assert_equal(osxcollector._seconds_since_2001_to_datetime(timestamp), None)
class SecondsSinceEpochToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._seconds_since_epoch_to_datetime."""

    def test_seconds_since_epoch_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_seconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._seconds_since_epoch_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_seconds_since_epoch(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._seconds_since_epoch_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_seconds_since_epoch(DT_FUTURE)
        T.assert_equal(osxcollector._seconds_since_epoch_to_datetime(timestamp), None)
class MicrosecondsSinceEpochToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._microseconds_since_epoch_to_datetime."""

    def test_microseconds_since_epoch_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_microseconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._microseconds_since_epoch_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_microseconds_since_epoch(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._microseconds_since_epoch_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_microseconds_since_epoch(DT_FUTURE)
        T.assert_equal(osxcollector._microseconds_since_epoch_to_datetime(timestamp), None)
class MicrosecondsSince1601ToDatetimeTestCase(T.TestCase):
    """Round-trip and boundary tests for osxcollector._microseconds_since_1601_to_datetime."""

    def test_microseconds_since_1601_to_datetime(self):
        # A timestamp inside the supported range round-trips to the original datetime.
        timestamp = _datetime_to_microseconds_since_1601(DT_VALID)
        T.assert_equal(osxcollector._microseconds_since_1601_to_datetime(timestamp), DT_VALID)

    def test_datetime_before_min_year(self):
        # Timestamps before the minimum supported date are rejected as None.
        timestamp = _datetime_to_microseconds_since_1601(DT_BEFORE_MIN)
        T.assert_equal(osxcollector._microseconds_since_1601_to_datetime(timestamp), None)

    def test_datetime_in_future(self):
        # Timestamps in the future are rejected as None.
        timestamp = _datetime_to_microseconds_since_1601(DT_FUTURE)
        T.assert_equal(osxcollector._microseconds_since_1601_to_datetime(timestamp), None)
class ValueToDatetimeTestCase(T.TestCase):
    """Exercises _value_to_datetime against every supported timestamp format.

    Confirms that the heuristic ordering of conversion attempts inside that
    function picks the right converter for each format.
    """

    def test_seconds_since_2001_to_datetime(self):
        value = _datetime_to_seconds_since_2001(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)

    def test_seconds_since_epoch_to_datetime(self):
        value = _datetime_to_seconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)

    def test_microseconds_since_epoch_to_datetime(self):
        value = _datetime_to_microseconds_since_epoch(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)

    def test_microseconds_since_1601_to_datetime(self):
        value = _datetime_to_microseconds_since_1601(DT_VALID)
        T.assert_equal(osxcollector._value_to_datetime(value), DT_VALID)
class NormalizeValueTestCase(T.TestCase):
    """Tests _normalize_val function.

    NB: Python 2 only -- relies on the `unicode` and `buffer` builtins.
    """

    def test_normalize_basestring(self):
        s = "basestring here"
        val = osxcollector._normalize_val(s)
        T.assert_equal(s, val)

    def test_normalize_unicode(self):
        # NOTE(review): this is a plain str literal, so in Python 2 '\u20AC' is six
        # literal characters, not a euro sign; a u'' prefix may have been intended -- confirm.
        u = '\u20AC'
        val = osxcollector._normalize_val(u)
        T.assert_truthy(isinstance(val, unicode))

    def test_normalize_unicode_error(self):
        # \x9f cannot be decoded as UTF-8, so the value is expected to stay a str.
        s = 'Was\x9f'
        val = osxcollector._normalize_val(s)
        T.assert_falsey(isinstance(val, unicode))

    def test_normalize_buffer_to_unicode(self):
        b = buffer("this is buffer")
        val = osxcollector._normalize_val(b)
        T.assert_truthy(isinstance(val, unicode))

    def test_normalize_datetime(self):
        """Tests whether timestamps are resolved to datetime string representation,
        based on passed key value."""
        keys_that_hint_about_being_a_date = ["start date:", "TIME FINISHED", "in UTC", "event date"]
        for key in keys_that_hint_about_being_a_date:
            sec_since_2001 = _datetime_to_seconds_since_2001(DT_VALID)
            val = osxcollector._normalize_val(sec_since_2001, key)
            T.assert_equal(DT_VALID_AS_STRING, val)
        # key contains 'date' however the value is not date
        not_a_date = "yes, it includes"
        val = osxcollector._normalize_val(not_a_date, "includes_dates")
        T.assert_equal(not_a_date, val)
import json
from rocketchat_API.APIExceptions.RocketExceptions import RocketMissingParamException
from rocketchat_API.APISections.base import RocketChatBase
class RocketChatChannels(RocketChatBase):
    """Wrapper for the Rocket.Chat REST `channels.*` endpoints.

    Each method maps one-to-one onto a Rocket.Chat REST API call; extra keyword
    arguments are forwarded to the underlying request. Methods that accept the
    channel by either id or name raise RocketMissingParamException when neither
    is supplied.
    """

    def channels_list(self, **kwargs):
        """Retrieves all of the channels from the server."""
        return self.call_api_get("channels.list", kwargs=kwargs)

    def channels_list_joined(self, **kwargs):
        """Lists all of the channels the calling user has joined"""
        return self.call_api_get("channels.list.joined", kwargs=kwargs)

    def channels_info(self, room_id=None, channel=None, **kwargs):
        """Gets a channel’s information."""
        if room_id:
            return self.call_api_get("channels.info", roomId=room_id, kwargs=kwargs)
        if channel:
            return self.call_api_get("channels.info", roomName=channel, kwargs=kwargs)
        raise RocketMissingParamException("room_id or channel required")

    def channels_history(self, room_id, **kwargs):
        """Retrieves the messages from a channel."""
        return self.call_api_get("channels.history", roomId=room_id, kwargs=kwargs)

    def channels_add_all(self, room_id, **kwargs):
        """Adds all of the users of the Rocket.Chat server to the channel."""
        return self.call_api_post("channels.addAll", roomId=room_id, kwargs=kwargs)

    def channels_add_moderator(self, room_id, user_id, **kwargs):
        """Gives the role of moderator for a user in the current channel."""
        return self.call_api_post(
            "channels.addModerator", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_remove_moderator(self, room_id, user_id, **kwargs):
        """Removes the role of moderator from a user in the current channel."""
        return self.call_api_post(
            "channels.removeModerator", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_moderators(self, room_id=None, channel=None, **kwargs):
        """Lists all moderators of a channel."""
        if room_id:
            return self.call_api_get(
                "channels.moderators", roomId=room_id, kwargs=kwargs
            )
        if channel:
            return self.call_api_get(
                "channels.moderators", roomName=channel, kwargs=kwargs
            )
        raise RocketMissingParamException("room_id or channel required")

    def channels_add_owner(self, room_id, user_id=None, username=None, **kwargs):
        """Gives the role of owner for a user in the current channel."""
        if user_id:
            return self.call_api_post(
                "channels.addOwner", roomId=room_id, userId=user_id, kwargs=kwargs
            )
        if username:
            return self.call_api_post(
                "channels.addOwner", roomId=room_id, username=username, kwargs=kwargs
            )
        raise RocketMissingParamException("userID or username required")

    def channels_remove_owner(self, room_id, user_id, **kwargs):
        """Removes the role of owner from a user in the current channel."""
        return self.call_api_post(
            "channels.removeOwner", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_add_leader(self, room_id, user_id, **kwargs):
        """Gives the role of Leader for a user in the current channel."""
        return self.call_api_post(
            "channels.addLeader", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_remove_leader(self, room_id, user_id, **kwargs):
        """Removes the role of Leader for a user in the current channel."""
        return self.call_api_post(
            "channels.removeLeader", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_archive(self, room_id, **kwargs):
        """Archives a channel."""
        return self.call_api_post("channels.archive", roomId=room_id, kwargs=kwargs)

    def channels_unarchive(self, room_id, **kwargs):
        """Unarchives a channel."""
        return self.call_api_post("channels.unarchive", roomId=room_id, kwargs=kwargs)

    def channels_close(self, room_id, **kwargs):
        """Removes the channel from the user’s list of channels."""
        return self.call_api_post("channels.close", roomId=room_id, kwargs=kwargs)

    def channels_open(self, room_id, **kwargs):
        """Adds the channel back to the user’s list of channels."""
        return self.call_api_post("channels.open", roomId=room_id, kwargs=kwargs)

    def channels_create(self, name, **kwargs):
        """Creates a new public channel, optionally including users."""
        return self.call_api_post("channels.create", name=name, kwargs=kwargs)

    def channels_get_integrations(self, room_id, **kwargs):
        """Retrieves the integrations which the channel has"""
        return self.call_api_get(
            "channels.getIntegrations", roomId=room_id, kwargs=kwargs
        )

    def channels_invite(self, room_id, user_id, **kwargs):
        """Adds a user to the channel."""
        return self.call_api_post(
            "channels.invite", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_join(self, room_id, join_code, **kwargs):
        """Joins yourself to the channel."""
        return self.call_api_post(
            "channels.join", roomId=room_id, joinCode=join_code, kwargs=kwargs
        )

    def channels_kick(self, room_id, user_id, **kwargs):
        """Removes a user from the channel."""
        return self.call_api_post(
            "channels.kick", roomId=room_id, userId=user_id, kwargs=kwargs
        )

    def channels_leave(self, room_id, **kwargs):
        """Causes the callee to be removed from the channel."""
        return self.call_api_post("channels.leave", roomId=room_id, kwargs=kwargs)

    def channels_rename(self, room_id, name, **kwargs):
        """Changes the name of the channel."""
        return self.call_api_post(
            "channels.rename", roomId=room_id, name=name, kwargs=kwargs
        )

    def channels_set_description(self, room_id, description, **kwargs):
        """Sets the description for the channel."""
        return self.call_api_post(
            "channels.setDescription",
            roomId=room_id,
            description=description,
            kwargs=kwargs,
        )

    def channels_set_join_code(self, room_id, join_code, **kwargs):
        """Sets the code required to join the channel."""
        return self.call_api_post(
            "channels.setJoinCode", roomId=room_id, joinCode=join_code, kwargs=kwargs
        )

    def channels_set_read_only(self, room_id, read_only, **kwargs):
        """Sets whether the channel is read only or not."""
        return self.call_api_post(
            "channels.setReadOnly",
            roomId=room_id,
            readOnly=bool(read_only),
            kwargs=kwargs,
        )

    def channels_set_topic(self, room_id, topic, **kwargs):
        """Sets the topic for the channel."""
        return self.call_api_post(
            "channels.setTopic", roomId=room_id, topic=topic, kwargs=kwargs
        )

    def channels_set_type(self, room_id, a_type, **kwargs):
        """Sets the type of room this channel should be. The type of room this channel should be, either c or p."""
        return self.call_api_post(
            "channels.setType", roomId=room_id, type=a_type, kwargs=kwargs
        )

    def channels_set_announcement(self, room_id, announce, **kwargs):
        """Sets the announcement for the channel."""
        return self.call_api_post(
            "channels.setAnnouncement",
            roomId=room_id,
            announcement=announce,
            kwargs=kwargs,
        )

    def channels_set_custom_fields(self, rid, custom_fields, **kwargs):
        """Sets the custom fields for the channel."""
        # **kwargs added (default-empty, backward compatible) for consistency with
        # every other method in this class.
        return self.call_api_post(
            "channels.setCustomFields", roomId=rid, customFields=custom_fields, kwargs=kwargs
        )

    def channels_delete(self, room_id=None, channel=None, **kwargs):
        """Delete a public channel."""
        if room_id:
            return self.call_api_post("channels.delete", roomId=room_id, kwargs=kwargs)
        if channel:
            return self.call_api_post(
                "channels.delete", roomName=channel, kwargs=kwargs
            )
        raise RocketMissingParamException("room_id or channel required")

    def channels_members(self, room_id=None, channel=None, **kwargs):
        """Lists all channel users."""
        if room_id:
            return self.call_api_get("channels.members", roomId=room_id, kwargs=kwargs)
        if channel:
            return self.call_api_get(
                "channels.members", roomName=channel, kwargs=kwargs
            )
        raise RocketMissingParamException("room_id or channel required")

    def channels_roles(self, room_id=None, room_name=None, **kwargs):
        """Lists all user’s roles in the channel."""
        if room_id:
            return self.call_api_get("channels.roles", roomId=room_id, kwargs=kwargs)
        if room_name:
            return self.call_api_get(
                "channels.roles", roomName=room_name, kwargs=kwargs
            )
        raise RocketMissingParamException("room_id or room_name required")

    def channels_files(self, room_id=None, room_name=None, **kwargs):
        """Retrieves the files from a channel."""
        if room_id:
            return self.call_api_get("channels.files", roomId=room_id, kwargs=kwargs)
        if room_name:
            return self.call_api_get(
                "channels.files", roomName=room_name, kwargs=kwargs
            )
        raise RocketMissingParamException("room_id or room_name required")

    def channels_get_all_user_mentions_by_channel(self, room_id, **kwargs):
        """Gets all the mentions of a channel."""
        return self.call_api_get(
            "channels.getAllUserMentionsByChannel", roomId=room_id, kwargs=kwargs
        )

    def channels_counters(self, room_id=None, room_name=None, **kwargs):
        """Gets counters for a channel."""
        if room_id:
            return self.call_api_get("channels.counters", roomId=room_id, kwargs=kwargs)
        if room_name:
            return self.call_api_get(
                "channels.counters", roomName=room_name, kwargs=kwargs
            )
        raise RocketMissingParamException("room_id or room_name required")

    def channels_online(self, query):
        """Lists all online users of a channel if the channel's id is provided, otherwise it gets all online users of
        all channels."""
        return self.call_api_get("channels.online", query=json.dumps(query))
from rocketchat_API.APIExceptions.RocketExceptions import RocketMissingParamException
from rocketchat_API.APISections.base import RocketChatBase
class RocketChatChannels(RocketChatBase):
def channels_list(self, **kwargs):
"""Retrieves all of the channels from the server."""
return self.call_api_get("channels.list", kwargs=kwargs)
def channels_list_joined(self, **kwargs):
"""Lists all of the channels the calling user has joined"""
return self.call_api_get("channels.list.joined", kwargs=kwargs)
def channels_info(self, room_id=None, channel=None, **kwargs):
"""Gets a channel’s information."""
if room_id:
return self.call_api_get("channels.info", roomId=room_id, kwargs=kwargs)
if channel:
return self.call_api_get("channels.info", roomName=channel, kwargs=kwargs)
raise RocketMissingParamException("room_id or channel required")
def channels_history(self, room_id, **kwargs):
"""Retrieves the messages from a channel."""
return self.call_api_get("channels.history", roomId=room_id, kwargs=kwargs)
def channels_add_all(self, room_id, **kwargs):
"""Adds all of the users of the Rocket.Chat server to the channel."""
return self.call_api_post("channels.addAll", roomId=room_id, kwargs=kwargs)
def channels_add_moderator(self, room_id, user_id, **kwargs):
"""Gives the role of moderator for a user in the current channel."""
return self.call_api_post(
"channels.addModerator", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_remove_moderator(self, room_id, user_id, **kwargs):
"""Removes the role of moderator from a user in the current channel."""
return self.call_api_post(
"channels.removeModerator", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_moderators(self, room_id=None, channel=None, **kwargs):
"""Lists all moderators of a channel."""
if room_id:
return self.call_api_get(
"channels.moderators", roomId=room_id, kwargs=kwargs
)
if channel:
return self.call_api_get(
"channels.moderators", roomName=channel, kwargs=kwargs
)
raise RocketMissingParamException("room_id or channel required")
def channels_add_owner(self, room_id, user_id=None, username=None, **kwargs):
"""Gives the role of owner for a user in the current channel."""
if user_id:
return self.call_api_post(
"channels.addOwner", roomId=room_id, userId=user_id, kwargs=kwargs
)
if username:
return self.call_api_post(
"channels.addOwner", roomId=room_id, username=username, kwargs=kwargs
)
raise RocketMissingParamException("userID or username required")
def channels_remove_owner(self, room_id, user_id, **kwargs):
"""Removes the role of owner from a user in the current channel."""
return self.call_api_post(
"channels.removeOwner", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_add_leader(self, room_id, user_id, **kwargs):
"""Gives the role of Leader for a user in the current channel."""
return self.call_api_post(
"channels.addLeader", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_remove_leader(self, room_id, user_id, **kwargs):
"""Removes the role of Leader for a user in the current channel."""
return self.call_api_post(
"channels.removeLeader", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_archive(self, room_id, **kwargs):
"""Archives a channel."""
return self.call_api_post("channels.archive", roomId=room_id, kwargs=kwargs)
def channels_unarchive(self, room_id, **kwargs):
"""Unarchives a channel."""
return self.call_api_post("channels.unarchive", roomId=room_id, kwargs=kwargs)
def channels_close(self, room_id, **kwargs):
"""Removes the channel from the user’s list of channels."""
return self.call_api_post("channels.close", roomId=room_id, kwargs=kwargs)
def channels_open(self, room_id, **kwargs):
"""Adds the channel back to the user’s list of channels."""
return self.call_api_post("channels.open", roomId=room_id, kwargs=kwargs)
def channels_create(self, name, **kwargs):
"""Creates a new public channel, optionally including users."""
return self.call_api_post("channels.create", name=name, kwargs=kwargs)
def channels_get_integrations(self, room_id, **kwargs):
"""Retrieves the integrations which the channel has"""
return self.call_api_get(
"channels.getIntegrations", roomId=room_id, kwargs=kwargs
)
def channels_invite(self, room_id, user_id, **kwargs):
"""Adds a user to the channel."""
return self.call_api_post(
"channels.invite", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_join(self, room_id, join_code, **kwargs):
"""Joins yourself to the channel."""
return self.call_api_post(
"channels.join", roomId=room_id, joinCode=join_code, kwargs=kwargs
)
def channels_kick(self, room_id, user_id, **kwargs):
"""Removes a user from the channel."""
return self.call_api_post(
"channels.kick", roomId=room_id, userId=user_id, kwargs=kwargs
)
def channels_leave(self, room_id, **kwargs):
"""Causes the callee to be removed from the channel."""
return self.call_api_post("channels.leave", roomId=room_id, kwargs=kwargs)
def channels_rename(self, room_id, name, **kwargs):
"""Changes the name of the channel."""
return self.call_api_post(
"channels.rename", roomId=room_id, name=name, kwargs=kwargs
)
def channels_set_description(self, room_id, description, **kwargs):
"""Sets the description for the channel."""
return self.call_api_post(
"channels.setDescription",
roomId=room_id,
description=description,
kwargs=kwargs,
)
def channels_set_join_code(self, room_id, join_code, **kwargs):
"""Sets the code required to join the channel."""
return self.call_api_post(
"channels.setJoinCode", roomId=room_id, joinCode=join_code, kwargs=kwargs
)
def channels_set_read_only(self, room_id, read_only, **kwargs):
"""Sets whether the channel is read only or not."""
return self.call_api_post(
"channels.setReadOnly",
roomId=room_id,
readOnly=bool(read_only),
kwargs=kwargs,
)
def channels_set_topic(self, room_id, topic, **kwargs):
"""Sets the topic for the channel."""
return self.call_api_post(
"channels.setTopic", roomId=room_id, topic=topic, kwargs=kwargs
)
def channels_set_type(self, room_id, a_type, **kwargs):
"""Sets the type of room this channel should be. The type of room this channel should be, either c or p."""
return self.call_api_post(
"channels.setType", roomId=room_id, type=a_type, kwargs=kwargs
)
def channels_set_announcement(self, room_id, announce, **kwargs):
"""Sets the announcement for the channel."""
return self.call_api_post(
"channels.setAnnouncement",
roomId=room_id,
announcement=announce,
kwargs=kwargs,
)
def channels_set_custom_fields(self, rid, custom_fields):
"""Sets the custom fields for the channel."""
return self.call_api_post(
"channels.setCustomFields", roomId=rid, customFields=custom_fields
)
def channels_delete(self, room_id=None, channel=None, **kwargs):
"""Delete a public channel."""
if room_id:
return self.call_api_post("channels.delete", roomId=room_id, kwargs=kwargs)
if channel:
return self.call_api_post(
"channels.delete", roomName=channel, kwargs=kwargs
)
raise RocketMissingParamException("room_id or channel required")
def channels_members(self, room_id=None, channel=None, **kwargs):
"""Lists all channel users."""
if room_id:
return self.call_api_get("channels.members", roomId=room_id, kwargs=kwargs)
if channel:
return self.call_api_get(
"channels.members", roomName=channel, kwargs=kwargs
)
raise RocketMissingParamException("room_id or channel required")
def channels_roles(self, room_id=None, room_name=None, **kwargs):
"""Lists all user’s roles in the channel."""
if room_id:
return self.call_api_get("channels.roles", roomId=room_id, kwargs=kwargs)
if room_name:
return self.call_api_get(
"channels.roles", roomName=room_name, kwargs=kwargs
)
raise RocketMissingParamException("room_id or room_name required")
def channels_files(self, room_id=None, room_name=None, **kwargs):
"""Retrieves the files from a channel."""
if room_id:
return self.call_api_get("channels.files", roomId=room_id, kwargs=kwargs)
if room_name:
return self.call_api_get(
"channels.files", roomName=room_name, kwargs=kwargs
)
raise RocketMissingParamException("room_id or room_name required")
def channels_get_all_user_mentions_by_channel(self, room_id, **kwargs):
"""Gets all the mentions of a channel."""
return self.call_api_get(
"channels.getAllUserMentionsByChannel", roomId=room_id, kwargs=kwargs
)
def channels_counters(self, room_id=None, room_name=None, **kwargs):
"""Gets counters for a channel."""
if room_id:
return self.call_api_get("channels.counters", roomId=room_id, kwargs=kwargs)
if room_name:
return self.call_api_get(
"channels.counters", roomName=room_name, kwargs=kwargs
)
raise RocketMissingParamException("room_id or room_name required")
def channels_online(self, query):
"""Lists all online users of a channel if the channel's id is provided, otherwise it gets all online users of
all channels."""
return self.call_api_get("channels.online", query=json.dumps(query)) | 0.775945 | 0.064329 |
import json
import os
import time
from periphery import I2C
import notecard
import sys
import cv2
from edge_impulse_linux.image import ImageImpulseRunner
# Blues Notehub product UID -- placeholder; replace with your own project's UID.
productUID = "<com.blues.your_name:your_project>"
# Resolve the Edge Impulse model file relative to this script's location.
dir_path = os.path.dirname(os.path.realpath(__file__))
modelfile = os.path.join(dir_path, '../model/model.eim')
print(f'Using model at {modelfile}')
print("Connecting to Notecard...")
# Open the Notecard over I2C bus 1.
# NOTE(review): the two 0 arguments presumably select the default I2C address
# and max transfer size -- confirm against the notecard library docs.
port = I2C("/dev/i2c-1")
card = notecard.OpenI2C(port, 0, 0, debug=True)
def now():
    """Return the current wall-clock time in whole milliseconds."""
    millis = time.time() * 1000
    return round(millis)
def get_webcams():
    """Probe video ports 0-4 and return the ids that deliver a readable frame."""
    found = []
    for idx in range(5):
        print("Looking for a camera in port %s:" % idx)
        cap = cv2.VideoCapture(idx)
        if not cap.isOpened():
            continue
        ok = cap.read()[0]
        if ok:
            backend = cap.getBackendName()
            width = cap.get(3)
            height = cap.get(4)
            print("Camera %s (%s x %s) found in port %s " % (backend, height, width, idx))
            found.append(idx)
        cap.release()
    return found
def main():
    """Send ``hub.set`` to configure the Notecard for periodic Notehub sync."""
    print(f'Configuring Product: {productUID}...')
    req = {
        "req": "hub.set",
        "product": productUID,
        "mode": "periodic",
        "outbound": 60,
        "inbound": 120,
        "align": True,
    }
    card.Transaction(req)
# Configure the Notecard once, then sample/classify forever.
main()
while True:
    print("Taking a sample from the camera...")
    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + ' (v' + str(model_info['project']['deploy_version']) + ')"')
            labels = model_info['model_parameters']['labels']
            videoCaptureDeviceId = 0
            # Probe the camera once up front so a missing device fails fast.
            camera = cv2.VideoCapture(videoCaptureDeviceId)
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)
                h = camera.get(4)
                print("Camera %s (%s x %s) in port %s selected." %(backendName,h,w, videoCaptureDeviceId))
                camera.release()
            else:
                raise Exception("Couldn't initialize selected camera.")
            next_frame = 0 # limit to ~10 fps here
            inference_count = 0
            for res, img in runner.classifier(videoCaptureDeviceId):
                # Throttle: never classify more often than every 500 ms.
                if (next_frame > now()):
                    time.sleep((next_frame - now()) / 1000)
                next_frame = now() + 500
                inference_count += 1
                # print('classification runner response\n', sorted(res['result']['classification'].items(), key=lambda x:x[1], reverse=True))
                # Only act on the 5th inference, to let the stream settle.
                if inference_count == 5:
                    inference_count = 0
                    print('classification runner response', res['result']['classification'])
                    if "classification" in res["result"].keys():
                        # Send the top-scoring label plus inference timing to Notehub.
                        req = {"req": "note.add"}
                        req["sync"] = True
                        note_body = {"inference_time": res['timing']['dsp'] + res['timing']['classification']}
                        print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                        print('', flush=True)
                        sorted_items = sorted(res['result']['classification'].items(), key=lambda x:x[1], reverse=True)
                        inferred_state = sorted_items[0][0]
                        note_body["tank-state"] = inferred_state
                        note_body["classification"] = res['result']['classification']
                        req["body"] = note_body
                        card.Transaction(req)
                        # If the state is low or high, send a different Note with an
                        # alert message
                        req = {"req": "note.add"}
                        req["sync"] = True
                        req["file"] = "tank-alert.qo"
                        if inferred_state == 'tank-pressure-low':
                            req["body"] = {"message": "Tank pressure is low. Clean impeller."}
                            card.Transaction(req)
                        elif inferred_state == 'tank-pressure-high':
                            req["body"] = {"message": "Tank pressure is high. Backwash filter."}
                            card.Transaction(req)
                        # One reading per wake cycle: leave the classifier loop.
                        break
        finally:
            if (runner):
                runner.stop()
    print("Pausing until next capture...")
    time.sleep(240)
import os
import time
from periphery import I2C
import notecard
import sys
import cv2
from edge_impulse_linux.image import ImageImpulseRunner
productUID = "<com.blues.your_name:your_project>"
dir_path = os.path.dirname(os.path.realpath(__file__))
modelfile = os.path.join(dir_path, '../model/model.eim')
print(f'Using model at {modelfile}')
print("Connecting to Notecard...")
port = I2C("/dev/i2c-1")
card = notecard.OpenI2C(port, 0, 0, debug=True)
def now():
return round(time.time() * 1000)
def get_webcams():
port_ids = []
for port in range(5):
print("Looking for a camera in port %s:" %port)
camera = cv2.VideoCapture(port)
if camera.isOpened():
ret = camera.read()[0]
if ret:
backendName =camera.getBackendName()
w = camera.get(3)
h = camera.get(4)
print("Camera %s (%s x %s) found in port %s " %(backendName,h,w, port))
port_ids.append(port)
camera.release()
return port_ids
def main():
print(f'Configuring Product: {productUID}...')
req = {"req": "hub.set"}
req["product"] = productUID
req["mode"] = "periodic"
req["outbound"] = 60
req["inbound"] = 120
req["align"] = True
card.Transaction(req)
main()
while True:
print("Taking a sample from the camera...")
with ImageImpulseRunner(modelfile) as runner:
try:
model_info = runner.init()
print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + ' (v' + str(model_info['project']['deploy_version']) + ')"')
labels = model_info['model_parameters']['labels']
videoCaptureDeviceId = 0
camera = cv2.VideoCapture(videoCaptureDeviceId)
ret = camera.read()[0]
if ret:
backendName = camera.getBackendName()
w = camera.get(3)
h = camera.get(4)
print("Camera %s (%s x %s) in port %s selected." %(backendName,h,w, videoCaptureDeviceId))
camera.release()
else:
raise Exception("Couldn't initialize selected camera.")
next_frame = 0 # limit to ~10 fps here
inference_count = 0
for res, img in runner.classifier(videoCaptureDeviceId):
if (next_frame > now()):
time.sleep((next_frame - now()) / 1000)
next_frame = now() + 500
inference_count += 1
# print('classification runner response\n', sorted(res['result']['classification'].items(), key=lambda x:x[1], reverse=True))
if inference_count == 5:
inference_count = 0
print('classification runner response', res['result']['classification'])
if "classification" in res["result"].keys():
req = {"req": "note.add"}
req["sync"] = True
note_body = {"inference_time": res['timing']['dsp'] + res['timing']['classification']}
print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
print('', flush=True)
sorted_items = sorted(res['result']['classification'].items(), key=lambda x:x[1], reverse=True)
inferred_state = sorted_items[0][0]
note_body["tank-state"] = inferred_state
note_body["classification"] = res['result']['classification']
req["body"] = note_body
card.Transaction(req)
# If the state is low or high, send a different Note with an
# alert message
req = {"req": "note.add"}
req["sync"] = True
req["file"] = "tank-alert.qo"
if inferred_state == 'tank-pressure-low':
req["body"] = {"message": "Tank pressure is low. Clean impeller."}
card.Transaction(req)
elif inferred_state == 'tank-pressure-high':
req["body"] = {"message": "Tank pressure is high. Backwash filter."}
card.Transaction(req)
break
finally:
if (runner):
runner.stop()
print("Pausing until next capture...")
time.sleep(240) | 0.207616 | 0.088347 |
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# Network geometry: 63 input features -> 50 -> 100 -> 50 -> 1 predicted price.
number_of_inputs = 63
number_of_outputs = 1
# Training hyper-parameters.
learning_rate=0.001
training_epochs=100
display_step=5  # NOTE(review): defined but never used in this file -- confirm before removing
layer_1_nodes=50
layer_2_nodes=100
layer_3_nodes=50
def readData():
    """Load house_data.csv, one-hot encode categoricals, and publish 0-1
    scaled train/test splits as module globals for trainModel() to consume."""
    global x_scaled_training, y_scaled_training, x_scaled_testing, y_scaled_testing, x_scaler, y_scaler
    dataFrame = pd.read_csv("house_data.csv")
    # Drop address columns that carry no predictive signal.
    del dataFrame["house_number"]
    del dataFrame['street_name']
    del dataFrame['unit_number']
    del dataFrame['zip_code']
    # One-hot encode the categorical columns; the target stays out of X.
    featuresDataFrame = pd.get_dummies(dataFrame, columns=["city", "garage_type"])
    del featuresDataFrame['sale_price']
    global x_train, x_test, y_train, y_test
    # BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is
    # the supported replacement and returns the same ndarray.
    x = featuresDataFrame.to_numpy()
    y = dataFrame[['sale_price']].to_numpy()
    x_scaler = MinMaxScaler(feature_range=(0, 1))
    y_scaler = MinMaxScaler(feature_range=(0, 1))
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
    # Fit the scalers on the training split only, then apply to the test split.
    x_scaled_training = x_scaler.fit_transform(x_train)
    y_scaled_training = y_scaler.fit_transform(y_train)
    x_scaled_testing = x_scaler.transform(x_test)
    y_scaled_testing = y_scaler.transform(y_test)
    print(len(x_train[0]), end="\n\n")
    # print(x_scaled_training[:5], end="\n\n")
    # print("The scale on X_data is: \n", x_scaler.scale_, "\nWith adjustments of: \n", x_scaler.min_)
    # print("\nThe scale on Y_data is: \n", y_scaler.scale_, "\nWith adjustments of: \n", y_scaler.min_)
    # print("\nNote: Y values were scaled by multiplying by {:.10f} and adding {:.4f}".format(Y_scaler.scale_[0], y_scaler.min_[0]))
def trainModel():
    """Build a 3-hidden-layer fully connected regression network in TF1 graph
    mode, train it on the globals prepared by readData(), and log per-epoch
    costs to TensorBoard under ./Logs.

    NOTE(review): relies on tf.placeholder and tf.contrib, i.e. TensorFlow 1.x;
    it will not run under TF 2 without a tf.compat.v1 migration -- confirm the
    intended TF version before touching this.
    """
    global number_of_inputs, number_of_outputs, learning_rate, training_epochs, display_step, layer_1_nodes, layer_2_nodes, layer_3_nodes
    # Feature placeholder: one row per house, number_of_inputs columns.
    with tf.variable_scope('input'):
        x = tf.placeholder(tf.float32, shape=(None, number_of_inputs))
    with tf.variable_scope('layer_1'):
        weights = tf.get_variable(name='weights_1', shape=[number_of_inputs, layer_1_nodes], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(name='biases_1', shape=[layer_1_nodes], initializer=tf.zeros_initializer())
        # Using relu and matrix multiplication to define the activation function
        layer_1_output = tf.nn.relu(tf.matmul(x, weights) + biases)
    with tf.variable_scope('layer_2'):
        weights = tf.get_variable(name='weights_2', shape=[layer_1_nodes, layer_2_nodes], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(name='biases_2', shape=[layer_2_nodes], initializer=tf.zeros_initializer())
        # Using relu and matrix multiplication to define the activation function
        layer_2_output = tf.nn.relu(tf.matmul(layer_1_output, weights) + biases)
    with tf.variable_scope('layer_3'):
        weights = tf.get_variable(name='weights_3', shape=[layer_2_nodes, layer_3_nodes], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(name='biases_3', shape=[layer_3_nodes], initializer=tf.zeros_initializer())
        # Using relu and matrix multiplication to define the activation function
        layer_3_output = tf.nn.relu(tf.matmul(layer_2_output, weights) + biases)
    with tf.variable_scope('output'):
        weights = tf.get_variable(name='weights_4', shape=[layer_3_nodes, number_of_outputs], initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.get_variable(name='biases_4', shape=[number_of_outputs], initializer=tf.zeros_initializer())
        # Using relu and matrix multiplication to define the activation function
        # NOTE(review): relu on the *output* clamps predictions to >= 0; the
        # targets are 0-1 scaled prices so this works, but a linear output is
        # the usual choice for regression -- confirm intent.
        output = tf.nn.relu(tf.matmul(layer_3_output, weights) + biases)
    # Mean squared error against the scaled sale price.
    with tf.variable_scope('cost'):
        y=tf.placeholder(tf.float32, shape=(None, 1))
        cost=tf.reduce_mean(tf.squared_difference(output, y))
    with tf.variable_scope('train'):
        optimizer=tf.train.AdamOptimizer(learning_rate).minimize(cost)
    with tf.variable_scope('logging'):
        tf.summary.scalar('current_cost', cost)
        log = tf.summary.merge_all()
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        # Separate writers so TensorBoard overlays train vs. test cost curves.
        training_writer = tf.summary.FileWriter('./Logs/training', session.graph)
        testing_writer = tf.summary.FileWriter('./Logs/testing', session.graph)
        for i in range(training_epochs):
            # One full-batch gradient step per epoch.
            session.run(optimizer, feed_dict={x : x_scaled_training, y : y_scaled_training})
            training_cost, training_prediction, training_log=session.run([cost, output, log], feed_dict={x: x_scaled_training, y: y_scaled_training})
            testing_cost, testing_prediction, testing_log=session.run([cost, output, log], feed_dict={x: x_scaled_testing, y: y_scaled_testing})
            training_writer.add_summary(training_log, i)
            testing_writer.add_summary(testing_log, i)
            print("Training Pass: {}".format(i))
            print("Training Cost:", training_cost)
            print("Testing Cost: ", testing_cost)
            #print("Training Prediction:", training_prediction)
        print("Training Complete")
# Script entry: prepare the data splits, then train the network.
readData()
trainModel()
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
number_of_inputs = 63
number_of_outputs = 1
learning_rate=0.001
training_epochs=100
display_step=5
layer_1_nodes=50
layer_2_nodes=100
layer_3_nodes=50
def readData():
global x_scaled_training, y_scaled_training, x_scaled_testing, y_scaled_testing, x_scaler, y_scaler
dataFrame = pd.read_csv("house_data.csv")
del dataFrame["house_number"]
del dataFrame['street_name']
del dataFrame['unit_number']
del dataFrame['zip_code']
featuresDataFrame = pd.get_dummies(dataFrame, columns=["city", "garage_type"])
del featuresDataFrame['sale_price']
global x_train, x_test, y_train, y_test
x=featuresDataFrame.as_matrix()
y=dataFrame[['sale_price']].as_matrix()
x_scaler = MinMaxScaler(feature_range=(0, 1))
y_scaler = MinMaxScaler(feature_range=(0, 1))
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
x_scaled_training = x_scaler.fit_transform(x_train)
y_scaled_training = y_scaler.fit_transform(y_train)
x_scaled_testing = x_scaler.transform(x_test)
y_scaled_testing = y_scaler.transform(y_test)
print(len(x_train[0]), end="\n\n")
# print(x_scaled_training[:5], end="\n\n")
# print("The scale on X_data is: \n", x_scaler.scale_, "\nWith adjustments of: \n", x_scaler.min_)
# print("\nThe scale on Y_data is: \n", y_scaler.scale_, "\nWith adjustments of: \n", y_scaler.min_)
# print("\nNote: Y values were scaled by multiplying by {:.10f} and adding {:.4f}".format(Y_scaler.scale_[0], y_scaler.min_[0]))
def trainModel():
global number_of_inputs, number_of_outputs, learning_rate, training_epochs, display_step, layer_1_nodes, layer_2_nodes, layer_3_nodes
with tf.variable_scope('input'):
x = tf.placeholder(tf.float32, shape=(None, number_of_inputs))
with tf.variable_scope('layer_1'):
weights = tf.get_variable(name='weights_1', shape=[number_of_inputs, layer_1_nodes], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name='biases_1', shape=[layer_1_nodes], initializer=tf.zeros_initializer())
# Using relu and matrix multiplication to define the activation function
layer_1_output = tf.nn.relu(tf.matmul(x, weights) + biases)
with tf.variable_scope('layer_2'):
weights = tf.get_variable(name='weights_2', shape=[layer_1_nodes, layer_2_nodes], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name='biases_2', shape=[layer_2_nodes], initializer=tf.zeros_initializer())
# Using relu and matrix multiplication to define the activation function
layer_2_output = tf.nn.relu(tf.matmul(layer_1_output, weights) + biases)
with tf.variable_scope('layer_3'):
weights = tf.get_variable(name='weights_3', shape=[layer_2_nodes, layer_3_nodes], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name='biases_3', shape=[layer_3_nodes], initializer=tf.zeros_initializer())
# Using relu and matrix multiplication to define the activation function
layer_3_output = tf.nn.relu(tf.matmul(layer_2_output, weights) + biases)
with tf.variable_scope('output'):
weights = tf.get_variable(name='weights_4', shape=[layer_3_nodes, number_of_outputs], initializer=tf.contrib.layers.xavier_initializer())
biases = tf.get_variable(name='biases_4', shape=[number_of_outputs], initializer=tf.zeros_initializer())
# Using relu and matrix multiplication to define the activation function
output = tf.nn.relu(tf.matmul(layer_3_output, weights) + biases)
with tf.variable_scope('cost'):
y=tf.placeholder(tf.float32, shape=(None, 1))
cost=tf.reduce_mean(tf.squared_difference(output, y))
with tf.variable_scope('train'):
optimizer=tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.variable_scope('logging'):
tf.summary.scalar('current_cost', cost)
log = tf.summary.merge_all()
with tf.Session() as session:
session.run(tf.global_variables_initializer())
training_writer = tf.summary.FileWriter('./Logs/training', session.graph)
testing_writer = tf.summary.FileWriter('./Logs/testing', session.graph)
for i in range(training_epochs):
session.run(optimizer, feed_dict={x : x_scaled_training, y : y_scaled_training})
training_cost, training_prediction, training_log=session.run([cost, output, log], feed_dict={x: x_scaled_training, y: y_scaled_training})
testing_cost, testing_prediction, testing_log=session.run([cost, output, log], feed_dict={x: x_scaled_testing, y: y_scaled_testing})
training_writer.add_summary(training_log, i)
testing_writer.add_summary(testing_log, i)
print("Training Pass: {}".format(i))
print("Training Cost:", training_cost)
print("Testing Cost: ", testing_cost)
#print("Training Prediction:", training_prediction)
print("Training Complete")
readData()
trainModel() | 0.41834 | 0.515498 |
import os
import pytest
import shutil
import tarfile
import tempfile
import unittest
from datetime import datetime
from elyra.util import create_temp_archive
class ArchiveTestCase(unittest.TestCase):
    """Tests for elyra.util.create_temp_archive: wildcard filters, recursion
    into subdirectories, and the require_complete contract."""

    temp_dir = tempfile.gettempdir()  # NOTE(review): unused class attribute
    test_timestamp = datetime.now().strftime("%m%d%H%M%S")
    test_dir_name = 'test_' + test_timestamp
    test_dir = None
    # Fixture files created in every test directory (and each subdirectory).
    test_files = [
        'a.py',
        'b.py',
        'c.json',
        'd.txt',
        'e.ipynb'
    ]
    def setUp(self):
        """
        Setup a temp folder with some files to be used
        during test cases
        """
        # create test files
        # NOTE(review): the positional argument to mkdtemp is its *suffix*,
        # not a prefix -- confirm that is the intended directory naming.
        self.test_dir = tempfile.mkdtemp(self.test_dir_name)
        self._create_test_files(self.test_dir)
    def tearDown(self):
        # Remove the fixture directory created in setUp.
        if os.path.exists(self.test_dir):
            shutil.rmtree(self.test_dir)
    def test_archive_all(self):
        test_archive_name = 'all-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*'])
        self.assertArchivedContent(archive_path, self.test_files)
    def test_archive_empty_filter(self):
        test_archive_name = 'all-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=[])
        self.assertArchivedFileCount(archive_path, 0)
    def test_archive_no_filter(self):
        test_archive_name = 'all-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir)
        self.assertArchivedFileCount(archive_path, 0)
    def test_archive_by_filter(self):
        test_archive_name = 'python-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*.py'])
        self.assertArchivedContent(archive_path, ['a.py', 'b.py'])
    def test_archive_by_sequence_wildcard(self):
        test_archive_name = 'python-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['[ab].*'])
        self.assertArchivedContent(archive_path, ['a.py', 'b.py'])
    def test_archive_by_excluded_sequence(self):
        test_archive_name = 'python-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*[!b].py'])
        self.assertArchivedContent(archive_path, ['a.py'])
    def test_archive_multiple_filters(self):
        test_archive_name = 'multiple-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*.json', '*.txt'])
        self.assertArchivedContent(archive_path, ['c.json', 'd.txt'])
    def test_archive_require_complete(self):
        test_archive_name = 'multiple-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir,
                                           filenames=['*.json', '*.txt', 'a.py'],
                                           require_complete=True)
        self.assertArchivedContent(archive_path, ['c.json', 'd.txt', 'a.py'])
    def test_archive_require_complete_fail(self):
        test_archive_name = 'multiple-' + self.test_timestamp + '.tar.gz'
        # c.py does not exist and exception is expected
        with pytest.raises(FileNotFoundError) as ex:
            create_temp_archive(test_archive_name, self.test_dir,
                                filenames=['*.json', '*.txt', 'a.py', 'c.py'],
                                require_complete=True)
        # BUG FIX: assert against the raised exception (ex.value); str() of the
        # ExceptionInfo wrapper itself is pytest-version dependent.
        assert "{'c.py'}" in str(ex.value)  # ensure c.py is the only item not matched
    def test_archive_nonexistent_filter(self):
        test_archive_name = 'empty-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*.yml'])
        self.assertArchivedContent(archive_path, [])
    def test_archive_with_subdirectories(self):
        subdir_name = os.path.join(self.test_dir, 'subdir')
        os.makedirs(subdir_name)
        self._create_test_files(subdir_name)
        test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(archive_name=test_archive_name,
                                           source_dir=self.test_dir,
                                           filenames=['*'],
                                           recursive=True)
        # 5 fixture files at the top level + 5 in the subdirectory.
        self.assertArchivedFileCount(archive_path, 10)
    def test_archive_with_subdirectories_no_filter(self):
        subdir_name = os.path.join(self.test_dir, 'subdir')
        os.makedirs(subdir_name)
        self._create_test_files(subdir_name)
        test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(archive_name=test_archive_name,
                                           source_dir=self.test_dir,
                                           recursive=True)
        self.assertArchivedFileCount(archive_path, 0)
    def test_archive_with_subdirectories_and_filters(self):
        subdir_name = os.path.join(self.test_dir, 'subdir')
        os.makedirs(subdir_name)
        self._create_test_files(subdir_name)
        test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(archive_name=test_archive_name,
                                           source_dir=self.test_dir,
                                           filenames=['subdir/*.py'],
                                           recursive=True)
        self.assertArchivedFileCount(archive_path, 2)
        self.assertArchivedContent(archive_path, ['subdir/a.py', 'subdir/b.py'])
    def test_archive_with_second_level_subdirectories(self):
        subdir_name = os.path.join(self.test_dir, 'subdir')
        os.makedirs(subdir_name)
        self._create_test_files(subdir_name)
        another_subdir_name = os.path.join(subdir_name, 'another.subdir')
        os.makedirs(another_subdir_name)
        self._create_test_files(another_subdir_name)
        test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(archive_name=test_archive_name,
                                           source_dir=self.test_dir,
                                           filenames=['*'],
                                           recursive=True)
        # 5 fixture files at each of the three directory levels.
        self.assertArchivedFileCount(archive_path, 15)
    def test_archive_with_second_level_subdirectories_and_nonexistent_filter(self):
        subdir_name = os.path.join(self.test_dir, 'subdir')
        os.makedirs(subdir_name)
        self._create_test_files(subdir_name)
        another_subdir_name = os.path.join(subdir_name, 'another.subdir')
        os.makedirs(another_subdir_name)
        self._create_test_files(another_subdir_name)
        test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
        archive_path = create_temp_archive(archive_name=test_archive_name,
                                           source_dir=self.test_dir,
                                           filenames=['*.yml'],
                                           recursive=True)
        self.assertArchivedFileCount(archive_path, 0)
    def assertArchivedContent(self, archive_path, expected_content):
        """Assert the archive holds exactly expected_content (order-insensitive)."""
        actual_content = []
        with tarfile.open(archive_path, "r:gz") as tar:
            for tarinfo in tar:
                if tarinfo.isreg():
                    actual_content.append(tarinfo.name)
        self.assertListEqual(sorted(actual_content), sorted(expected_content))
    def assertArchivedFileCount(self, archive_path, expected_number_of_files):
        """Assert the archive holds exactly the expected number of regular files."""
        n_files = 0
        with tarfile.open(archive_path, "r:gz") as tar:
            for tarinfo in tar:
                if tarinfo.isreg():
                    n_files = n_files + 1
        self.assertEqual(expected_number_of_files, n_files)
    def _create_test_files(self, dir_name):
        # Touch each fixture file (creates it empty when missing).
        for test_file in self.test_files:
            file_path = os.path.join(dir_name, test_file)
            with open(file_path, 'a'):
                os.utime(file_path, None)
import pytest
import shutil
import tarfile
import tempfile
import unittest
from datetime import datetime
from elyra.util import create_temp_archive
class ArchiveTestCase(unittest.TestCase):
temp_dir = tempfile.gettempdir()
test_timestamp = datetime.now().strftime("%m%d%H%M%S")
test_dir_name = 'test_' + test_timestamp
test_dir = None
test_files = [
'a.py',
'b.py',
'c.json',
'd.txt',
'e.ipynb'
]
def setUp(self):
"""
Setup a temp folder with some files to be used
during test cases
"""
# create test files
self.test_dir = tempfile.mkdtemp(self.test_dir_name)
self._create_test_files(self.test_dir)
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_archive_all(self):
test_archive_name = 'all-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*'])
self.assertArchivedContent(archive_path, self.test_files)
def test_archive_empty_filter(self):
test_archive_name = 'all-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=[])
self.assertArchivedFileCount(archive_path, 0)
def test_archive_no_filter(self):
test_archive_name = 'all-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir)
self.assertArchivedFileCount(archive_path, 0)
def test_archive_by_filter(self):
test_archive_name = 'python-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*.py'])
self.assertArchivedContent(archive_path, ['a.py', 'b.py'])
def test_archive_by_sequence_wildcard(self):
test_archive_name = 'python-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['[ab].*'])
self.assertArchivedContent(archive_path, ['a.py', 'b.py'])
def test_archive_by_excluded_sequence(self):
test_archive_name = 'python-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*[!b].py'])
self.assertArchivedContent(archive_path, ['a.py'])
def test_archive_multiple_filters(self):
test_archive_name = 'multiple-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*.json', '*.txt'])
self.assertArchivedContent(archive_path, ['c.json', 'd.txt'])
def test_archive_require_complete(self):
test_archive_name = 'multiple-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir,
filenames=['*.json', '*.txt', 'a.py'],
require_complete=True)
self.assertArchivedContent(archive_path, ['c.json', 'd.txt', 'a.py'])
def test_archive_require_complete_fail(self):
test_archive_name = 'multiple-' + self.test_timestamp + '.tar.gz'
# c.py does not exist and exception is expected
with pytest.raises(FileNotFoundError) as ex:
create_temp_archive(test_archive_name, self.test_dir,
filenames=['*.json', '*.txt', 'a.py', 'c.py'],
require_complete=True)
assert "{'c.py'}" in str(ex) # ensure c.py is the only item not matched
def test_archive_nonexistent_filter(self):
test_archive_name = 'empty-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(test_archive_name, self.test_dir, filenames=['*.yml'])
self.assertArchivedContent(archive_path, [])
def test_archive_with_subdirectories(self):
subdir_name = os.path.join(self.test_dir, 'subdir')
os.makedirs(subdir_name)
self._create_test_files(subdir_name)
test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(archive_name=test_archive_name,
source_dir=self.test_dir,
filenames=['*'],
recursive=True)
self.assertArchivedFileCount(archive_path, 10)
def test_archive_with_subdirectories_no_filter(self):
subdir_name = os.path.join(self.test_dir, 'subdir')
os.makedirs(subdir_name)
self._create_test_files(subdir_name)
test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(archive_name=test_archive_name,
source_dir=self.test_dir,
recursive=True)
self.assertArchivedFileCount(archive_path, 0)
def test_archive_with_subdirectories_and_filters(self):
subdir_name = os.path.join(self.test_dir, 'subdir')
os.makedirs(subdir_name)
self._create_test_files(subdir_name)
test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(archive_name=test_archive_name,
source_dir=self.test_dir,
filenames=['subdir/*.py'],
recursive=True)
self.assertArchivedFileCount(archive_path, 2)
self.assertArchivedContent(archive_path, ['subdir/a.py', 'subdir/b.py'])
def test_archive_with_second_level_subdirectories(self):
subdir_name = os.path.join(self.test_dir, 'subdir')
os.makedirs(subdir_name)
self._create_test_files(subdir_name)
another_subdir_name = os.path.join(subdir_name, 'another.subdir')
os.makedirs(another_subdir_name)
self._create_test_files(another_subdir_name)
test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(archive_name=test_archive_name,
source_dir=self.test_dir,
filenames=['*'],
recursive=True)
self.assertArchivedFileCount(archive_path, 15)
def test_archive_with_second_level_subdirectories_and_nonexistent_filter(self):
subdir_name = os.path.join(self.test_dir, 'subdir')
os.makedirs(subdir_name)
self._create_test_files(subdir_name)
another_subdir_name = os.path.join(subdir_name, 'another.subdir')
os.makedirs(another_subdir_name)
self._create_test_files(another_subdir_name)
test_archive_name = 'subdir-' + self.test_timestamp + '.tar.gz'
archive_path = create_temp_archive(archive_name=test_archive_name,
source_dir=self.test_dir,
filenames=['*.yml'],
recursive=True)
self.assertArchivedFileCount(archive_path, 0)
def assertArchivedContent(self, archive_path, expected_content):
actual_content = []
with tarfile.open(archive_path, "r:gz") as tar:
for tarinfo in tar:
if tarinfo.isreg():
actual_content.append(tarinfo.name)
self.assertListEqual(sorted(actual_content), sorted(expected_content))
def assertArchivedFileCount(self, archive_path, expected_number_of_files):
n_files = 0
with tarfile.open(archive_path, "r:gz") as tar:
for tarinfo in tar:
if tarinfo.isreg():
n_files = n_files + 1
self.assertEqual(expected_number_of_files, n_files)
def _create_test_files(self, dir_name):
for test_file in self.test_files:
file_path = os.path.join(dir_name, test_file)
with open(file_path, 'a'):
os.utime(file_path, None) | 0.263031 | 0.511351 |
import os.path
import torchvision.transforms as transforms
from .dataset import DatasetBase
from PIL import Image
import random
import numpy as np
import pickle
import pandas as pd
from PATH import PATH
PRESET_VARS = PATH()
import pickle
class Test_dataset(object):
    """Frame-level dataset for evaluating a model over one video.

    Wraps a dict with parallel sequences under the keys 'path', 'label'
    and 'frames_ids' (one entry per frame) and yields transformed image
    samples.  NOTE(review): assumes those three keys exist and are the
    same length — confirm against the caller that builds video_data.
    """

    def __init__(self, opt, video_data, train_mode='Test', transform=None):
        self._name = 'Test_dataset'
        self._train_mode = train_mode
        # BUG FIX: `opt` was previously dropped, but _create_transform()
        # reads self._opt.image_size, raising AttributeError whenever no
        # explicit transform was supplied.
        self._opt = opt
        if transform is not None:
            self._transform = transform
        else:
            self._transform = self._create_transform()
        # read dataset
        self._data = video_data
        self._read_dataset()

    def __getitem__(self, index):
        """Return a sample dict: transformed image plus frame metadata."""
        assert (index < self._dataset_size)
        img_path = self._data['path'][index]
        image = Image.open(img_path).convert('RGB')
        label = self._data['label'][index]
        frame_id = self._data['frames_ids'][index]
        # transform data
        image = self._transform(image)
        # pack data
        sample = {'image': image,
                  'label': label,
                  'path': img_path,
                  'index': index,
                  'frames_ids': frame_id
                  }
        return sample

    def _read_dataset(self):
        # One id per frame path; dataset size equals the number of frames.
        self._ids = np.arange(len(self._data['path']))
        self._dataset_size = len(self._ids)

    def __len__(self):
        return self._dataset_size

    def _create_transform(self):
        """Build and return the torchvision preprocessing pipeline.

        'Train' mode adds augmentation (scaled resize, random crop, colour
        jitter, horizontal flip); any other mode only resizes.  Both paths
        normalise with the ImageNet mean/std.
        """
        img_size = self._opt.image_size
        if self._train_mode == 'Train':
            resize = int(img_size * 1.2)
            transform_list = [transforms.Resize(resize),
                              transforms.RandomCrop(img_size),
                              transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),
                              transforms.RandomHorizontalFlip(),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                   std=[0.229, 0.224, 0.225]),
                              ]
        else:
            transform_list = [transforms.Resize(img_size),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                   std=[0.229, 0.224, 0.225]),
                              ]
        self._transform = transforms.Compose(transform_list)
        # BUG FIX: the original returned None (it only assigned
        # self._transform), so __init__ stored None as the transform and
        # __getitem__ crashed with "TypeError: 'NoneType' is not callable".
        return self._transform
import torchvision.transforms as transforms
from .dataset import DatasetBase
from PIL import Image
import random
import numpy as np
import pickle
import pandas as pd
from PATH import PATH
PRESET_VARS = PATH()
import pickle
class Test_dataset(object):
    """Frame-level dataset for evaluating a model over one video.

    Wraps a dict with parallel sequences under the keys 'path', 'label'
    and 'frames_ids' (one entry per frame) and yields transformed image
    samples.  NOTE(review): assumes those three keys exist and are the
    same length — confirm against the caller that builds video_data.
    """

    def __init__(self, opt, video_data, train_mode='Test', transform=None):
        self._name = 'Test_dataset'
        self._train_mode = train_mode
        # BUG FIX: `opt` was previously dropped, but _create_transform()
        # reads self._opt.image_size, raising AttributeError whenever no
        # explicit transform was supplied.
        self._opt = opt
        if transform is not None:
            self._transform = transform
        else:
            self._transform = self._create_transform()
        # read dataset
        self._data = video_data
        self._read_dataset()

    def __getitem__(self, index):
        """Return a sample dict: transformed image plus frame metadata."""
        assert (index < self._dataset_size)
        img_path = self._data['path'][index]
        image = Image.open(img_path).convert('RGB')
        label = self._data['label'][index]
        frame_id = self._data['frames_ids'][index]
        # transform data
        image = self._transform(image)
        # pack data
        sample = {'image': image,
                  'label': label,
                  'path': img_path,
                  'index': index,
                  'frames_ids': frame_id
                  }
        return sample

    def _read_dataset(self):
        # One id per frame path; dataset size equals the number of frames.
        self._ids = np.arange(len(self._data['path']))
        self._dataset_size = len(self._ids)

    def __len__(self):
        return self._dataset_size

    def _create_transform(self):
        """Build and return the torchvision preprocessing pipeline.

        'Train' mode adds augmentation (scaled resize, random crop, colour
        jitter, horizontal flip); any other mode only resizes.  Both paths
        normalise with the ImageNet mean/std.
        """
        img_size = self._opt.image_size
        if self._train_mode == 'Train':
            resize = int(img_size * 1.2)
            transform_list = [transforms.Resize(resize),
                              transforms.RandomCrop(img_size),
                              transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),
                              transforms.RandomHorizontalFlip(),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                   std=[0.229, 0.224, 0.225]),
                              ]
        else:
            transform_list = [transforms.Resize(img_size),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                   std=[0.229, 0.224, 0.225]),
                              ]
        self._transform = transforms.Compose(transform_list)
        # BUG FIX: the original returned None (it only assigned
        # self._transform), so __init__ stored None as the transform and
        # __getitem__ crashed with "TypeError: 'NoneType' is not callable".
        return self._transform
import pygame
import math
import numpy as np
from pygame.locals import *
class Wheel():
    """A single drawable wheel: a rectangle rotated about its own centre.

    The rectangle is half as wide as it is tall (size x 2*size) and is
    rendered at self.pos after rotation by self.ang radians.
    """

    def __init__(self, window, colour, pos, size):
        self.window = window
        self.colour = colour
        self.size = size
        self.pos = np.array(pos)
        self.ang = 0.0
        self.mat = self._rotation_matrix(self.ang)
        half_width = self.size / 2
        self.points_mat = np.array([[-half_width, -self.size],
                                    [half_width, -self.size],
                                    [half_width, self.size],
                                    [-half_width, self.size]])

    @staticmethod
    def _rotation_matrix(angle):
        """2x2 counter-clockwise rotation matrix for *angle* radians."""
        return np.array([[math.cos(angle), -math.sin(angle)],
                         [math.sin(angle), math.cos(angle)]])

    def render(self):
        """Draw the rotated rectangle at its current position."""
        corners = np.transpose(np.matmul(self.mat, np.transpose(self.points_mat)))
        pygame.draw.polygon(self.window, self.colour, corners + self.pos)

    def set_pos(self, pos):
        # Stored as given (not converted to np.array), matching callers
        # that later index it as a plain sequence.
        self.pos = pos

    def set_ang(self, ang):
        """Set the wheel's heading and refresh its cached rotation matrix."""
        self.ang = ang
        self.mat = self._rotation_matrix(ang)
class Car():
    """Player-controlled car simulated with an axle/turning-point model.

    The body is a rectangle (points_mat) with four Wheel objects.  WASD
    steers the front wheels and accelerates/brakes.  Each frame the car is
    rotated about the intersection of the front- and rear-axle normals
    (the turning point); when the normals are parallel a far-away point is
    used so the car moves essentially straight.
    """

    def __init__(self, window, colours, pos, size):
        """Create the car centred on *pos*; the body spans pos +/- size in x."""
        self.window = window
        self.colours = colours
        self.size = size
        self.pos = np.array(pos)
        self.speed = 0.0
        self.vel = np.array([0.0, 0.0])
        self.acc = 0.0
        self.term_speed = 400
        self.ang = 0.0
        self.ang_mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
                                 [math.sin(self.ang), math.cos(self.ang)]])
        self.wheel_vel = 0.0
        self.wheel_ang = 0.0
        self.max_wheel_ang = math.pi/6
        # Body corners in absolute (window) coordinates.
        self.points_mat = np.array([[self.pos[0] - self.size, self.pos[1] - self.size*2.5],
                                    [self.pos[0] + self.size, self.pos[1] - self.size*2.5],
                                    [self.pos[0] + self.size, self.pos[1] + self.size*2.5],
                                    [self.pos[0] - self.size, self.pos[1] + self.size*2.5]])
        # Wheel/axle anchor offsets relative to the car centre; entries 4
        # and 5 are the front and rear axle midpoints respectively.
        self.wheel_pos = np.array([[-self.size, -self.size*1.6],
                                   [self.size, -self.size*1.6],
                                   [self.size, self.size*1.6],
                                   [-self.size, self.size*1.6],
                                   [0, self.size*1.6],
                                   [0, -self.size*1.6]])
        self.front_axel = np.array([self.pos[0], self.pos[1] + self.size * 1.6])
        self.rear_axel = np.array([self.pos[0], self.pos[1] - self.size * 1.6])
        self.turning_point = np.array([0.0, 0.0])
        self.wheels = []
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] - self.size, self.pos[1] - self.size * 1.6], size / 3))
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] + self.size, self.pos[1] - self.size * 1.6], size / 3))
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] + self.size, self.pos[1] + self.size * 1.6], size / 3))
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] - self.size, self.pos[1] + self.size * 1.6], size / 3))

    def dynamics(self, frame_time):
        """Advance the simulation by *frame_time* seconds using live key state."""
        pressed = pygame.key.get_pressed()
        # Rotation inputs: A/D steer; with no input the wheels recentre.
        if (pressed[pygame.K_a] and not pressed[pygame.K_d]) or (not pressed[pygame.K_a] and pressed[pygame.K_d]):
            if pressed[pygame.K_a] and not pressed[pygame.K_d]:
                self.wheel_vel = -2
            elif not pressed[pygame.K_a] and pressed[pygame.K_d]:
                self.wheel_vel = 2
        else:
            if self.wheel_ang > 0.01:
                self.wheel_vel = -2
            elif self.wheel_ang < -0.01:
                self.wheel_vel = 2
            else:
                self.wheel_vel = 0
        # Limit rotation angle to maximum
        self.wheel_ang += self.wheel_vel * frame_time
        if self.wheel_ang > self.max_wheel_ang:
            self.wheel_ang = self.max_wheel_ang
        elif self.wheel_ang < -self.max_wheel_ang:
            self.wheel_ang = -self.max_wheel_ang
        # Translation inputs: W/S throttle; otherwise friction toward rest.
        if pressed[pygame.K_w] and not pressed[pygame.K_s]:
            self.acc = 100
        elif not pressed[pygame.K_w] and pressed[pygame.K_s]:
            self.acc = -100
        else:
            if self.speed > 0.0001:
                self.acc = -50
            elif self.speed < -0.0001:
                # BUG FIX: the threshold was `< 0.0001`, which includes
                # speed == 0, so a stationary car with no input gained
                # forward acceleration and jittered; `else: acc = 0` was
                # unreachable at rest.
                self.acc = 50
            else:
                self.acc = 0
        # Limit speed to terminal speed (reverse capped at a third).
        if self.speed > self.term_speed:
            self.speed = self.term_speed
        elif self.speed < -self.term_speed/3:
            self.speed = -self.term_speed/3
        # Recalculate wheel positions in the car's current orientation.
        wheel_pos = np.matmul(self.ang_mat, np.transpose(self.wheel_pos))
        wheel_pos = np.transpose(wheel_pos)
        # Find axle pivot points
        self.front_axel = wheel_pos[4]
        self.rear_axel = wheel_pos[5]
        # Recalculate steering (front-wheel) rotation matrix.
        self.front_mat = np.array([[math.cos(self.wheel_ang + self.ang), -math.sin(self.wheel_ang + self.ang)],
                                   [math.sin(self.wheel_ang + self.ang), math.cos(self.wheel_ang + self.ang)]])
        # Calculate wheel normals
        self.front_norm = np.matmul(self.front_mat, np.transpose(np.array([1.0, 0.0])))
        self.front_norm = np.transpose(self.front_norm)
        self.rear_norm = np.matmul(self.ang_mat, np.transpose(np.array([1.0, 0.0])))
        self.rear_norm = np.transpose(self.rear_norm)
        # Find turning point: intersection of the two axle normals; if the
        # normals are parallel, place it far away along the rear normal.
        if (self.rear_norm[0] * self.front_norm[1] - self.rear_norm[1] * self.front_norm[0]) != 0:
            mu = ((self.rear_norm[0]*(self.rear_axel[1] - self.front_axel[1]) - self.rear_norm[1]*(self.rear_axel[0] - self.front_axel[0]))
                  / (self.rear_norm[0] * self.front_norm[1] - self.rear_norm[1] * self.front_norm[0]))
            self.turning_point = self.front_axel + mu * self.front_norm + self.pos
        else:
            mu = 100000
            self.turning_point = self.rear_axel + mu * self.rear_norm + self.pos
        # Move car geometry away from turning point
        self.points_mat = np.array([[self.points_mat[0][0] - self.turning_point[0], self.points_mat[0][1] - self.turning_point[1]],
                                    [self.points_mat[1][0] - self.turning_point[0], self.points_mat[1][1] - self.turning_point[1]],
                                    [self.points_mat[2][0] - self.turning_point[0], self.points_mat[2][1] - self.turning_point[1]],
                                    [self.points_mat[3][0] - self.turning_point[0], self.points_mat[3][1] - self.turning_point[1]]])
        # Calculate rotation angle from arc length travelled this frame.
        radius = np.sqrt((self.pos - self.turning_point).dot(self.pos - self.turning_point))
        self.speed += self.acc * frame_time
        displacement = self.speed * frame_time
        angle = displacement / radius
        if self.wheel_ang < 0:
            angle *= -1
        self.ang += angle
        self.ang_mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
                                 [math.sin(self.ang), math.cos(self.ang)]])
        translation_mat = np.array([[math.cos(angle), -math.sin(angle)],
                                    [math.sin(angle), math.cos(angle)]])
        # Apply translation matrix
        self.points_mat = np.matmul(translation_mat, np.transpose(self.points_mat))
        self.points_mat = np.transpose(self.points_mat)
        # Move car geometry back from turning point
        self.points_mat = np.array([[self.points_mat[0][0] + self.turning_point[0], self.points_mat[0][1] + self.turning_point[1]],
                                    [self.points_mat[1][0] + self.turning_point[0], self.points_mat[1][1] + self.turning_point[1]],
                                    [self.points_mat[2][0] + self.turning_point[0], self.points_mat[2][1] + self.turning_point[1]],
                                    [self.points_mat[3][0] + self.turning_point[0], self.points_mat[3][1] + self.turning_point[1]]])
        # Car centre = mean of the four body corners.
        self.pos = np.array([(self.points_mat[0][0] + self.points_mat[1][0] + self.points_mat[2][0] + self.points_mat[3][0]) / 4,
                             (self.points_mat[0][1] + self.points_mat[1][1] + self.points_mat[2][1] + self.points_mat[3][1]) / 4])
        # Recalculate wheel positions with the updated orientation.
        wheel_pos = np.matmul(self.ang_mat, np.transpose(self.wheel_pos))
        wheel_pos = np.transpose(wheel_pos)
        # Apply new wheel positions
        self.wheels[0].set_pos([wheel_pos[0][0] + self.pos[0], wheel_pos[0][1] + self.pos[1]])
        self.wheels[1].set_pos([wheel_pos[1][0] + self.pos[0], wheel_pos[1][1] + self.pos[1]])
        self.wheels[2].set_pos([wheel_pos[2][0] + self.pos[0], wheel_pos[2][1] + self.pos[1]])
        self.wheels[3].set_pos([wheel_pos[3][0] + self.pos[0], wheel_pos[3][1] + self.pos[1]])
        # Apply new wheel rotations (wheels 2 and 3 are the steered pair).
        self.wheels[0].set_ang(self.ang)
        self.wheels[1].set_ang(self.ang)
        self.wheels[2].set_ang(self.wheel_ang + self.ang)
        self.wheels[3].set_ang(self.wheel_ang + self.ang)

    def render(self):
        """Draw the four wheels, then the car body on top."""
        for wheel in self.wheels:
            wheel.render()
        pygame.draw.polygon(self.window, self.colours["red"], self.points_mat)

    def display_debug(self):
        """Overlay the axle normal lines and the current turning point."""
        pygame.draw.line(self.window, self.colours["yellow"], self.pos + self.rear_axel + 10000 * self.rear_norm,
                         self.pos + self.rear_axel - 10000 * self.rear_norm)
        pygame.draw.line(self.window, self.colours["yellow"], self.pos + self.front_axel + 10000 * self.front_norm,
                         self.pos + self.front_axel - 10000 * self.front_norm)
        pygame.draw.circle(self.window, self.colours["blue"], [int(self.turning_point[0]), int(self.turning_point[1])], 3)
#----------------------------------------------------------------------------------------------------------------------------------
def main():
    """Script entry point; the interactive demo loop is not implemented yet."""
    pass


if __name__ == "__main__":
    main()
import math
import numpy as np
from pygame.locals import *
class Wheel():
    """A single drawable wheel: a rectangle rotated about its own centre.

    The rectangle is half as wide as it is tall (size x 2*size) and is
    rendered at self.pos after rotation by self.ang radians.
    """

    def __init__(self, window, colour, pos, size):
        self.window = window
        self.colour = colour
        self.size = size
        self.pos = np.array(pos)
        self.ang = 0.0
        self.mat = self._rotation_matrix(self.ang)
        half_width = self.size / 2
        self.points_mat = np.array([[-half_width, -self.size],
                                    [half_width, -self.size],
                                    [half_width, self.size],
                                    [-half_width, self.size]])

    @staticmethod
    def _rotation_matrix(angle):
        """2x2 counter-clockwise rotation matrix for *angle* radians."""
        return np.array([[math.cos(angle), -math.sin(angle)],
                         [math.sin(angle), math.cos(angle)]])

    def render(self):
        """Draw the rotated rectangle at its current position."""
        corners = np.transpose(np.matmul(self.mat, np.transpose(self.points_mat)))
        pygame.draw.polygon(self.window, self.colour, corners + self.pos)

    def set_pos(self, pos):
        # Stored as given (not converted to np.array), matching callers
        # that later index it as a plain sequence.
        self.pos = pos

    def set_ang(self, ang):
        """Set the wheel's heading and refresh its cached rotation matrix."""
        self.ang = ang
        self.mat = self._rotation_matrix(ang)
class Car():
    """Player-controlled car simulated with an axle/turning-point model.

    The body is a rectangle (points_mat) with four Wheel objects.  WASD
    steers the front wheels and accelerates/brakes.  Each frame the car is
    rotated about the intersection of the front- and rear-axle normals
    (the turning point); when the normals are parallel a far-away point is
    used so the car moves essentially straight.
    """

    def __init__(self, window, colours, pos, size):
        """Create the car centred on *pos*; the body spans pos +/- size in x."""
        self.window = window
        self.colours = colours
        self.size = size
        self.pos = np.array(pos)
        self.speed = 0.0
        self.vel = np.array([0.0, 0.0])
        self.acc = 0.0
        self.term_speed = 400
        self.ang = 0.0
        self.ang_mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
                                 [math.sin(self.ang), math.cos(self.ang)]])
        self.wheel_vel = 0.0
        self.wheel_ang = 0.0
        self.max_wheel_ang = math.pi/6
        # Body corners in absolute (window) coordinates.
        self.points_mat = np.array([[self.pos[0] - self.size, self.pos[1] - self.size*2.5],
                                    [self.pos[0] + self.size, self.pos[1] - self.size*2.5],
                                    [self.pos[0] + self.size, self.pos[1] + self.size*2.5],
                                    [self.pos[0] - self.size, self.pos[1] + self.size*2.5]])
        # Wheel/axle anchor offsets relative to the car centre; entries 4
        # and 5 are the front and rear axle midpoints respectively.
        self.wheel_pos = np.array([[-self.size, -self.size*1.6],
                                   [self.size, -self.size*1.6],
                                   [self.size, self.size*1.6],
                                   [-self.size, self.size*1.6],
                                   [0, self.size*1.6],
                                   [0, -self.size*1.6]])
        self.front_axel = np.array([self.pos[0], self.pos[1] + self.size * 1.6])
        self.rear_axel = np.array([self.pos[0], self.pos[1] - self.size * 1.6])
        self.turning_point = np.array([0.0, 0.0])
        self.wheels = []
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] - self.size, self.pos[1] - self.size * 1.6], size / 3))
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] + self.size, self.pos[1] - self.size * 1.6], size / 3))
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] + self.size, self.pos[1] + self.size * 1.6], size / 3))
        self.wheels.append(Wheel(window, colours["grey"], [self.pos[0] - self.size, self.pos[1] + self.size * 1.6], size / 3))

    def dynamics(self, frame_time):
        """Advance the simulation by *frame_time* seconds using live key state."""
        pressed = pygame.key.get_pressed()
        # Rotation inputs: A/D steer; with no input the wheels recentre.
        if (pressed[pygame.K_a] and not pressed[pygame.K_d]) or (not pressed[pygame.K_a] and pressed[pygame.K_d]):
            if pressed[pygame.K_a] and not pressed[pygame.K_d]:
                self.wheel_vel = -2
            elif not pressed[pygame.K_a] and pressed[pygame.K_d]:
                self.wheel_vel = 2
        else:
            if self.wheel_ang > 0.01:
                self.wheel_vel = -2
            elif self.wheel_ang < -0.01:
                self.wheel_vel = 2
            else:
                self.wheel_vel = 0
        # Limit rotation angle to maximum
        self.wheel_ang += self.wheel_vel * frame_time
        if self.wheel_ang > self.max_wheel_ang:
            self.wheel_ang = self.max_wheel_ang
        elif self.wheel_ang < -self.max_wheel_ang:
            self.wheel_ang = -self.max_wheel_ang
        # Translation inputs: W/S throttle; otherwise friction toward rest.
        if pressed[pygame.K_w] and not pressed[pygame.K_s]:
            self.acc = 100
        elif not pressed[pygame.K_w] and pressed[pygame.K_s]:
            self.acc = -100
        else:
            if self.speed > 0.0001:
                self.acc = -50
            elif self.speed < -0.0001:
                # BUG FIX: the threshold was `< 0.0001`, which includes
                # speed == 0, so a stationary car with no input gained
                # forward acceleration and jittered; `else: acc = 0` was
                # unreachable at rest.
                self.acc = 50
            else:
                self.acc = 0
        # Limit speed to terminal speed (reverse capped at a third).
        if self.speed > self.term_speed:
            self.speed = self.term_speed
        elif self.speed < -self.term_speed/3:
            self.speed = -self.term_speed/3
        # Recalculate wheel positions in the car's current orientation.
        wheel_pos = np.matmul(self.ang_mat, np.transpose(self.wheel_pos))
        wheel_pos = np.transpose(wheel_pos)
        # Find axle pivot points
        self.front_axel = wheel_pos[4]
        self.rear_axel = wheel_pos[5]
        # Recalculate steering (front-wheel) rotation matrix.
        self.front_mat = np.array([[math.cos(self.wheel_ang + self.ang), -math.sin(self.wheel_ang + self.ang)],
                                   [math.sin(self.wheel_ang + self.ang), math.cos(self.wheel_ang + self.ang)]])
        # Calculate wheel normals
        self.front_norm = np.matmul(self.front_mat, np.transpose(np.array([1.0, 0.0])))
        self.front_norm = np.transpose(self.front_norm)
        self.rear_norm = np.matmul(self.ang_mat, np.transpose(np.array([1.0, 0.0])))
        self.rear_norm = np.transpose(self.rear_norm)
        # Find turning point: intersection of the two axle normals; if the
        # normals are parallel, place it far away along the rear normal.
        if (self.rear_norm[0] * self.front_norm[1] - self.rear_norm[1] * self.front_norm[0]) != 0:
            mu = ((self.rear_norm[0]*(self.rear_axel[1] - self.front_axel[1]) - self.rear_norm[1]*(self.rear_axel[0] - self.front_axel[0]))
                  / (self.rear_norm[0] * self.front_norm[1] - self.rear_norm[1] * self.front_norm[0]))
            self.turning_point = self.front_axel + mu * self.front_norm + self.pos
        else:
            mu = 100000
            self.turning_point = self.rear_axel + mu * self.rear_norm + self.pos
        # Move car geometry away from turning point
        self.points_mat = np.array([[self.points_mat[0][0] - self.turning_point[0], self.points_mat[0][1] - self.turning_point[1]],
                                    [self.points_mat[1][0] - self.turning_point[0], self.points_mat[1][1] - self.turning_point[1]],
                                    [self.points_mat[2][0] - self.turning_point[0], self.points_mat[2][1] - self.turning_point[1]],
                                    [self.points_mat[3][0] - self.turning_point[0], self.points_mat[3][1] - self.turning_point[1]]])
        # Calculate rotation angle from arc length travelled this frame.
        radius = np.sqrt((self.pos - self.turning_point).dot(self.pos - self.turning_point))
        self.speed += self.acc * frame_time
        displacement = self.speed * frame_time
        angle = displacement / radius
        if self.wheel_ang < 0:
            angle *= -1
        self.ang += angle
        self.ang_mat = np.array([[math.cos(self.ang), -math.sin(self.ang)],
                                 [math.sin(self.ang), math.cos(self.ang)]])
        translation_mat = np.array([[math.cos(angle), -math.sin(angle)],
                                    [math.sin(angle), math.cos(angle)]])
        # Apply translation matrix
        self.points_mat = np.matmul(translation_mat, np.transpose(self.points_mat))
        self.points_mat = np.transpose(self.points_mat)
        # Move car geometry back from turning point
        self.points_mat = np.array([[self.points_mat[0][0] + self.turning_point[0], self.points_mat[0][1] + self.turning_point[1]],
                                    [self.points_mat[1][0] + self.turning_point[0], self.points_mat[1][1] + self.turning_point[1]],
                                    [self.points_mat[2][0] + self.turning_point[0], self.points_mat[2][1] + self.turning_point[1]],
                                    [self.points_mat[3][0] + self.turning_point[0], self.points_mat[3][1] + self.turning_point[1]]])
        # Car centre = mean of the four body corners.
        self.pos = np.array([(self.points_mat[0][0] + self.points_mat[1][0] + self.points_mat[2][0] + self.points_mat[3][0]) / 4,
                             (self.points_mat[0][1] + self.points_mat[1][1] + self.points_mat[2][1] + self.points_mat[3][1]) / 4])
        # Recalculate wheel positions with the updated orientation.
        wheel_pos = np.matmul(self.ang_mat, np.transpose(self.wheel_pos))
        wheel_pos = np.transpose(wheel_pos)
        # Apply new wheel positions
        self.wheels[0].set_pos([wheel_pos[0][0] + self.pos[0], wheel_pos[0][1] + self.pos[1]])
        self.wheels[1].set_pos([wheel_pos[1][0] + self.pos[0], wheel_pos[1][1] + self.pos[1]])
        self.wheels[2].set_pos([wheel_pos[2][0] + self.pos[0], wheel_pos[2][1] + self.pos[1]])
        self.wheels[3].set_pos([wheel_pos[3][0] + self.pos[0], wheel_pos[3][1] + self.pos[1]])
        # Apply new wheel rotations (wheels 2 and 3 are the steered pair).
        self.wheels[0].set_ang(self.ang)
        self.wheels[1].set_ang(self.ang)
        self.wheels[2].set_ang(self.wheel_ang + self.ang)
        self.wheels[3].set_ang(self.wheel_ang + self.ang)

    def render(self):
        """Draw the four wheels, then the car body on top."""
        for wheel in self.wheels:
            wheel.render()
        pygame.draw.polygon(self.window, self.colours["red"], self.points_mat)

    def display_debug(self):
        """Overlay the axle normal lines and the current turning point."""
        pygame.draw.line(self.window, self.colours["yellow"], self.pos + self.rear_axel + 10000 * self.rear_norm,
                         self.pos + self.rear_axel - 10000 * self.rear_norm)
        pygame.draw.line(self.window, self.colours["yellow"], self.pos + self.front_axel + 10000 * self.front_norm,
                         self.pos + self.front_axel - 10000 * self.front_norm)
        pygame.draw.circle(self.window, self.colours["blue"], [int(self.turning_point[0]), int(self.turning_point[1])], 3)
#----------------------------------------------------------------------------------------------------------------------------------
def main():
    """Script entry point; the interactive demo loop is not implemented yet."""
    pass


if __name__ == "__main__":
    main()
import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class OverdraftOverdraftFeeChargeDetail(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type for each python attribute; consumed by the
    # generated client's (de)serialisation machinery (e.g. to_dict).
    openapi_types = {
        'application_frequency': 'str',
        'calculation_frequency': 'str',
        'fee_amount': 'str',
        'fee_rate': 'str',
        'fee_rate_type': 'str',
        'fee_type': 'str',
        'incremental_borrowing_amount': 'str',
        'notes': 'list[str]',
        'other_application_frequency': 'OtherApplicationFrequency',
        'other_calculation_frequency': 'OtherCalculationFrequency',
        'other_fee_rate_type': 'OtherFeeRateType',
        'other_fee_type': 'OtherFeeType',
        'overdraft_control_indicator': 'bool',
        'overdraft_fee_charge_cap': 'OverdraftOverdraftFeeChargeCap'
    }
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'application_frequency': 'ApplicationFrequency',
        'calculation_frequency': 'CalculationFrequency',
        'fee_amount': 'FeeAmount',
        'fee_rate': 'FeeRate',
        'fee_rate_type': 'FeeRateType',
        'fee_type': 'FeeType',
        'incremental_borrowing_amount': 'IncrementalBorrowingAmount',
        'notes': 'Notes',
        'other_application_frequency': 'OtherApplicationFrequency',
        'other_calculation_frequency': 'OtherCalculationFrequency',
        'other_fee_rate_type': 'OtherFeeRateType',
        'other_fee_type': 'OtherFeeType',
        'overdraft_control_indicator': 'OverdraftControlIndicator',
        'overdraft_fee_charge_cap': 'OverdraftFeeChargeCap'
    }
    def __init__(self, application_frequency=None, calculation_frequency=None, fee_amount=None, fee_rate=None, fee_rate_type=None, fee_type=None, incremental_borrowing_amount=None, notes=None, other_application_frequency=None, other_calculation_frequency=None, other_fee_rate_type=None, other_fee_type=None, overdraft_control_indicator=None, overdraft_fee_charge_cap=None, local_vars_configuration=None): # noqa: E501
        """OverdraftOverdraftFeeChargeDetail - a model defined in OpenAPI""" # noqa: E501
        # The Configuration's client_side_validation flag gates the enum
        # checks performed in the property setters below.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        # Backing fields for the generated property accessors.
        self._application_frequency = None
        self._calculation_frequency = None
        self._fee_amount = None
        self._fee_rate = None
        self._fee_rate_type = None
        self._fee_type = None
        self._incremental_borrowing_amount = None
        self._notes = None
        self._other_application_frequency = None
        self._other_calculation_frequency = None
        self._other_fee_rate_type = None
        self._other_fee_type = None
        self._overdraft_control_indicator = None
        self._overdraft_fee_charge_cap = None
        self.discriminator = None
        # Route every supplied value through its property setter so that
        # client-side validation (allowed enum values) is applied.
        if application_frequency is not None:
            self.application_frequency = application_frequency
        if calculation_frequency is not None:
            self.calculation_frequency = calculation_frequency
        if fee_amount is not None:
            self.fee_amount = fee_amount
        if fee_rate is not None:
            self.fee_rate = fee_rate
        if fee_rate_type is not None:
            self.fee_rate_type = fee_rate_type
        if fee_type is not None:
            self.fee_type = fee_type
        if incremental_borrowing_amount is not None:
            self.incremental_borrowing_amount = incremental_borrowing_amount
        if notes is not None:
            self.notes = notes
        if other_application_frequency is not None:
            self.other_application_frequency = other_application_frequency
        if other_calculation_frequency is not None:
            self.other_calculation_frequency = other_calculation_frequency
        if other_fee_rate_type is not None:
            self.other_fee_rate_type = other_fee_rate_type
        if other_fee_type is not None:
            self.other_fee_type = other_fee_type
        if overdraft_control_indicator is not None:
            self.overdraft_control_indicator = overdraft_control_indicator
        if overdraft_fee_charge_cap is not None:
            self.overdraft_fee_charge_cap = overdraft_fee_charge_cap
    # --- Generated property accessors (auto generated; do not edit). ---
    # Setters for enum-typed fields validate against allowed_values when
    # self.local_vars_configuration.client_side_validation is enabled.

    @property
    def application_frequency(self):
        """Gets the application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._application_frequency

    @application_frequency.setter
    def application_frequency(self, application_frequency):
        """Sets the application_frequency of this OverdraftOverdraftFeeChargeDetail.

        :param application_frequency: The application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        allowed_values = ["AccountClosing", "AccountOpening", "AcademicTerm", "ChargingPeriod", "Daily", "PerItem", "Monthly", "OnAccountAnniversary", "Other", "PerHour", "PerOccurrence", "PerSheet", "PerTransaction", "PerTransactionAmount", "PerTransactionPercentage", "Quarterly", "SixMonthly", "StatementMonthly", "Weekly", "Yearly"] # noqa: E501
        if self.local_vars_configuration.client_side_validation and application_frequency not in allowed_values: # noqa: E501
            raise ValueError(
                "Invalid value for `application_frequency` ({0}), must be one of {1}" # noqa: E501
                .format(application_frequency, allowed_values)
            )
        self._application_frequency = application_frequency

    @property
    def calculation_frequency(self):
        """Gets the calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._calculation_frequency

    @calculation_frequency.setter
    def calculation_frequency(self, calculation_frequency):
        """Sets the calculation_frequency of this OverdraftOverdraftFeeChargeDetail.

        :param calculation_frequency: The calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        allowed_values = ["AccountClosing", "AccountOpening", "AcademicTerm", "ChargingPeriod", "Daily", "PerItem", "Monthly", "OnAccountAnniversary", "Other", "PerHour", "PerOccurrence", "PerSheet", "PerTransaction", "PerTransactionAmount", "PerTransactionPercentage", "Quarterly", "SixMonthly", "StatementMonthly", "Weekly", "Yearly"] # noqa: E501
        if self.local_vars_configuration.client_side_validation and calculation_frequency not in allowed_values: # noqa: E501
            raise ValueError(
                "Invalid value for `calculation_frequency` ({0}), must be one of {1}" # noqa: E501
                .format(calculation_frequency, allowed_values)
            )
        self._calculation_frequency = calculation_frequency

    @property
    def fee_amount(self):
        """Gets the fee_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The fee_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._fee_amount

    @fee_amount.setter
    def fee_amount(self, fee_amount):
        """Sets the fee_amount of this OverdraftOverdraftFeeChargeDetail.

        :param fee_amount: The fee_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        self._fee_amount = fee_amount

    @property
    def fee_rate(self):
        """Gets the fee_rate of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The fee_rate of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._fee_rate

    @fee_rate.setter
    def fee_rate(self, fee_rate):
        """Sets the fee_rate of this OverdraftOverdraftFeeChargeDetail.

        :param fee_rate: The fee_rate of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        self._fee_rate = fee_rate

    @property
    def fee_rate_type(self):
        """Gets the fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._fee_rate_type

    @fee_rate_type.setter
    def fee_rate_type(self, fee_rate_type):
        """Sets the fee_rate_type of this OverdraftOverdraftFeeChargeDetail.

        :param fee_rate_type: The fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        allowed_values = ["LinkedBaseRate", "Gross", "Net", "Other"] # noqa: E501
        if self.local_vars_configuration.client_side_validation and fee_rate_type not in allowed_values: # noqa: E501
            raise ValueError(
                "Invalid value for `fee_rate_type` ({0}), must be one of {1}" # noqa: E501
                .format(fee_rate_type, allowed_values)
            )
        self._fee_rate_type = fee_rate_type

    @property
    def fee_type(self):
        """Gets the fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._fee_type

    @fee_type.setter
    def fee_type(self, fee_type):
        """Sets the fee_type of this OverdraftOverdraftFeeChargeDetail.

        :param fee_type: The fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        allowed_values = ["ArrangedOverdraft", "EmergencyBorrowing", "BorrowingItem", "OverdraftRenewal", "AnnualReview", "OverdraftSetup", "Surcharge", "TempOverdraft", "UnauthorisedBorrowing", "UnauthorisedPaidTrans", "Other", "UnauthorisedUnpaidTrans"] # noqa: E501
        if self.local_vars_configuration.client_side_validation and fee_type not in allowed_values: # noqa: E501
            raise ValueError(
                "Invalid value for `fee_type` ({0}), must be one of {1}" # noqa: E501
                .format(fee_type, allowed_values)
            )
        self._fee_type = fee_type

    @property
    def incremental_borrowing_amount(self):
        """Gets the incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: str
        """
        return self._incremental_borrowing_amount

    @incremental_borrowing_amount.setter
    def incremental_borrowing_amount(self, incremental_borrowing_amount):
        """Sets the incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail.

        :param incremental_borrowing_amount: The incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: str
        """
        self._incremental_borrowing_amount = incremental_borrowing_amount

    @property
    def notes(self):
        """Gets the notes of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The notes of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: list[str]
        """
        return self._notes

    @notes.setter
    def notes(self, notes):
        """Sets the notes of this OverdraftOverdraftFeeChargeDetail.

        :param notes: The notes of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: list[str]
        """
        self._notes = notes

    @property
    def other_application_frequency(self):
        """Gets the other_application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The other_application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: OtherApplicationFrequency
        """
        return self._other_application_frequency

    @other_application_frequency.setter
    def other_application_frequency(self, other_application_frequency):
        """Sets the other_application_frequency of this OverdraftOverdraftFeeChargeDetail.

        :param other_application_frequency: The other_application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: OtherApplicationFrequency
        """
        self._other_application_frequency = other_application_frequency

    @property
    def other_calculation_frequency(self):
        """Gets the other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: OtherCalculationFrequency
        """
        return self._other_calculation_frequency

    @other_calculation_frequency.setter
    def other_calculation_frequency(self, other_calculation_frequency):
        """Sets the other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail.

        :param other_calculation_frequency: The other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: OtherCalculationFrequency
        """
        self._other_calculation_frequency = other_calculation_frequency

    @property
    def other_fee_rate_type(self):
        """Gets the other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: OtherFeeRateType
        """
        return self._other_fee_rate_type

    @other_fee_rate_type.setter
    def other_fee_rate_type(self, other_fee_rate_type):
        """Sets the other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail.

        :param other_fee_rate_type: The other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: OtherFeeRateType
        """
        self._other_fee_rate_type = other_fee_rate_type

    @property
    def other_fee_type(self):
        """Gets the other_fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The other_fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: OtherFeeType
        """
        return self._other_fee_type

    @other_fee_type.setter
    def other_fee_type(self, other_fee_type):
        """Sets the other_fee_type of this OverdraftOverdraftFeeChargeDetail.

        :param other_fee_type: The other_fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: OtherFeeType
        """
        self._other_fee_type = other_fee_type

    @property
    def overdraft_control_indicator(self):
        """Gets the overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: bool
        """
        return self._overdraft_control_indicator

    @overdraft_control_indicator.setter
    def overdraft_control_indicator(self, overdraft_control_indicator):
        """Sets the overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail.

        :param overdraft_control_indicator: The overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: bool
        """
        self._overdraft_control_indicator = overdraft_control_indicator

    @property
    def overdraft_fee_charge_cap(self):
        """Gets the overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail. # noqa: E501

        :return: The overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :rtype: OverdraftOverdraftFeeChargeCap
        """
        return self._overdraft_fee_charge_cap

    @overdraft_fee_charge_cap.setter
    def overdraft_fee_charge_cap(self, overdraft_fee_charge_cap):
        """Sets the overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail.

        :param overdraft_fee_charge_cap: The overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
        :type: OverdraftOverdraftFeeChargeCap
        """
        self._overdraft_fee_charge_cap = overdraft_fee_charge_cap
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OverdraftOverdraftFeeChargeDetail):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OverdraftOverdraftFeeChargeDetail):
return True
return self.to_dict() != other.to_dict() | sdk/yapily/models/overdraft_overdraft_fee_charge_detail.py | import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class OverdraftOverdraftFeeChargeDetail(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'application_frequency': 'str',
'calculation_frequency': 'str',
'fee_amount': 'str',
'fee_rate': 'str',
'fee_rate_type': 'str',
'fee_type': 'str',
'incremental_borrowing_amount': 'str',
'notes': 'list[str]',
'other_application_frequency': 'OtherApplicationFrequency',
'other_calculation_frequency': 'OtherCalculationFrequency',
'other_fee_rate_type': 'OtherFeeRateType',
'other_fee_type': 'OtherFeeType',
'overdraft_control_indicator': 'bool',
'overdraft_fee_charge_cap': 'OverdraftOverdraftFeeChargeCap'
}
attribute_map = {
'application_frequency': 'ApplicationFrequency',
'calculation_frequency': 'CalculationFrequency',
'fee_amount': 'FeeAmount',
'fee_rate': 'FeeRate',
'fee_rate_type': 'FeeRateType',
'fee_type': 'FeeType',
'incremental_borrowing_amount': 'IncrementalBorrowingAmount',
'notes': 'Notes',
'other_application_frequency': 'OtherApplicationFrequency',
'other_calculation_frequency': 'OtherCalculationFrequency',
'other_fee_rate_type': 'OtherFeeRateType',
'other_fee_type': 'OtherFeeType',
'overdraft_control_indicator': 'OverdraftControlIndicator',
'overdraft_fee_charge_cap': 'OverdraftFeeChargeCap'
}
def __init__(self, application_frequency=None, calculation_frequency=None, fee_amount=None, fee_rate=None, fee_rate_type=None, fee_type=None, incremental_borrowing_amount=None, notes=None, other_application_frequency=None, other_calculation_frequency=None, other_fee_rate_type=None, other_fee_type=None, overdraft_control_indicator=None, overdraft_fee_charge_cap=None, local_vars_configuration=None): # noqa: E501
"""OverdraftOverdraftFeeChargeDetail - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._application_frequency = None
self._calculation_frequency = None
self._fee_amount = None
self._fee_rate = None
self._fee_rate_type = None
self._fee_type = None
self._incremental_borrowing_amount = None
self._notes = None
self._other_application_frequency = None
self._other_calculation_frequency = None
self._other_fee_rate_type = None
self._other_fee_type = None
self._overdraft_control_indicator = None
self._overdraft_fee_charge_cap = None
self.discriminator = None
if application_frequency is not None:
self.application_frequency = application_frequency
if calculation_frequency is not None:
self.calculation_frequency = calculation_frequency
if fee_amount is not None:
self.fee_amount = fee_amount
if fee_rate is not None:
self.fee_rate = fee_rate
if fee_rate_type is not None:
self.fee_rate_type = fee_rate_type
if fee_type is not None:
self.fee_type = fee_type
if incremental_borrowing_amount is not None:
self.incremental_borrowing_amount = incremental_borrowing_amount
if notes is not None:
self.notes = notes
if other_application_frequency is not None:
self.other_application_frequency = other_application_frequency
if other_calculation_frequency is not None:
self.other_calculation_frequency = other_calculation_frequency
if other_fee_rate_type is not None:
self.other_fee_rate_type = other_fee_rate_type
if other_fee_type is not None:
self.other_fee_type = other_fee_type
if overdraft_control_indicator is not None:
self.overdraft_control_indicator = overdraft_control_indicator
if overdraft_fee_charge_cap is not None:
self.overdraft_fee_charge_cap = overdraft_fee_charge_cap
@property
def application_frequency(self):
"""Gets the application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._application_frequency
@application_frequency.setter
def application_frequency(self, application_frequency):
"""Sets the application_frequency of this OverdraftOverdraftFeeChargeDetail.
:param application_frequency: The application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
allowed_values = ["AccountClosing", "AccountOpening", "AcademicTerm", "ChargingPeriod", "Daily", "PerItem", "Monthly", "OnAccountAnniversary", "Other", "PerHour", "PerOccurrence", "PerSheet", "PerTransaction", "PerTransactionAmount", "PerTransactionPercentage", "Quarterly", "SixMonthly", "StatementMonthly", "Weekly", "Yearly"] # noqa: E501
if self.local_vars_configuration.client_side_validation and application_frequency not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `application_frequency` ({0}), must be one of {1}" # noqa: E501
.format(application_frequency, allowed_values)
)
self._application_frequency = application_frequency
@property
def calculation_frequency(self):
"""Gets the calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._calculation_frequency
@calculation_frequency.setter
def calculation_frequency(self, calculation_frequency):
"""Sets the calculation_frequency of this OverdraftOverdraftFeeChargeDetail.
:param calculation_frequency: The calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
allowed_values = ["AccountClosing", "AccountOpening", "AcademicTerm", "ChargingPeriod", "Daily", "PerItem", "Monthly", "OnAccountAnniversary", "Other", "PerHour", "PerOccurrence", "PerSheet", "PerTransaction", "PerTransactionAmount", "PerTransactionPercentage", "Quarterly", "SixMonthly", "StatementMonthly", "Weekly", "Yearly"] # noqa: E501
if self.local_vars_configuration.client_side_validation and calculation_frequency not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `calculation_frequency` ({0}), must be one of {1}" # noqa: E501
.format(calculation_frequency, allowed_values)
)
self._calculation_frequency = calculation_frequency
@property
def fee_amount(self):
"""Gets the fee_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The fee_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._fee_amount
@fee_amount.setter
def fee_amount(self, fee_amount):
"""Sets the fee_amount of this OverdraftOverdraftFeeChargeDetail.
:param fee_amount: The fee_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
self._fee_amount = fee_amount
@property
def fee_rate(self):
"""Gets the fee_rate of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The fee_rate of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._fee_rate
@fee_rate.setter
def fee_rate(self, fee_rate):
"""Sets the fee_rate of this OverdraftOverdraftFeeChargeDetail.
:param fee_rate: The fee_rate of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
self._fee_rate = fee_rate
@property
def fee_rate_type(self):
"""Gets the fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._fee_rate_type
@fee_rate_type.setter
def fee_rate_type(self, fee_rate_type):
"""Sets the fee_rate_type of this OverdraftOverdraftFeeChargeDetail.
:param fee_rate_type: The fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
allowed_values = ["LinkedBaseRate", "Gross", "Net", "Other"] # noqa: E501
if self.local_vars_configuration.client_side_validation and fee_rate_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `fee_rate_type` ({0}), must be one of {1}" # noqa: E501
.format(fee_rate_type, allowed_values)
)
self._fee_rate_type = fee_rate_type
@property
def fee_type(self):
"""Gets the fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._fee_type
@fee_type.setter
def fee_type(self, fee_type):
"""Sets the fee_type of this OverdraftOverdraftFeeChargeDetail.
:param fee_type: The fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
allowed_values = ["ArrangedOverdraft", "EmergencyBorrowing", "BorrowingItem", "OverdraftRenewal", "AnnualReview", "OverdraftSetup", "Surcharge", "TempOverdraft", "UnauthorisedBorrowing", "UnauthorisedPaidTrans", "Other", "UnauthorisedUnpaidTrans"] # noqa: E501
if self.local_vars_configuration.client_side_validation and fee_type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `fee_type` ({0}), must be one of {1}" # noqa: E501
.format(fee_type, allowed_values)
)
self._fee_type = fee_type
@property
def incremental_borrowing_amount(self):
"""Gets the incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: str
"""
return self._incremental_borrowing_amount
@incremental_borrowing_amount.setter
def incremental_borrowing_amount(self, incremental_borrowing_amount):
"""Sets the incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail.
:param incremental_borrowing_amount: The incremental_borrowing_amount of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: str
"""
self._incremental_borrowing_amount = incremental_borrowing_amount
@property
def notes(self):
"""Gets the notes of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The notes of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: list[str]
"""
return self._notes
@notes.setter
def notes(self, notes):
"""Sets the notes of this OverdraftOverdraftFeeChargeDetail.
:param notes: The notes of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: list[str]
"""
self._notes = notes
@property
def other_application_frequency(self):
"""Gets the other_application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The other_application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: OtherApplicationFrequency
"""
return self._other_application_frequency
@other_application_frequency.setter
def other_application_frequency(self, other_application_frequency):
"""Sets the other_application_frequency of this OverdraftOverdraftFeeChargeDetail.
:param other_application_frequency: The other_application_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: OtherApplicationFrequency
"""
self._other_application_frequency = other_application_frequency
@property
def other_calculation_frequency(self):
"""Gets the other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: OtherCalculationFrequency
"""
return self._other_calculation_frequency
@other_calculation_frequency.setter
def other_calculation_frequency(self, other_calculation_frequency):
"""Sets the other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail.
:param other_calculation_frequency: The other_calculation_frequency of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: OtherCalculationFrequency
"""
self._other_calculation_frequency = other_calculation_frequency
@property
def other_fee_rate_type(self):
"""Gets the other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: OtherFeeRateType
"""
return self._other_fee_rate_type
@other_fee_rate_type.setter
def other_fee_rate_type(self, other_fee_rate_type):
"""Sets the other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail.
:param other_fee_rate_type: The other_fee_rate_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: OtherFeeRateType
"""
self._other_fee_rate_type = other_fee_rate_type
@property
def other_fee_type(self):
"""Gets the other_fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The other_fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: OtherFeeType
"""
return self._other_fee_type
@other_fee_type.setter
def other_fee_type(self, other_fee_type):
"""Sets the other_fee_type of this OverdraftOverdraftFeeChargeDetail.
:param other_fee_type: The other_fee_type of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: OtherFeeType
"""
self._other_fee_type = other_fee_type
@property
def overdraft_control_indicator(self):
"""Gets the overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: bool
"""
return self._overdraft_control_indicator
@overdraft_control_indicator.setter
def overdraft_control_indicator(self, overdraft_control_indicator):
"""Sets the overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail.
:param overdraft_control_indicator: The overdraft_control_indicator of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: bool
"""
self._overdraft_control_indicator = overdraft_control_indicator
@property
def overdraft_fee_charge_cap(self):
"""Gets the overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:return: The overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:rtype: OverdraftOverdraftFeeChargeCap
"""
return self._overdraft_fee_charge_cap
@overdraft_fee_charge_cap.setter
def overdraft_fee_charge_cap(self, overdraft_fee_charge_cap):
"""Sets the overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail.
:param overdraft_fee_charge_cap: The overdraft_fee_charge_cap of this OverdraftOverdraftFeeChargeDetail. # noqa: E501
:type: OverdraftOverdraftFeeChargeCap
"""
self._overdraft_fee_charge_cap = overdraft_fee_charge_cap
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OverdraftOverdraftFeeChargeDetail):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, OverdraftOverdraftFeeChargeDetail):
return True
return self.to_dict() != other.to_dict() | 0.728555 | 0.14978 |
import numpy as np
import matplotlib
from matplotlib import pyplot as pl, cm, colors
# Function to extract a colourmap from cmap object, from https://gist.github.com/denis-bz/8052855
def get_cmap( cmap, name=None, n=256 ):
""" in: a name "Blues" "BuGn_r" ... of a builtin cmap (case-sensitive)
or a filename, np.loadtxt() n x 3 or 4 ints 0..255 or floats 0..1
or a cmap already
or a numpy array.
See http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
or in IPython, pl.cm.<tab>
"""
if isinstance( cmap, colors.Colormap ):
return cmap
if isinstance( cmap, basestring ):
if cmap in cm.cmap_d:
return pl.get_cmap( cmap ) # "Blues" ...
A = np.loadtxt( cmap, delimiter=None ) # None: white space
name = name or cmap.split("/")[-1] .split(".")[0] # .../xx.csv -> xx
else:
A = cmap # numpy array or array-like
return array_cmap( A, name, n=n )
# Function to create a truncated version of an existing colourmap, from https://gist.github.com/denis-bz/8052855
def truncate_colormap( cmap, minval=0.0, maxval=1.0, n=256 ):
""" mycolormap = truncate_colormap(
cmap name or file or ndarray,
minval=0.2, maxval=0.8 ): subset
minval=1, maxval=0 ) : reverse
by unutbu http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
"""
cmap = get_cmap( cmap )
name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval)
return colors.LinearSegmentedColormap.from_list(
name, cmap( np.linspace( minval, maxval, n )))
# Function to apply an arbitrary function to a colourmap
def cmap_map(function, cmap):
""" Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
This routine will break any discontinuous points in a colormap.
by http://scipy-cookbook.readthedocs.io/items/Matplotlib_ColormapTransformations.html
"""
cdict = cmap._segmentdata
step_dict = {}
# Firt get the list of points where the segments start or end
for key in ('red', 'green', 'blue'):
step_dict[key] = list(map(lambda x: x[0], cdict[key]))
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
old_LUT = np.array(list(map(reduced_cmap, step_list)))
new_LUT = np.array(list(map(function, old_LUT)))
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i, key in enumerate(['red','green','blue']):
this_cdict = {}
for j, step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j, i]
elif new_LUT[j,i] != old_LUT[j, i]:
this_cdict[step] = new_LUT[j, i]
colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
colorvector.sort()
cdict[key] = colorvector
return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024) | ChrisFuncs/FromGitHub/denis_bz.py | import numpy as np
import matplotlib
from matplotlib import pyplot as pl, cm, colors
# Function to extract a colourmap from cmap object, from https://gist.github.com/denis-bz/8052855
def get_cmap( cmap, name=None, n=256 ):
""" in: a name "Blues" "BuGn_r" ... of a builtin cmap (case-sensitive)
or a filename, np.loadtxt() n x 3 or 4 ints 0..255 or floats 0..1
or a cmap already
or a numpy array.
See http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
or in IPython, pl.cm.<tab>
"""
if isinstance( cmap, colors.Colormap ):
return cmap
if isinstance( cmap, basestring ):
if cmap in cm.cmap_d:
return pl.get_cmap( cmap ) # "Blues" ...
A = np.loadtxt( cmap, delimiter=None ) # None: white space
name = name or cmap.split("/")[-1] .split(".")[0] # .../xx.csv -> xx
else:
A = cmap # numpy array or array-like
return array_cmap( A, name, n=n )
# Function to create a truncated version of an existing colourmap, from https://gist.github.com/denis-bz/8052855
def truncate_colormap( cmap, minval=0.0, maxval=1.0, n=256 ):
""" mycolormap = truncate_colormap(
cmap name or file or ndarray,
minval=0.2, maxval=0.8 ): subset
minval=1, maxval=0 ) : reverse
by unutbu http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
"""
cmap = get_cmap( cmap )
name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval)
return colors.LinearSegmentedColormap.from_list(
name, cmap( np.linspace( minval, maxval, n )))
# Function to apply an arbitrary function to a colourmap
def cmap_map(function, cmap):
""" Applies function (which should operate on vectors of shape 3: [r, g, b]), on colormap cmap.
This routine will break any discontinuous points in a colormap.
by http://scipy-cookbook.readthedocs.io/items/Matplotlib_ColormapTransformations.html
"""
cdict = cmap._segmentdata
step_dict = {}
# Firt get the list of points where the segments start or end
for key in ('red', 'green', 'blue'):
step_dict[key] = list(map(lambda x: x[0], cdict[key]))
step_list = sum(step_dict.values(), [])
step_list = np.array(list(set(step_list)))
# Then compute the LUT, and apply the function to the LUT
reduced_cmap = lambda step : np.array(cmap(step)[0:3])
old_LUT = np.array(list(map(reduced_cmap, step_list)))
new_LUT = np.array(list(map(function, old_LUT)))
# Now try to make a minimal segment definition of the new LUT
cdict = {}
for i, key in enumerate(['red','green','blue']):
this_cdict = {}
for j, step in enumerate(step_list):
if step in step_dict[key]:
this_cdict[step] = new_LUT[j, i]
elif new_LUT[j,i] != old_LUT[j, i]:
this_cdict[step] = new_LUT[j, i]
colorvector = list(map(lambda x: x + (x[1], ), this_cdict.items()))
colorvector.sort()
cdict[key] = colorvector
return matplotlib.colors.LinearSegmentedColormap('colormap',cdict,1024) | 0.571049 | 0.495667 |
import unittest
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.traversing.dfs import DFSWithStack
from graphtheory.traversing.dfs import DFSWithRecursion
from graphtheory.traversing.dfs import SimpleDFS
# 0---1 2---3
# | | / | / |
# 4 5---6---7
class TestDFS(unittest.TestCase):
def setUp(self):
# The graph from Cormen p.607
self.N = 8 # number of nodes
self.G = Graph(self.N)
self.nodes = range(self.N)
self.edges = [
Edge(0, 4, 2), Edge(0, 1, 3), Edge(1, 5, 4), Edge(5, 2, 5),
Edge(5, 6, 6), Edge(2, 6, 7), Edge(2, 3, 8), Edge(6, 3, 9),
Edge(6, 7, 10), Edge(3, 7, 11)]
for node in self.nodes:
self.G.add_node(node)
for edge in self.edges:
self.G.add_edge(edge)
#print self.G
#self.G.show()
def test_dfs_with_stack(self):
self.assertEqual(self.G.v(), self.N)
pre_order = []
post_order = []
algorithm = DFSWithStack(self.G)
algorithm.run(1, pre_action=lambda node: pre_order.append(node),
post_action=lambda node: post_order.append(node))
pre_order_expected = [1, 0, 5, 2, 6, 3, 7, 4]
post_order_expected = [1, 5, 6, 7, 3, 2, 0, 4]
self.assertEqual(pre_order, pre_order_expected)
self.assertEqual(post_order, post_order_expected)
dd_expected = {0: 2, 1: 1, 2: 5, 3: 8, 4: 14, 5: 3, 6: 6, 7: 9}
ff_expected = {0: 15, 1: 4, 2: 13, 3: 12, 4: 16, 5: 7, 6: 10, 7: 11}
self.assertEqual(algorithm.dd, dd_expected)
self.assertEqual(algorithm.ff, ff_expected)
parent_expected = {0: 1, 1: None, 2: 5, 3: 6, 4: 0, 5: 1, 6: 5, 7: 6}
self.assertEqual(algorithm.parent, parent_expected)
self.assertEqual(algorithm.path(1, 7), [1, 5, 6, 7])
self.assertEqual(algorithm.path(1, 4), [1, 0, 4])
#algorithm.dag.show()
self.assertEqual(algorithm.dag.v(), self.N)
self.assertEqual(algorithm.dag.e(), self.N-1)
self.assertTrue(algorithm.dag.is_directed())
for edge in algorithm.dag.iteredges():
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(edge.weight, self.G.weight(edge))
def test_dfs_with_recursion(self):
self.assertEqual(self.G.v(), self.N)
pre_order = []
post_order = []
algorithm = DFSWithRecursion(self.G)
algorithm.run(1, pre_action=lambda node: pre_order.append(node),
post_action=lambda node: post_order.append(node))
pre_order_expected = [1, 0, 4, 5, 2, 3, 6, 7]
post_order_expected = [4, 0, 7, 6, 3, 2, 5, 1]
self.assertEqual(pre_order, pre_order_expected)
self.assertEqual(post_order, post_order_expected)
dd_expected = {0: 2, 1: 1, 2: 7, 3: 8, 4: 3, 5: 6, 6: 9, 7: 10}
ff_expected = {0: 5, 1: 16, 2: 14, 3: 13, 4: 4, 5: 15, 6: 12, 7: 11}
self.assertEqual(algorithm.dd, dd_expected)
self.assertEqual(algorithm.ff, ff_expected)
parent_expected = {0: 1, 1: None, 2: 5, 3: 2, 4: 0, 5: 1, 6: 3, 7: 6}
self.assertEqual(algorithm.parent, parent_expected)
self.assertEqual(algorithm.path(1, 7), [1, 5, 2, 3, 6, 7])
self.assertEqual(algorithm.path(1, 4), [1, 0, 4])
#algorithm.dag.show()
self.assertEqual(algorithm.dag.v(), self.N)
self.assertEqual(algorithm.dag.e(), self.N-1)
self.assertTrue(algorithm.dag.is_directed())
for edge in algorithm.dag.iteredges():
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(edge.weight, self.G.weight(edge))
def test_simple_dfs_with_recursion(self):
self.assertEqual(self.G.v(), self.N)
pre_order = []
post_order = []
algorithm = SimpleDFS(self.G)
algorithm.run(1, pre_action=lambda node: pre_order.append(node),
post_action=lambda node: post_order.append(node))
pre_order_expected = [1, 0, 4, 5, 2, 3, 6, 7]
post_order_expected = [4, 0, 7, 6, 3, 2, 5, 1]
self.assertEqual(pre_order, pre_order_expected)
self.assertEqual(post_order, post_order_expected)
parent_expected = {0: 1, 1: None, 2: 5, 3: 2, 4: 0, 5: 1, 6: 3, 7: 6}
self.assertEqual(algorithm.parent, parent_expected)
self.assertEqual(algorithm.path(1, 7), [1, 5, 2, 3, 6, 7])
self.assertEqual(algorithm.path(1, 4), [1, 0, 4])
#algorithm.dag.show()
self.assertEqual(algorithm.dag.v(), self.N)
self.assertEqual(algorithm.dag.e(), self.N-1)
self.assertTrue(algorithm.dag.is_directed())
for edge in algorithm.dag.iteredges():
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(edge.weight, self.G.weight(edge))
def tearDown(self): pass
if __name__ == "__main__":
unittest.main()
# EOF | graphtheory/traversing/tests/test_dfs.py |
import unittest
from graphtheory.structures.edges import Edge
from graphtheory.structures.graphs import Graph
from graphtheory.traversing.dfs import DFSWithStack
from graphtheory.traversing.dfs import DFSWithRecursion
from graphtheory.traversing.dfs import SimpleDFS
# 0---1 2---3
# | | / | / |
# 4 5---6---7
class TestDFS(unittest.TestCase):
def setUp(self):
# The graph from Cormen p.607
self.N = 8 # number of nodes
self.G = Graph(self.N)
self.nodes = range(self.N)
self.edges = [
Edge(0, 4, 2), Edge(0, 1, 3), Edge(1, 5, 4), Edge(5, 2, 5),
Edge(5, 6, 6), Edge(2, 6, 7), Edge(2, 3, 8), Edge(6, 3, 9),
Edge(6, 7, 10), Edge(3, 7, 11)]
for node in self.nodes:
self.G.add_node(node)
for edge in self.edges:
self.G.add_edge(edge)
#print self.G
#self.G.show()
def test_dfs_with_stack(self):
self.assertEqual(self.G.v(), self.N)
pre_order = []
post_order = []
algorithm = DFSWithStack(self.G)
algorithm.run(1, pre_action=lambda node: pre_order.append(node),
post_action=lambda node: post_order.append(node))
pre_order_expected = [1, 0, 5, 2, 6, 3, 7, 4]
post_order_expected = [1, 5, 6, 7, 3, 2, 0, 4]
self.assertEqual(pre_order, pre_order_expected)
self.assertEqual(post_order, post_order_expected)
dd_expected = {0: 2, 1: 1, 2: 5, 3: 8, 4: 14, 5: 3, 6: 6, 7: 9}
ff_expected = {0: 15, 1: 4, 2: 13, 3: 12, 4: 16, 5: 7, 6: 10, 7: 11}
self.assertEqual(algorithm.dd, dd_expected)
self.assertEqual(algorithm.ff, ff_expected)
parent_expected = {0: 1, 1: None, 2: 5, 3: 6, 4: 0, 5: 1, 6: 5, 7: 6}
self.assertEqual(algorithm.parent, parent_expected)
self.assertEqual(algorithm.path(1, 7), [1, 5, 6, 7])
self.assertEqual(algorithm.path(1, 4), [1, 0, 4])
#algorithm.dag.show()
self.assertEqual(algorithm.dag.v(), self.N)
self.assertEqual(algorithm.dag.e(), self.N-1)
self.assertTrue(algorithm.dag.is_directed())
for edge in algorithm.dag.iteredges():
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(edge.weight, self.G.weight(edge))
def test_dfs_with_recursion(self):
self.assertEqual(self.G.v(), self.N)
pre_order = []
post_order = []
algorithm = DFSWithRecursion(self.G)
algorithm.run(1, pre_action=lambda node: pre_order.append(node),
post_action=lambda node: post_order.append(node))
pre_order_expected = [1, 0, 4, 5, 2, 3, 6, 7]
post_order_expected = [4, 0, 7, 6, 3, 2, 5, 1]
self.assertEqual(pre_order, pre_order_expected)
self.assertEqual(post_order, post_order_expected)
dd_expected = {0: 2, 1: 1, 2: 7, 3: 8, 4: 3, 5: 6, 6: 9, 7: 10}
ff_expected = {0: 5, 1: 16, 2: 14, 3: 13, 4: 4, 5: 15, 6: 12, 7: 11}
self.assertEqual(algorithm.dd, dd_expected)
self.assertEqual(algorithm.ff, ff_expected)
parent_expected = {0: 1, 1: None, 2: 5, 3: 2, 4: 0, 5: 1, 6: 3, 7: 6}
self.assertEqual(algorithm.parent, parent_expected)
self.assertEqual(algorithm.path(1, 7), [1, 5, 2, 3, 6, 7])
self.assertEqual(algorithm.path(1, 4), [1, 0, 4])
#algorithm.dag.show()
self.assertEqual(algorithm.dag.v(), self.N)
self.assertEqual(algorithm.dag.e(), self.N-1)
self.assertTrue(algorithm.dag.is_directed())
for edge in algorithm.dag.iteredges():
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(edge.weight, self.G.weight(edge))
def test_simple_dfs_with_recursion(self):
self.assertEqual(self.G.v(), self.N)
pre_order = []
post_order = []
algorithm = SimpleDFS(self.G)
algorithm.run(1, pre_action=lambda node: pre_order.append(node),
post_action=lambda node: post_order.append(node))
pre_order_expected = [1, 0, 4, 5, 2, 3, 6, 7]
post_order_expected = [4, 0, 7, 6, 3, 2, 5, 1]
self.assertEqual(pre_order, pre_order_expected)
self.assertEqual(post_order, post_order_expected)
parent_expected = {0: 1, 1: None, 2: 5, 3: 2, 4: 0, 5: 1, 6: 3, 7: 6}
self.assertEqual(algorithm.parent, parent_expected)
self.assertEqual(algorithm.path(1, 7), [1, 5, 2, 3, 6, 7])
self.assertEqual(algorithm.path(1, 4), [1, 0, 4])
#algorithm.dag.show()
self.assertEqual(algorithm.dag.v(), self.N)
self.assertEqual(algorithm.dag.e(), self.N-1)
self.assertTrue(algorithm.dag.is_directed())
for edge in algorithm.dag.iteredges():
self.assertTrue(self.G.has_edge(edge))
self.assertEqual(edge.weight, self.G.weight(edge))
def tearDown(self): pass
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
# EOF
# This python script uses `pub get --offline` to fill in
# .dart_tool/package_config.json files for Dart packages in the tree whose
# dependencies should be entirely resolved without requesting data from pub.dev.
# This allows us to be certain that the Dart code we are pulling for these
# packages is explicitly fetched by `gclient sync` rather than implicitly
# fetched by pub version solving, and pub fetching transitive dependencies.
import json
import os
import subprocess
import sys
# Dart packages in the tree whose dependency resolution must succeed fully
# offline, i.e. every dependency is a path dependency fetched by gclient.
ALL_PACKAGES = [
    os.path.join("src", "flutter", "ci"),
    os.path.join("src", "flutter", "flutter_frontend_server"),
    os.path.join("src", "flutter", "shell", "vmservice"),
    os.path.join("src", "flutter", "testing", "benchmark"),
    os.path.join("src", "flutter", "testing", "dart"),
    os.path.join("src", "flutter", "testing", "litetest"),
    os.path.join("src", "flutter", "testing", "android_background_image"),
    os.path.join("src", "flutter", "testing", "scenario_app"),
    os.path.join("src", "flutter", "testing", "smoke_test_failure"),
    os.path.join("src", "flutter", "testing", "symbols"),
    os.path.join("src", "flutter", "tools", "api_check"),
    os.path.join("src", "flutter", "tools", "android_lint"),
    os.path.join("src", "flutter", "tools", "clang_tidy"),
    os.path.join("src", "flutter", "tools", "const_finder"),
    os.path.join("src", "flutter", "tools", "githooks"),
    os.path.join("src", "flutter", "tools", "licenses"),
]
def FetchPackage(pub, package):
    """Run the pub command (argv list) in directory `package`.

    Returns 0 on success, 1 on failure (after printing the captured output).
    """
    try:
        subprocess.check_output(pub, cwd=package, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        # err.output is bytes; decode it so the failure log is readable
        # instead of printing a raw b'...' literal.
        output = err.output.decode("utf-8", errors="replace")
        print("'%s' failed in '%s' with status %d:\n%s" %
              (' '.join(pub), package, err.returncode, output))
        return 1
    return 0
def CheckPackage(package):
    """Count entries in `package`'s package_config.json that were fetched from pub.

    Prints an error per offending package plus a summary. Returns the number of
    offenders; 0 means the package resolves entirely to path dependencies.
    """
    package_config = os.path.join(package, ".dart_tool", "package_config.json")
    pub_count = 0
    with open(package_config) as f:
        data_dict = json.load(f)
    for package_data in data_dict["packages"]:
        package_uri = package_data["rootUri"]
        package_name = package_data["name"]
        # A rootUri inside the pub cache means pub downloaded it from pub.dev.
        if ".pub-cache" in package_uri and "pub.dartlang.org" in package_uri:
            print("Error: package '%s' was fetched from pub" % package_name)
            pub_count += 1
    if pub_count > 0:
        print("Error: %d packages were fetched from pub for %s" % (pub_count, package))
        print("Please fix the pubspec.yaml for %s "
              "so that all dependencies are path dependencies" % package)
    return pub_count
def Main():
    """Run `pub get --offline` for every package, then audit the results.

    Returns 0 when all packages resolved without touching pub.dev, 1 otherwise.
    """
    leading = os.path.join("src", "third_party", "dart", "tools", "sdks", "dart-sdk", "bin")
    pub = "pub.bat" if os.name == "nt" else "pub"
    pubcmd = [os.path.abspath(os.path.join(leading, pub)), "get", "--offline"]
    pub_count = 0
    for package in ALL_PACKAGES:
        if FetchPackage(pubcmd, package) != 0:
            return 1
        pub_count += CheckPackage(package)
    return 1 if pub_count > 0 else 0
if __name__ == '__main__':
    sys.exit(Main())
# This python script uses `pub get --offline` to fill in
# .dart_tool/package_config.json files for Dart packages in the tree whose
# dependencies should be entirely resolved without requesting data from pub.dev.
# This allows us to be certain that the Dart code we are pulling for these
# packages is explicitly fetched by `gclient sync` rather than implicitly
# fetched by pub version solving, and pub fetching transitive dependencies.
import json
import os
import subprocess
import sys
# Packages whose pub dependencies must all be path dependencies so that
# `pub get --offline` succeeds without contacting pub.dev.
ALL_PACKAGES = [
    os.path.join("src", "flutter", "ci"),
    os.path.join("src", "flutter", "flutter_frontend_server"),
    os.path.join("src", "flutter", "shell", "vmservice"),
    os.path.join("src", "flutter", "testing", "benchmark"),
    os.path.join("src", "flutter", "testing", "dart"),
    os.path.join("src", "flutter", "testing", "litetest"),
    os.path.join("src", "flutter", "testing", "android_background_image"),
    os.path.join("src", "flutter", "testing", "scenario_app"),
    os.path.join("src", "flutter", "testing", "smoke_test_failure"),
    os.path.join("src", "flutter", "testing", "symbols"),
    os.path.join("src", "flutter", "tools", "api_check"),
    os.path.join("src", "flutter", "tools", "android_lint"),
    os.path.join("src", "flutter", "tools", "clang_tidy"),
    os.path.join("src", "flutter", "tools", "const_finder"),
    os.path.join("src", "flutter", "tools", "githooks"),
    os.path.join("src", "flutter", "tools", "licenses"),
]
def FetchPackage(pub, package):
    """Invoke the given pub command inside `package`; 0 on success, 1 on error."""
    try:
        subprocess.check_output(pub, cwd=package, stderr=subprocess.STDOUT)
        return 0
    except subprocess.CalledProcessError as err:
        print("'%s' failed in '%s' with status %d:\n%s" %
              (' '.join(pub), package, err.returncode, err.output))
        return 1
def CheckPackage(package):
    """Return how many entries in `package`'s package_config came from the pub cache."""
    config_path = os.path.join(package, ".dart_tool", "package_config.json")
    with open(config_path) as config_file:
        entries = json.load(config_file)["packages"]
    offenders = [entry["name"] for entry in entries
                 if ".pub-cache" in entry["rootUri"]
                 and "pub.dartlang.org" in entry["rootUri"]]
    for name in offenders:
        print("Error: package '%s' was fetched from pub" % name)
    pub_count = len(offenders)
    if pub_count > 0:
        print("Error: %d packages were fetched from pub for %s" % (pub_count, package))
        print("Please fix the pubspec.yaml for %s "
              "so that all dependencies are path dependencies" % package)
    return pub_count
def Main():
    """Entry point: fetch offline and audit every package; returns 0 when clean."""
    sdk_bin = os.path.join("src", "third_party", "dart", "tools", "sdks", "dart-sdk", "bin")
    pub_name = "pub.bat" if os.name == "nt" else "pub"
    pubcmd = [os.path.abspath(os.path.join(sdk_bin, pub_name)), "get", "--offline"]
    offending = 0
    for package in ALL_PACKAGES:
        if FetchPackage(pubcmd, package) != 0:
            return 1
        offending += CheckPackage(package)
    return 1 if offending > 0 else 0
if __name__ == '__main__':
    sys.exit(Main())
import getpass
import logging
import os
import random
import string
import ee
import pytest
from builtins import input
from gee_asset_manager.batch_remover import delete
from gee_asset_manager.batch_uploader import upload
logging.basicConfig(level=logging.INFO)
class memoize:
    """Cache a callable's results, keyed by its positional arguments."""

    def __init__(self, function):
        self.function = function
        self.memoized = {}

    def __call__(self, *args):
        # Compute-and-store on first use; later calls hit the cache.
        if args not in self.memoized:
            self.memoized[args] = self.function(*args)
        return self.memoized[args]
def get_random_string(length):
    """Return a cryptographically random string of uppercase letters and digits."""
    alphabet = string.ascii_uppercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
@memoize
def mockreturn_pass():
    """Prompt for the password once; memoize caches it for later calls."""
    return input("Password: ")
@memoize
def get_username():
    """Prompt for the user name once; memoize caches it for later calls."""
    return input("\nUser name: ")
@pytest.fixture(scope='module')
def setup_testfolder():
    """Create a uniquely-named scratch folder under the user's EE asset root."""
    ee.Initialize()
    root = ee.data.getAssetRoots()[0]['id']
    folder = root + '/test_geebam_' + get_random_string(8)
    ee.data.createAsset({'type': ee.data.ASSET_TYPE_FOLDER}, folder)
    logging.info('Setting up test folder %s', folder)
    return folder
def test_upload_with_metadata(monkeypatch, setup_testfolder):
    """Upload the sample images together with their metadata CSV."""
    logging.info('Upload test. WARNING. Requires user name and password, which will be passed in open text.')
    username = get_username()
    monkeypatch.setattr(getpass, 'getpass', mockreturn_pass)
    here = os.path.dirname(__file__)
    source = os.path.join(here, 'images')
    metadata = os.path.join(here, 'images', 'metadata.csv')
    dest = setup_testfolder + '/test_upload_with_metadata'
    logging.info('Testing upload with metadata')
    upload(user=username, source_path=source, destination_path=dest,
           metadata_path=metadata, multipart_upload=False, nodata_value=None)
def test_upload_with_nodata_multipart(monkeypatch, setup_testfolder):
    """Upload the sample images using multipart upload with an explicit nodata value."""
    username = get_username()
    monkeypatch.setattr(getpass, 'getpass', mockreturn_pass)
    source = os.path.join(os.path.dirname(__file__), 'images')
    dest = setup_testfolder + '/test_upload_with_nodata_multipart'
    logging.info('Testing upload with nodata and multipart option')
    upload(user=username, source_path=source, destination_path=dest,
           multipart_upload=True, nodata_value=42)
def test_delete(setup_testfolder):
    """Recursively delete the test folder and verify it is gone."""
    ee.data.createAsset({'type': ee.data.ASSET_TYPE_FOLDER}, setup_testfolder + '/one_more_to_delete')
    logging.info('Removing test directory')
    delete(setup_testfolder)
    info = ee.data.getInfo(setup_testfolder)
    # Identity comparison with None (PEP 8) instead of `== None`.
    assert info is None
import logging
import os
import random
import string
import ee
import pytest
from builtins import input
from gee_asset_manager.batch_remover import delete
from gee_asset_manager.batch_uploader import upload
logging.basicConfig(level=logging.INFO)
class memoize:
    """Decorator class: cache results per tuple of positional arguments."""

    def __init__(self, function):
        self.function = function
        self.memoized = {}

    def __call__(self, *args):
        cached = self.memoized
        if args not in cached:
            cached[args] = self.function(*args)
        return cached[args]
def get_random_string(length):
    """Build a random identifier from uppercase letters and digits (OS entropy)."""
    pool = string.ascii_uppercase + string.digits
    secure = random.SystemRandom()
    return ''.join(secure.choice(pool) for _ in range(length))
@memoize
def mockreturn_pass():
    """Ask for the password a single time (cached by @memoize)."""
    return input("Password: ")
@memoize
def get_username():
    """Ask for the user name a single time (cached by @memoize)."""
    return input("\nUser name: ")
@pytest.fixture(scope='module')
def setup_testfolder():
    """Provision a randomly-named EE folder used by every test in this module."""
    ee.Initialize()
    asset_root = ee.data.getAssetRoots()[0]['id']
    name = asset_root + '/test_geebam_' + get_random_string(8)
    ee.data.createAsset({'type': ee.data.ASSET_TYPE_FOLDER}, name)
    logging.info('Setting up test folder %s', name)
    return name
def test_upload_with_metadata(monkeypatch, setup_testfolder):
    """Exercise upload() with a metadata CSV and default (non-multipart) settings."""
    logging.info('Upload test. WARNING. Requires user name and password, which will be passed in open text.')
    user = get_username()
    monkeypatch.setattr(getpass, 'getpass', mockreturn_pass)
    base = os.path.dirname(__file__)
    logging.info('Testing upload with metadata')
    upload(user=user,
           source_path=os.path.join(base, 'images'),
           destination_path=setup_testfolder + '/test_upload_with_metadata',
           metadata_path=os.path.join(base, 'images', 'metadata.csv'),
           multipart_upload=False,
           nodata_value=None)
def test_upload_with_nodata_multipart(monkeypatch, setup_testfolder):
    """Exercise upload() with multipart upload enabled and nodata=42."""
    user = get_username()
    monkeypatch.setattr(getpass, 'getpass', mockreturn_pass)
    logging.info('Testing upload with nodata and multipart option')
    upload(user=user,
           source_path=os.path.join(os.path.dirname(__file__), 'images'),
           destination_path=setup_testfolder + '/test_upload_with_nodata_multipart',
           multipart_upload=True,
           nodata_value=42)
def test_delete(setup_testfolder):
    """Delete the module's test folder (including a nested child) and verify removal."""
    ee.data.createAsset({'type': ee.data.ASSET_TYPE_FOLDER}, setup_testfolder + '/one_more_to_delete')
    logging.info('Removing test directory')
    delete(setup_testfolder)
    info = ee.data.getInfo(setup_testfolder)
    # Identity comparison with None (PEP 8) instead of `== None`.
    assert info is None
import ast
import os
import sys
from logging import getLogger
from thonny.plugins.cpython_backend import MainCPythonBackend, get_backend
logger = getLogger(__name__)
def augment_ast(root):
    """Rewrite a parsed user program so it runs under Pygame Zero.

    When Pygame Zero mode applies, prepends ``import pgzrun as __pgzrun``
    and appends ``__pgzrun.go()`` to the module body. In "auto" mode the
    rewrite is skipped unless the program looks like a pgzero program
    (defines a ``draw`` function and never calls ``draw`` itself).
    """
    mode = os.environ.get("PGZERO_MODE", "False")
    assert mode != "False"
    warning_prelude = "WARNING: Pygame Zero mode is turned on (Run → Pygame Zero mode)"
    try:
        import pgzero  # @UnusedImport
    except ImportError:
        if mode == "True":
            print(
                warning_prelude
                + ",\nbut pgzero module is not found. Running program in regular mode.\n",
                file=sys.stderr,
            )
        else:
            assert mode == "auto"
        return
    # Check if draw is defined
    for stmt in root.body:
        if isinstance(stmt, ast.FunctionDef) and stmt.name == "draw":
            break
    else:
        if mode == "auto":
            return
        print(
            warning_prelude
            + ",\nbut your program doesn't look like usual Pygame Zero program\n"
            + "(draw function is missing).\n",
            file=sys.stderr,
        )
    # need more checks in auto mode: only wrap if the user never calls draw()
    if mode == "auto":
        for node in ast.walk(root):
            if (isinstance(node, ast.Call)
                    and isinstance(node.func, ast.Name)
                    and node.func.id == "draw"):
                return
    # prepend "import pgzrun as __pgzrun"
    imp = ast.Import([ast.alias("pgzrun", "__pgzrun")])
    imp.lineno = 0
    imp.col_offset = 0
    ast.fix_missing_locations(imp)
    imp.tags = {"ignore"}
    root.body.insert(0, imp)
    # append "__pgzrun.go()"
    go = ast.Expr(
        ast.Call(ast.Attribute(ast.Name("__pgzrun", ast.Load()), "go", ast.Load()), [], [])
    )
    go.lineno = 1000000
    go.col_offset = 0
    ast.fix_missing_locations(go)
    go.tags = {"ignore"}
    root.body.append(go)
def patched_editor_autocomplete(self, cmd):
    """Wrap the original autocomplete so pgzero builtins are visible to Jedi."""
    logger.debug("Starting patched _cmd_editor_autocomplete")
    # Make extra builtins visible for Jedi by prepending a star import,
    # then shift the cursor row/result back to compensate for the extra line.
    prefix = "from pgzero.builtins import *\n"
    cmd["source"] = prefix + cmd["source"]
    cmd["row"] += 1
    result = get_backend()._original_editor_autocomplete(cmd)
    result["row"] -= 1
    result["source"] = result["source"][len(prefix):]
    return result
def load_plugin():
    """Install the pgzero AST postprocessor and autocomplete patch when enabled."""
    if os.environ.get("PGZERO_MODE", "False").lower() == "false":
        return
    get_backend().add_ast_postprocessor(augment_ast)
    # Keep a handle on the original so the patched version can delegate to it.
    MainCPythonBackend._original_editor_autocomplete = MainCPythonBackend._cmd_editor_autocomplete
    MainCPythonBackend._cmd_editor_autocomplete = patched_editor_autocomplete
import os
import sys
from logging import getLogger
from thonny.plugins.cpython_backend import MainCPythonBackend, get_backend
logger = getLogger(__name__)
def augment_ast(root):
    """Instrument a user program for Pygame Zero execution.

    Adds ``import pgzrun as __pgzrun`` at the top and ``__pgzrun.go()`` at the
    bottom of the module when pgzero mode is active; in "auto" mode only when
    the program defines (and never calls) ``draw``.
    """
    mode = os.environ.get("PGZERO_MODE", "False")
    assert mode != "False"
    warning_prelude = "WARNING: Pygame Zero mode is turned on (Run → Pygame Zero mode)"
    try:
        import pgzero  # @UnusedImport
    except ImportError:
        if mode == "True":
            print(
                warning_prelude
                + ",\nbut pgzero module is not found. Running program in regular mode.\n",
                file=sys.stderr,
            )
        else:
            assert mode == "auto"
        return
    # Check if draw is defined
    for stmt in root.body:
        if isinstance(stmt, ast.FunctionDef) and stmt.name == "draw":
            break
    else:
        if mode == "auto":
            return
        print(
            warning_prelude
            + ",\nbut your program doesn't look like usual Pygame Zero program\n"
            + "(draw function is missing).\n",
            file=sys.stderr,
        )
    # need more checks in auto mode: bail out if the program calls draw() itself
    if mode == "auto":
        for node in ast.walk(root):
            if (isinstance(node, ast.Call)
                    and isinstance(node.func, ast.Name)
                    and node.func.id == "draw"):
                return
    # prepend "import pgzrun as __pgzrun"
    imp = ast.Import([ast.alias("pgzrun", "__pgzrun")])
    imp.lineno = 0
    imp.col_offset = 0
    ast.fix_missing_locations(imp)
    imp.tags = {"ignore"}
    root.body.insert(0, imp)
    # append "__pgzrun.go()"
    go = ast.Expr(
        ast.Call(ast.Attribute(ast.Name("__pgzrun", ast.Load()), "go", ast.Load()), [], [])
    )
    go.lineno = 1000000
    go.col_offset = 0
    ast.fix_missing_locations(go)
    go.tags = {"ignore"}
    root.body.append(go)
def patched_editor_autocomplete(self, cmd):
    """Delegate to the original autocomplete with pgzero builtins injected."""
    logger.debug("Starting patched _cmd_editor_autocomplete")
    # Make extra builtins visible for Jedi; the added line is stripped again
    # from the result and the row index restored.
    prefix = "from pgzero.builtins import *\n"
    cmd["source"] = prefix + cmd["source"]
    cmd["row"] += 1
    result = get_backend()._original_editor_autocomplete(cmd)
    result["row"] -= 1
    result["source"] = result["source"][len(prefix):]
    return result
def load_plugin():
    """Activate pgzero support (no-op unless PGZERO_MODE is set truthy)."""
    if os.environ.get("PGZERO_MODE", "False").lower() == "false":
        return
    get_backend().add_ast_postprocessor(augment_ast)
    # Save the original handler before monkey-patching the class attribute.
    MainCPythonBackend._original_editor_autocomplete = MainCPythonBackend._cmd_editor_autocomplete
    MainCPythonBackend._cmd_editor_autocomplete = patched_editor_autocomplete
import pandas as pd
from abc import ABC, abstractmethod
class RegionalLoadModel(ABC):
    """An interface for different kinds of load models (per region)."""

    ALL_REQUEST_TYPES_WILDCARD = '*'

    # Maps a model name to its concrete class; filled in by @register.
    _Registry = {}

    @classmethod
    def register(cls, name: str):
        """Class decorator that records a model implementation under `name`."""
        def decorator(regional_load_model_class):
            cls._Registry[name] = regional_load_model_class
            return regional_load_model_class
        return decorator

    @classmethod
    def get(cls, name: str):
        """Look up a registered model class; raises ValueError if unknown."""
        if name not in cls._Registry:
            raise ValueError(f'An attempt to use a non-existent regional load model: {name}')
        return cls._Registry[name]

    def __init__(self, region_name: str, generation_bucket: pd.Timedelta,
                 simulation_step: pd.Timedelta, reqs_processing_infos: dict, batch_size: int):
        self.region_name = region_name
        self.generation_bucket = generation_bucket
        self.simulation_step = simulation_step
        self.reqs_processing_infos = reqs_processing_infos
        self.batch_size = batch_size
        self.load = {}

    @abstractmethod
    def generate_requests(self, timestamp: pd.Timestamp):
        pass

    def get_stat(self):
        """Return recorded load as {request type -> DataFrame indexed by datetime}."""
        return {req_type: pd.DataFrame(dict_load).set_index('datetime')
                for req_type, dict_load in self.load.items()}

    def _update_stat(self, timestamp: pd.Timestamp, req_type: str, reqs_num: int):
        """Stat is stored as dicts to improve the performance that suffers when using dataframes frequently"""
        entry = self.load.get(req_type)
        if entry is None:
            self.load[req_type] = {'datetime': [timestamp], 'value': [reqs_num]}
        else:
            entry['datetime'].append(timestamp)
            entry['value'].append(reqs_num)

    def get_requests_count_per_unit_of_time(self, req_type: str,
                                            averaging_interval: pd.Timedelta = pd.Timedelta(10, unit='ms')):
        """Sum the per-timestamp counts (one type, or all with the wildcard)
        and smooth them with a rolling mean over `averaging_interval`."""
        if req_type == self.__class__.ALL_REQUEST_TYPES_WILDCARD:
            req_types_to_consider = self.load.keys()
        elif req_type in self.load:
            req_types_to_consider = [req_type]
        else:
            raise ValueError(f'No request of type {req_type} found in the load stats for region {self.region_name}')
        request_counts = pd.DataFrame(columns=['value'], index=pd.to_datetime([]))
        for req_type in req_types_to_consider:
            cur_request_counts = pd.DataFrame(self.load[req_type]).set_index('datetime')
            # Align both time series on the union of their indices before adding.
            common_index = cur_request_counts.index.union(request_counts.index)
            cur_request_counts = cur_request_counts.reindex(common_index, fill_value=0)
            request_counts = request_counts.reindex(common_index, fill_value=0)
            request_counts += cur_request_counts
        return request_counts.rolling(averaging_interval).mean()
from .load_models import *
import pandas as pd
from abc import ABC, abstractmethod
class RegionalLoadModel(ABC):
    """An interface for different kinds of load models (per region)."""

    ALL_REQUEST_TYPES_WILDCARD = '*'

    # Registry of concrete model classes, keyed by the name given to @register.
    _Registry = {}

    @classmethod
    def register(cls, name: str):
        """Return a class decorator that registers the model under `name`."""
        def decorator(regional_load_model_class):
            cls._Registry[name] = regional_load_model_class
            return regional_load_model_class
        return decorator

    @classmethod
    def get(cls, name: str):
        """Fetch a registered model class by name, or raise ValueError."""
        if name not in cls._Registry:
            raise ValueError(f'An attempt to use a non-existent regional load model: {name}')
        return cls._Registry[name]

    def __init__(self, region_name: str, generation_bucket: pd.Timedelta,
                 simulation_step: pd.Timedelta, reqs_processing_infos: dict, batch_size: int):
        self.region_name = region_name
        self.generation_bucket = generation_bucket
        self.simulation_step = simulation_step
        self.reqs_processing_infos = reqs_processing_infos
        self.batch_size = batch_size
        self.load = {}

    @abstractmethod
    def generate_requests(self, timestamp: pd.Timestamp):
        pass

    def get_stat(self):
        """Convert the recorded dict-based stats into per-type DataFrames."""
        return {req_type: pd.DataFrame(dict_load).set_index('datetime')
                for req_type, dict_load in self.load.items()}

    def _update_stat(self, timestamp: pd.Timestamp, req_type: str, reqs_num: int):
        """Stat is stored as dicts to improve the performance that suffers when using dataframes frequently"""
        if req_type not in self.load:
            self.load[req_type] = {'datetime': [timestamp], 'value': [reqs_num]}
        else:
            self.load[req_type]['datetime'].append(timestamp)
            self.load[req_type]['value'].append(reqs_num)

    def get_requests_count_per_unit_of_time(self, req_type: str,
                                            averaging_interval: pd.Timedelta = pd.Timedelta(10, unit='ms')):
        """Aggregate request counts (one type, or every type for '*') and apply
        a rolling mean over `averaging_interval`."""
        if req_type == self.__class__.ALL_REQUEST_TYPES_WILDCARD:
            req_types_to_consider = self.load.keys()
        elif req_type in self.load:
            req_types_to_consider = [req_type]
        else:
            raise ValueError(f'No request of type {req_type} found in the load stats for region {self.region_name}')
        request_counts = pd.DataFrame(columns=['value'], index=pd.to_datetime([]))
        for req_type in req_types_to_consider:
            cur_request_counts = pd.DataFrame(self.load[req_type]).set_index('datetime')
            # Reindex both series onto the union of indices so they can be summed.
            common_index = cur_request_counts.index.union(request_counts.index)
            cur_request_counts = cur_request_counts.reindex(common_index, fill_value=0)
            request_counts = request_counts.reindex(common_index, fill_value=0)
            request_counts += cur_request_counts
        return request_counts.rolling(averaging_interval).mean()
from .load_models import *
import requests
from bs4 import BeautifulSoup as soup
class Product:
    """A single classified-ad listing scraped from OLX."""

    def __init__(self, name, price, location, img, link):
        self.name = name
        self.price = price
        self.location = location
        self.img = img
        self.link = link

    def __str__(self):
        fields = [
            "name : " + self.name,
            "price : " + str(self.price),
            "location : " + str(self.location),
            "img : " + str(self.img),
            "link : " + str(self.link),
        ]
        return "\n".join(fields) + "\n"
products = []

# Produce keywords to search for (Romanian fruit and vegetable names).
fruits = ['capsuni', 'capsune', 'cirese', 'mere', 'pere', 'gutui', 'prune', 'nuci', 'struguri',
          'caise', 'pepene', 'zmeura', 'mure', 'afine', 'piersici', 'nectarine', 'visine',
          'catina']
vegetables = ['cartofi', 'ardei', 'gulie', 'conopida', 'brocoli', 'rosii', 'ciuperci', 'telina',
              'vinete', 'salata', 'morcov', 'fasole', 'usturoi', 'ceapa', 'mazare', 'castravete',
              'ridiche', 'varza', 'pastarnac', 'patrunjel', 'marar', 'leustean', 'loboda', 'urzici',
              'stevie', 'sfecla', 'dovleac']
pages = ['?page=1', '?page=2', '?page=3']

# One search URL per keyword per result page (vegetables first, then fruits).
BASE_URL = 'https://www.olx.ro/anunturi-agricole/alimentatie-produse-bio/legume-fructe/q-'
urls = [BASE_URL + keyword + '/' + page
        for keyword in vegetables + fruits
        for page in pages]
# Fetch each search page and collect the listings it contains.
for url in urls:
    try:
        page_html = requests.get(url)
        page_soup = soup(page_html.text, "html.parser")
        containers = page_soup.findAll("div", {"class": "offer-wrapper"})
    except Exception:
        # On a failed fetch/parse, skip this URL. The original fell through
        # and re-processed the previous page's `containers` (or crashed with
        # NameError on the very first failure).
        print("Not possible " + url)
        continue
    for contain in containers:
        items = contain.findAll("td")
        try:
            name = items[0].a.img["alt"]
            img = items[0].a.img["src"]
            link = items[1].div.h3.a["href"]
            price = items[2].div.p.strong.text
            location = items[3].div.p.small.span.text
            products.append(Product(name, price, location, img, link))
        except Exception:
            # Listing markup varies; skip entries that don't match the layout.
            print("Not possible")
# Write the scraped listings to CSV. Using the csv module quotes fields
# properly instead of mangling data by replacing every ',' with '.'.
import csv

filename = "products.csv"
with open(filename, "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["Name", "Price", "Location", "Image", "Link"])
    for product in products:
        writer.writerow([product.name, product.price, product.location, product.img, product.link])
from bs4 import BeautifulSoup as soup
class Product:
    """One scraped OLX listing: name, price, location, image URL and ad link."""

    def __init__(self, name, price, location, img, link):
        self.name = name
        self.price = price
        self.location = location
        self.img = img
        self.link = link

    def __str__(self):
        lines = ["name : " + self.name,
                 "price : " + str(self.price),
                 "location : " + str(self.location),
                 "img : " + str(self.img),
                 "link : " + str(self.link)]
        return "\n".join(lines) + "\n"
products = []

# Romanian produce keywords used as OLX search terms.
fruits = ['capsuni', 'capsune', 'cirese', 'mere', 'pere', 'gutui', 'prune', 'nuci', 'struguri',
          'caise', 'pepene', 'zmeura', 'mure', 'afine', 'piersici', 'nectarine', 'visine',
          'catina']
vegetables = ['cartofi', 'ardei', 'gulie', 'conopida', 'brocoli', 'rosii', 'ciuperci', 'telina',
              'vinete', 'salata', 'morcov', 'fasole', 'usturoi', 'ceapa', 'mazare', 'castravete',
              'ridiche', 'varza', 'pastarnac', 'patrunjel', 'marar', 'leustean', 'loboda', 'urzici',
              'stevie', 'sfecla', 'dovleac']
pages = ['?page=1', '?page=2', '?page=3']

_SEARCH_BASE = 'https://www.olx.ro/anunturi-agricole/alimentatie-produse-bio/legume-fructe/q-'
# Vegetables first, then fruits, three pages per keyword (matches the
# original nested append loops).
urls = [_SEARCH_BASE + term + '/' + page
        for term in vegetables + fruits
        for page in pages]
# Download every search page and extract its offer rows.
for url in urls:
    try:
        page_html = requests.get(url)
        page_soup = soup(page_html.text, "html.parser")
        containers = page_soup.findAll("div", {"class": "offer-wrapper"})
    except Exception:
        # Skip a failed URL instead of reusing the previous iteration's
        # `containers` (which also raised NameError on the first failure).
        print("Not possible " + url)
        continue
    for contain in containers:
        items = contain.findAll("td")
        try:
            name = items[0].a.img["alt"]
            img = items[0].a.img["src"]
            link = items[1].div.h3.a["href"]
            price = items[2].div.p.strong.text
            location = items[3].div.p.small.span.text
            products.append(Product(name, price, location, img, link))
        except Exception:
            # Some rows don't follow the expected markup; ignore them.
            print("Not possible")
# Persist results as CSV with proper quoting (the original's comma→dot
# substitution silently corrupted names, prices and URLs).
import csv

filename = "products.csv"
with open(filename, "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["Name", "Price", "Location", "Image", "Link"])
    for product in products:
        writer.writerow([product.name, product.price, product.location, product.img, product.link])
import operator
import os
import numpy as np
import tensorflow as tf
import test_parameters as param
import util as u
graph = tf.get_default_graph()
def predict_best_tail(test_triple, full_triple_list, full_entity_list, entity_embeddings_txt,
                      entity_embeddings_img,
                      full_relation_embeddings):
    """Rank every entity as a candidate tail for the triple's (head, relation).

    Args:
        test_triple: (head, tail, relation) identifiers.
        full_triple_list: all known triples, used to filter other correct tails.
        full_entity_list: candidate tail entities.
        entity_embeddings_txt / entity_embeddings_img: per-entity embeddings.
        full_relation_embeddings: per-relation embeddings.

    Returns:
        (raw_rank, filtered_rank, top_10_predictions), ranks are 1-based.
    """
    gt_head = test_triple[0]
    gt_tail_org = test_triple[1]
    gt_rel = test_triple[2]
    gt_tails = u.get_correct_tails(gt_head, gt_rel, full_triple_list)

    n = len(full_entity_list)
    # Repeat the fixed (head, relation) embeddings once per candidate instead
    # of a Python append loop. Assumes 1-D embedding vectors — matches the
    # original list-repeat behavior.
    head_txt = np.tile(entity_embeddings_txt[gt_head], (n, 1))
    head_img = np.tile(entity_embeddings_img[gt_head], (n, 1))
    # Use a distinct local name: the original reassigned the parameter
    # `full_relation_embeddings`, shadowing the relation-embedding table.
    rel_batch = np.tile(full_relation_embeddings[gt_rel], (n, 1))
    tails_txt = np.asarray([entity_embeddings_txt[e] for e in full_entity_list])
    tails_img = np.asarray([entity_embeddings_img[e] for e in full_entity_list])

    predictions = predict_tail(head_txt, head_img, rel_batch, tails_txt, tails_img)
    results = {full_entity_list[i]: predictions[0][i] for i in range(n)}

    # A head is not its own candidate tail (unless the triple is reflexive).
    if gt_head != gt_tail_org:
        del results[gt_head]

    sorted_x = sorted(results.items(), key=operator.itemgetter(1))
    top_10_predictions = [x[0] for x in sorted_x[:10]]
    index_correct_tail_raw = [x[0] for x in sorted_x].index(gt_tail_org)

    # Filtered setting: remove the *other* known-correct tails before re-ranking.
    for key in (t for t in gt_tails if t != gt_tail_org):
        results.pop(key, None)
    sorted_x = sorted(results.items(), key=operator.itemgetter(1))
    index_tail_head_filter = [x[0] for x in sorted_x].index(gt_tail_org)

    return (index_correct_tail_raw + 1), (index_tail_head_filter + 1), top_10_predictions
def predict_tail(head_embedding_txt, head_embedding_img, relation_embedding, tails_embedding_txt, tails_embeddings_img):
    """Evaluate the positive-energy scores for a batch of (h, r, t) candidates."""
    feed = {
        graph.get_tensor_by_name("input/r_input:0"): relation_embedding,
        graph.get_tensor_by_name("input/h_pos_txt_input:0"): head_embedding_txt,
        graph.get_tensor_by_name("input/t_pos_txt_input:0"): tails_embedding_txt,
        graph.get_tensor_by_name("input/h_pos_img_input:0"): head_embedding_img,
        graph.get_tensor_by_name("input/t_pos_img_input:0"): tails_embeddings_img,
    }
    # Lower energy == better match; evaluated inside the active session.
    h_r_t_pos = graph.get_tensor_by_name("cosine/pos_energy:0")
    return [h_r_t_pos.eval(feed_dict=feed)]
def predict_head(tail_embeddings_list_txt, tail_embeddings_list_img, full_relation_embeddings,
                 heads_embeddings_list_txt, heads_embeddings_list_img):
    """Evaluate the positive-energy scores for a batch of head candidates."""
    feed = {
        graph.get_tensor_by_name("input/r_input:0"): full_relation_embeddings,
        graph.get_tensor_by_name("input/h_pos_txt_input:0"): heads_embeddings_list_txt,
        graph.get_tensor_by_name("input/t_pos_txt_input:0"): tail_embeddings_list_txt,
        graph.get_tensor_by_name("input/h_pos_img_input:0"): heads_embeddings_list_img,
        graph.get_tensor_by_name("input/t_pos_img_input:0"): tail_embeddings_list_img,
    }
    # Same energy tensor as predict_tail; only the varying side differs.
    t_r_h_pos = graph.get_tensor_by_name("cosine/pos_energy:0")
    return [t_r_h_pos.eval(feed_dict=feed)]
def predict_best_head(test_triple, full_triple_list, full_entity_list, entity_embeddings_txt,
                      entity_embeddings_img,
                      full_relation_embeddings):
    """Rank every entity as a candidate head for the triple's (relation, tail).

    Triple layout: (head, tail, relation).

    Returns:
        (raw_rank, filtered_rank, top_10_predictions), ranks are 1-based.
    """
    gt_head_org = test_triple[0]
    gt_tail = test_triple[1]
    gt_rel = test_triple[2]
    gt_heads = u.get_correct_heads(gt_tail, gt_rel, full_triple_list)

    n = len(full_entity_list)
    # Repeat the fixed (tail, relation) embeddings once per candidate instead
    # of a Python append loop. Assumes 1-D embedding vectors — matches the
    # original list-repeat behavior.
    tail_txt = np.tile(entity_embeddings_txt[gt_tail], (n, 1))
    tail_img = np.tile(entity_embeddings_img[gt_tail], (n, 1))
    # Distinct local name; the original shadowed the `full_relation_embeddings`
    # parameter by reassigning it to the repeated batch.
    rel_batch = np.tile(full_relation_embeddings[gt_rel], (n, 1))
    heads_txt = np.asarray([entity_embeddings_txt[e] for e in full_entity_list])
    heads_img = np.asarray([entity_embeddings_img[e] for e in full_entity_list])

    predictions = predict_head(tail_txt, tail_img, rel_batch, heads_txt, heads_img)
    results = {full_entity_list[i]: predictions[0][i] for i in range(n)}

    # A tail is not its own candidate head (unless the triple is reflexive).
    if gt_tail != gt_head_org:
        del results[gt_tail]

    sorted_x = sorted(results.items(), key=operator.itemgetter(1))
    top_10_predictions = [x[0] for x in sorted_x[:10]]
    index_correct_head_raw = [x[0] for x in sorted_x].index(gt_head_org)

    # Filtered setting: remove the *other* known-correct heads before re-ranking.
    # (Renamed from the original's misleading `gt_tail_to_filter`.)
    for key in (h for h in gt_heads if h != gt_head_org):
        results.pop(key, None)
    sorted_x = sorted(results.items(), key=operator.itemgetter(1))
    index_head_filter = [x[0] for x in sorted_x].index(gt_head_org)

    return (index_correct_head_raw + 1), (index_head_filter + 1), top_10_predictions
############ Testing Part #######################
# Restores the best-validation checkpoint and evaluates link prediction on the
# test triples: first tail prediction, then head prediction.  For each
# direction it reports the mean average rank (raw and filtered) and Hits@10,
# and writes the per-triple ranks to a results file.
relation_embeddings = u.load_binary_file(param.relation_structural_embeddings_file)
entity_embeddings = u.load_binary_file(param.entity_structural_embeddings_file)
entity_embeddings_img = u.load_binary_file(param.entity_multimodal_embeddings_file)
entity_list = u.load_entity_list(param.all_triples_file, entity_embeddings)
print("#Entities", len(entity_list))
all_triples = u.load_triples(param.all_triples_file, entity_list)
all_test_triples = u.load_triples(param.test_triples_file, entity_list)
#all_test_triples = all_test_triples[:1000]
print("#Test triples", len(all_test_triples)) # Triple: head, tail, relation
# Accumulators for the final evaluation summary.
tail_ma_raw = 0
tail_ma_filter = 0
tail_hits_raw = 0
tail_hits_filter = 0
head_ma_raw = 0
head_ma_filter = 0
head_hits_raw = 0
head_hits_filter = 0
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
    #print("Model restored from file: %s" % param.current_model_meta_file)
    avg_rank_raw = 0.0
    avg_rank_filter = 0.0
    hits_at_10_raw = 0.0
    hits_at_10_filter = 0.0
    lines = []
    #new_saver = tf.train.import_meta_graph(param.model_meta_file)
    # new_saver.restore(sess, param.model_weights_best_file)
    saver = tf.train.import_meta_graph(param.best_valid_model_meta_file)
    saver.restore(sess, tf.train.latest_checkpoint(param.checkpoint_best_valid_dir))
    # Rebind the module-level ``graph`` so predict_tail/predict_head resolve
    # the tensors of the restored checkpoint by name.
    graph = tf.get_default_graph()
    #Warning only for relation classification
    #entity_list = u.load_relation_list(param.all_triples_file, entity_embeddings)
    counter = 1
    # ---- Tail prediction: rank every candidate tail for each test triple ----
    for triple in all_test_triples:
        rank_raw, rank_filter, top_10 = predict_best_tail(triple, all_triples, entity_list, entity_embeddings,
                                                          entity_embeddings_img,
                                                          relation_embeddings)
        line = triple[0] + "\t" + triple[2] + "\t" + triple[1] + "\t" + str(top_10) + "\t" + str(rank_raw) + "\t" + str(
            rank_filter) + "\n"
        #print(line)
        lines.append(line)
        print(str(counter) + "/" + str(len(all_test_triples)) + " " + str(rank_raw) + " " + str(rank_filter) )
        counter +=1
        avg_rank_raw += rank_raw
        avg_rank_filter += rank_filter
        if rank_raw <= 10:
            hits_at_10_raw += 1
        if rank_filter <= 10:
            hits_at_10_filter += 1
    # Means over the whole test set.
    avg_rank_raw /= len(all_test_triples)
    avg_rank_filter /= len(all_test_triples)
    hits_at_10_raw /= len(all_test_triples)
    hits_at_10_filter /= len(all_test_triples)
    print("MAR Raw", avg_rank_raw, "MAR Filter", avg_rank_filter)
    print("Hits@10 Raw", hits_at_10_raw, "Hits@10 Filter", hits_at_10_filter)
    # Write to a file
    #results_file = param.result_file
    results_file = param.result_file.replace(".txt","tail_prediction.txt")
    # NOTE(review): an existing results file is dodged once via a "_1" suffix,
    # but an existing "_1" file would be silently overwritten.
    if os.path.isfile(results_file):
        results_file = results_file.replace(".txt", "_1.txt")
    print("write the results into", results_file)
    with open(results_file, "w") as f:
        f.write("MAR Raw" + "\t" + str(avg_rank_raw) + "\t" + "MAR Filter" + "\t" + str(avg_rank_filter) + "\n")
        f.write("Hits@10 Raw" + "\t" + str(hits_at_10_raw) + "\t" + "Hits@10 Filter" + "\t" + str(
            hits_at_10_filter) + "\n" + "\n")
        f.write("Head \t Relation \t Gold Tail \t Top Predicted Tails \t Raw Rank \t Filtered Rank\n")
        for l in lines:
            f.write(str(l))
    # Keep the tail-direction numbers for the summary printed at the end.
    tail_ma_raw = avg_rank_raw
    tail_ma_filter = avg_rank_filter
    tail_hits_raw = hits_at_10_raw
    tail_hits_filter = hits_at_10_filter
    # ---- Head prediction: same procedure with the head slot as target ----
    avg_rank_raw = 0.0
    avg_rank_filter = 0.0
    hits_at_10_raw = 0.0
    hits_at_10_filter = 0.0
    lines = []
    counter = 1
    for triple in all_test_triples:
        rank_raw, rank_filter, top_10 = predict_best_head(triple, all_triples, entity_list, entity_embeddings,
                                                          entity_embeddings_img,
                                                          relation_embeddings)
        line = triple[1] + "\t" + triple[2] + "\t" + triple[0] + "\t" + str(top_10) + "\t" + str(rank_raw) + "\t" + str(
            rank_filter) + "\n"
        #print(line)
        lines.append(line)
        print(str(counter) + "/" + str(len(all_test_triples)) + " " + str(rank_raw) + " " + str(rank_filter))
        counter += 1
        avg_rank_raw += rank_raw
        avg_rank_filter += rank_filter
        if rank_raw <= 10:
            hits_at_10_raw += 1
        if rank_filter <= 10:
            hits_at_10_filter += 1
    avg_rank_raw /= len(all_test_triples)
    avg_rank_filter /= len(all_test_triples)
    hits_at_10_raw /= len(all_test_triples)
    hits_at_10_filter /= len(all_test_triples)
    print("MAR Raw", avg_rank_raw, "MAR Filter", avg_rank_filter)
    print("Hits@10 Raw", hits_at_10_raw, "Hits@10 Filter", hits_at_10_filter)
    # Write to a file
    results_file = param.result_file.replace(".txt","head_prediction.txt")
    if os.path.isfile(results_file):
        results_file = results_file.replace(".txt", "_1.txt")
    print("write the results into", results_file)
    with open(results_file, "w") as f:
        f.write("MAR Raw" + "\t" + str(avg_rank_raw) + "\t" + "MAR Filter" + "\t" + str(avg_rank_filter) + "\n")
        f.write("Hits@10 Raw" + "\t" + str(hits_at_10_raw) + "\t" + "Hits@10 Filter" + "\t" + str(
            hits_at_10_filter) + "\n" + "\n")
        f.write("Tail \t Relation \t Gold Head \t Top Predicted Heads \t Raw Rank \t Filtered Rank\n")
        for l in lines:
            f.write(str(l))
    head_ma_raw = avg_rank_raw
    head_ma_filter = avg_rank_filter
    head_hits_raw = hits_at_10_raw
    head_hits_filter = hits_at_10_filter
    # ---- Final summary over both prediction directions ----
    print("+++++++++++++++ Evaluation Summary ++++++++++++++++")
    print("MA Raw Tail \t MA Filter Tail \t Hits Raw Tail \t Hits Filter Tail")
    print(str(tail_ma_raw)+"\t"+str(tail_ma_filter)+"\t"+str(tail_hits_raw)+"\t"+str(tail_hits_filter))
    print("MA Raw Head \t MA Filter Head \t Hits Raw Head \t Hits Filter Head")
    print(str(head_ma_raw)+"\t"+str(head_ma_filter)+"\t"+str(head_hits_raw)+"\t"+str(head_hits_filter))
    print("MA Raw AVG \t MA Filter AVG \t Hits Raw AVG \t Hits Filter AVG")
    avg_ma_raw = (head_ma_raw+tail_ma_raw)/2
    avg_ma_filter = (head_ma_filter+tail_ma_filter)/2
    avg_hits_raw = (head_hits_raw+tail_hits_raw)/2
    avg_hits_filter = (head_hits_filter+tail_hits_filter)/2
print(str(avg_ma_raw)+"\t"+str(avg_ma_filter)+"\t"+str(avg_hits_raw)+"\t"+str(avg_hits_filter)) | ikrl/test.py | import operator
import os
import numpy as np
import tensorflow as tf
import test_parameters as param
import util as u
graph = tf.get_default_graph()
def predict_best_tail(test_triple, full_triple_list, full_entity_list, entity_embeddings_txt,
                      entity_embeddings_img,
                      full_relation_embeddings):
    """Rank every candidate tail entity for one (head, ?, relation) query.

    Returns a 3-tuple: the raw rank of the gold tail, the filtered rank
    (other known-correct tails removed first), and the ten best-scoring
    candidate tails.
    """
    head = test_triple[0]
    tail_gold = test_triple[1]
    rel = test_triple[2]
    head_txt = entity_embeddings_txt[head]
    head_img = entity_embeddings_img[head]
    rel_emb = full_relation_embeddings[rel]
    known_tails = u.get_correct_tails(head, rel, full_triple_list)
    # Build one scoring row per candidate entity: head and relation repeated,
    # the tail slot filled with each candidate in turn.
    n = len(full_entity_list)
    heads_txt = np.asarray([head_txt] * n)
    heads_img = np.asarray([head_img] * n)
    rels = np.asarray([rel_emb] * n)
    cand_txt = np.asarray([entity_embeddings_txt[e] for e in full_entity_list])
    cand_img = np.asarray([entity_embeddings_img[e] for e in full_entity_list])
    scores = predict_tail(heads_txt, heads_img, rels, cand_txt, cand_img)
    results = dict(zip(full_entity_list, scores[0]))
    # The head itself is not a legal candidate unless the triple is reflexive.
    if head != tail_gold:
        del results[head]
    # Lower energy == better, so sort ascending.
    ranked = sorted(results.items(), key=operator.itemgetter(1), reverse=False)
    top_10_predictions = [entity for entity, _ in ranked[:10]]
    raw_rank = [entity for entity, _ in ranked].index(tail_gold)
    # Filtered setting: drop every other tail known correct for (head, rel).
    for other in known_tails:
        if other != tail_gold and other in results:
            del results[other]
    ranked = sorted(results.items(), key=operator.itemgetter(1), reverse=False)
    filtered_rank = [entity for entity, _ in ranked].index(tail_gold)
    return (raw_rank + 1), (filtered_rank + 1), top_10_predictions
def predict_tail(head_embedding_txt, head_embedding_img, relation_embedding, tails_embedding_txt, tails_embeddings_img):
    """Evaluate the positive-energy op on a batch of (head, rel, tail) rows.

    Runs in the default session restored by the test driver; the scores are
    returned wrapped in a single-element list.
    """
    tensor = graph.get_tensor_by_name
    feed = {
        tensor("input/r_input:0"): relation_embedding,
        tensor("input/h_pos_txt_input:0"): head_embedding_txt,
        tensor("input/t_pos_txt_input:0"): tails_embedding_txt,
        tensor("input/h_pos_img_input:0"): head_embedding_img,
        tensor("input/t_pos_img_input:0"): tails_embeddings_img,
    }
    energy = tensor("cosine/pos_energy:0")
    return [energy.eval(feed_dict=feed)]
def predict_head(tail_embeddings_list_txt, tail_embeddings_list_img, full_relation_embeddings,
                 heads_embeddings_list_txt, heads_embeddings_list_img):
    """Score a batch of candidate heads against a fixed (tail, relation) pair.

    Uses the same energy op as predict_tail; only the argument roles differ.
    """
    tensor = graph.get_tensor_by_name
    feed = {
        tensor("input/r_input:0"): full_relation_embeddings,
        tensor("input/h_pos_txt_input:0"): heads_embeddings_list_txt,
        tensor("input/t_pos_txt_input:0"): tail_embeddings_list_txt,
        tensor("input/h_pos_img_input:0"): heads_embeddings_list_img,
        tensor("input/t_pos_img_input:0"): tail_embeddings_list_img,
    }
    energy = tensor("cosine/pos_energy:0")
    return [energy.eval(feed_dict=feed)]
def predict_best_head(test_triple, full_triple_list, full_entity_list, entity_embeddings_txt,
                      entity_embeddings_img,
                      full_relation_embeddings):
    """Rank every candidate head entity for one (?, tail, relation) query.

    Mirror image of predict_best_tail: returns the raw rank, the filtered
    rank and the ten best-scoring candidate heads for the gold head.
    """
    # triple layout: (head, tail, relation)
    head_gold = test_triple[0]
    tail = test_triple[1]
    rel = test_triple[2]
    tail_txt = entity_embeddings_txt[tail]
    tail_img = entity_embeddings_img[tail]
    rel_emb = full_relation_embeddings[rel]
    known_heads = u.get_correct_heads(tail, rel, full_triple_list)
    # One scoring row per candidate entity: tail and relation repeated, the
    # head slot filled with each candidate in turn.
    n = len(full_entity_list)
    tails_txt = np.asarray([tail_txt] * n)
    tails_img = np.asarray([tail_img] * n)
    rels = np.asarray([rel_emb] * n)
    cand_txt = np.asarray([entity_embeddings_txt[e] for e in full_entity_list])
    cand_img = np.asarray([entity_embeddings_img[e] for e in full_entity_list])
    scores = predict_head(tails_txt, tails_img, rels, cand_txt, cand_img)
    results = dict(zip(full_entity_list, scores[0]))
    # The tail itself is excluded from the head candidates unless reflexive.
    if tail != head_gold:
        del results[tail]
    # Lower energy == better, so sort ascending.
    ranked = sorted(results.items(), key=operator.itemgetter(1), reverse=False)
    top_10_predictions = [entity for entity, _ in ranked[:10]]
    raw_rank = [entity for entity, _ in ranked].index(head_gold)
    # Filtered setting: drop every other head known correct for (tail, rel).
    for other in known_heads:
        if other != head_gold and other in results:
            del results[other]
    ranked = sorted(results.items(), key=operator.itemgetter(1), reverse=False)
    filtered_rank = [entity for entity, _ in ranked].index(head_gold)
    return (raw_rank + 1), (filtered_rank + 1), top_10_predictions
############ Testing Part #######################
# Restores the best-validation checkpoint and evaluates link prediction on the
# test triples: first tail prediction, then head prediction.  For each
# direction it reports the mean average rank (raw and filtered) and Hits@10,
# and writes the per-triple ranks to a results file.
relation_embeddings = u.load_binary_file(param.relation_structural_embeddings_file)
entity_embeddings = u.load_binary_file(param.entity_structural_embeddings_file)
entity_embeddings_img = u.load_binary_file(param.entity_multimodal_embeddings_file)
entity_list = u.load_entity_list(param.all_triples_file, entity_embeddings)
print("#Entities", len(entity_list))
all_triples = u.load_triples(param.all_triples_file, entity_list)
all_test_triples = u.load_triples(param.test_triples_file, entity_list)
#all_test_triples = all_test_triples[:1000]
print("#Test triples", len(all_test_triples)) # Triple: head, tail, relation
# Accumulators for the final evaluation summary.
tail_ma_raw = 0
tail_ma_filter = 0
tail_hits_raw = 0
tail_hits_filter = 0
head_ma_raw = 0
head_ma_filter = 0
head_hits_raw = 0
head_hits_filter = 0
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as sess:
    #print("Model restored from file: %s" % param.current_model_meta_file)
    avg_rank_raw = 0.0
    avg_rank_filter = 0.0
    hits_at_10_raw = 0.0
    hits_at_10_filter = 0.0
    lines = []
    #new_saver = tf.train.import_meta_graph(param.model_meta_file)
    # new_saver.restore(sess, param.model_weights_best_file)
    saver = tf.train.import_meta_graph(param.best_valid_model_meta_file)
    saver.restore(sess, tf.train.latest_checkpoint(param.checkpoint_best_valid_dir))
    # Rebind the module-level ``graph`` so predict_tail/predict_head resolve
    # the tensors of the restored checkpoint by name.
    graph = tf.get_default_graph()
    #Warning only for relation classification
    #entity_list = u.load_relation_list(param.all_triples_file, entity_embeddings)
    counter = 1
    # ---- Tail prediction: rank every candidate tail for each test triple ----
    for triple in all_test_triples:
        rank_raw, rank_filter, top_10 = predict_best_tail(triple, all_triples, entity_list, entity_embeddings,
                                                          entity_embeddings_img,
                                                          relation_embeddings)
        line = triple[0] + "\t" + triple[2] + "\t" + triple[1] + "\t" + str(top_10) + "\t" + str(rank_raw) + "\t" + str(
            rank_filter) + "\n"
        #print(line)
        lines.append(line)
        print(str(counter) + "/" + str(len(all_test_triples)) + " " + str(rank_raw) + " " + str(rank_filter) )
        counter +=1
        avg_rank_raw += rank_raw
        avg_rank_filter += rank_filter
        if rank_raw <= 10:
            hits_at_10_raw += 1
        if rank_filter <= 10:
            hits_at_10_filter += 1
    # Means over the whole test set.
    avg_rank_raw /= len(all_test_triples)
    avg_rank_filter /= len(all_test_triples)
    hits_at_10_raw /= len(all_test_triples)
    hits_at_10_filter /= len(all_test_triples)
    print("MAR Raw", avg_rank_raw, "MAR Filter", avg_rank_filter)
    print("Hits@10 Raw", hits_at_10_raw, "Hits@10 Filter", hits_at_10_filter)
    # Write to a file
    #results_file = param.result_file
    results_file = param.result_file.replace(".txt","tail_prediction.txt")
    # NOTE(review): an existing results file is dodged once via a "_1" suffix,
    # but an existing "_1" file would be silently overwritten.
    if os.path.isfile(results_file):
        results_file = results_file.replace(".txt", "_1.txt")
    print("write the results into", results_file)
    with open(results_file, "w") as f:
        f.write("MAR Raw" + "\t" + str(avg_rank_raw) + "\t" + "MAR Filter" + "\t" + str(avg_rank_filter) + "\n")
        f.write("Hits@10 Raw" + "\t" + str(hits_at_10_raw) + "\t" + "Hits@10 Filter" + "\t" + str(
            hits_at_10_filter) + "\n" + "\n")
        f.write("Head \t Relation \t Gold Tail \t Top Predicted Tails \t Raw Rank \t Filtered Rank\n")
        for l in lines:
            f.write(str(l))
    # Keep the tail-direction numbers for the summary printed at the end.
    tail_ma_raw = avg_rank_raw
    tail_ma_filter = avg_rank_filter
    tail_hits_raw = hits_at_10_raw
    tail_hits_filter = hits_at_10_filter
    # ---- Head prediction: same procedure with the head slot as target ----
    avg_rank_raw = 0.0
    avg_rank_filter = 0.0
    hits_at_10_raw = 0.0
    hits_at_10_filter = 0.0
    lines = []
    counter = 1
    for triple in all_test_triples:
        rank_raw, rank_filter, top_10 = predict_best_head(triple, all_triples, entity_list, entity_embeddings,
                                                          entity_embeddings_img,
                                                          relation_embeddings)
        line = triple[1] + "\t" + triple[2] + "\t" + triple[0] + "\t" + str(top_10) + "\t" + str(rank_raw) + "\t" + str(
            rank_filter) + "\n"
        #print(line)
        lines.append(line)
        print(str(counter) + "/" + str(len(all_test_triples)) + " " + str(rank_raw) + " " + str(rank_filter))
        counter += 1
        avg_rank_raw += rank_raw
        avg_rank_filter += rank_filter
        if rank_raw <= 10:
            hits_at_10_raw += 1
        if rank_filter <= 10:
            hits_at_10_filter += 1
    avg_rank_raw /= len(all_test_triples)
    avg_rank_filter /= len(all_test_triples)
    hits_at_10_raw /= len(all_test_triples)
    hits_at_10_filter /= len(all_test_triples)
    print("MAR Raw", avg_rank_raw, "MAR Filter", avg_rank_filter)
    print("Hits@10 Raw", hits_at_10_raw, "Hits@10 Filter", hits_at_10_filter)
    # Write to a file
    results_file = param.result_file.replace(".txt","head_prediction.txt")
    if os.path.isfile(results_file):
        results_file = results_file.replace(".txt", "_1.txt")
    print("write the results into", results_file)
    with open(results_file, "w") as f:
        f.write("MAR Raw" + "\t" + str(avg_rank_raw) + "\t" + "MAR Filter" + "\t" + str(avg_rank_filter) + "\n")
        f.write("Hits@10 Raw" + "\t" + str(hits_at_10_raw) + "\t" + "Hits@10 Filter" + "\t" + str(
            hits_at_10_filter) + "\n" + "\n")
        f.write("Tail \t Relation \t Gold Head \t Top Predicted Heads \t Raw Rank \t Filtered Rank\n")
        for l in lines:
            f.write(str(l))
    head_ma_raw = avg_rank_raw
    head_ma_filter = avg_rank_filter
    head_hits_raw = hits_at_10_raw
    head_hits_filter = hits_at_10_filter
    # ---- Final summary over both prediction directions ----
    print("+++++++++++++++ Evaluation Summary ++++++++++++++++")
    print("MA Raw Tail \t MA Filter Tail \t Hits Raw Tail \t Hits Filter Tail")
    print(str(tail_ma_raw)+"\t"+str(tail_ma_filter)+"\t"+str(tail_hits_raw)+"\t"+str(tail_hits_filter))
    print("MA Raw Head \t MA Filter Head \t Hits Raw Head \t Hits Filter Head")
    print(str(head_ma_raw)+"\t"+str(head_ma_filter)+"\t"+str(head_hits_raw)+"\t"+str(head_hits_filter))
    print("MA Raw AVG \t MA Filter AVG \t Hits Raw AVG \t Hits Filter AVG")
    avg_ma_raw = (head_ma_raw+tail_ma_raw)/2
    avg_ma_filter = (head_ma_filter+tail_ma_filter)/2
    avg_hits_raw = (head_hits_raw+tail_hits_raw)/2
    avg_hits_filter = (head_hits_filter+tail_hits_filter)/2
print(str(avg_ma_raw)+"\t"+str(avg_ma_filter)+"\t"+str(avg_hits_raw)+"\t"+str(avg_hits_filter)) | 0.316686 | 0.125092 |
"""Determines support level for different steps for masters."""
from services.deps import GetOSPlatformName
from model.wf_config import FinditConfig
# Explicitly list unsupported masters. Additional work might be needed in order
# to support them.
# NOTE(review): not referenced by any function in this file's visible portion;
# presumably consumed elsewhere -- confirm before removing.
_UNSUPPORTED_MASTERS = [
    'chromium.lkgr',  # Disable as results are not showed on Sheriff-o-Matic.
    'chromium.gpu',  # Disable as too many false positives.
    'chromium.memory.fyi',
    'chromium.gpu.fyi',
    'chromium.perf',
]
def _ConvertOldMastersFormatToNew(masters_to_blacklisted_steps):
"""Converts the old masters format to the new rules dict.
Args:
masters_to_blacklisted_steps: A dict in the format:
{
'master1': ['step1', 'step2', ...],
'master2': ['step3', 'step4', ...]
}
Returns:
A dict in the latest rules dict format:
{
'supported_masters': {
'master1': {
'unsupported_steps: ['step1', 'step2', ...], (if any)
}
},
'global': {}
}
"""
supported_masters = {}
steps_for_masters_rules_in_latest_format = {
'supported_masters': supported_masters,
'global': {}
}
for master, unsupported_steps in masters_to_blacklisted_steps.iteritems():
supported_masters[master] = {}
if unsupported_steps:
supported_masters[master]['unsupported_steps'] = unsupported_steps
return steps_for_masters_rules_in_latest_format
def GetStepsForMastersRules(settings=None, version=None):
  """Returns the steps-for-masters rules dict, converting legacy config."""
  config = settings if settings is not None else FinditConfig.Get(version)
  rules = config.steps_for_masters_rules
  if not rules:
    # Fall back to the legacy masters_to_blacklisted_steps representation.
    rules = _ConvertOldMastersFormatToNew(config.masters_to_blacklisted_steps)
  return rules
def MasterIsSupported(master_name):
  """Returns ``True`` if the given master is supported, otherwise ``False``."""
  supported = GetStepsForMastersRules()['supported_masters']
  return master_name in supported
def StepIsSupportedForMaster(step_name, master_name):
  """Determines whether or not a step is supported for the given build master.

  Args:
    step_name: The name of the step to check.
    master_name: The name of the build master to check.

  Returns:
    True if Findit supports analyzing the failure, False otherwise.

  Rules:
    1. If a master is not supported, then neither are any of its steps.
    2. If a master specifies check_global = True, then all of its steps are
       supported except those blacklisted under global.
    3. If a master specifies check_global = True, but also specifies a
       supported_steps, then supported_steps overrides any blacklisted
       steps under global.
    4. If a master specifies check_global = True, but also specifies its own
       unsupported_steps, those unsupported_steps are in addition to those
       under global.
    5. If a master specifies check_global = False, then all steps under
       'supported_steps' are always supported and nothing else.
       'unsupported_steps' is not allowed.
  """
  if not MasterIsSupported(master_name):
    return False  # Rule 1.
  rules = GetStepsForMastersRules()
  master_rules = rules['supported_masters'][master_name]
  if not master_rules.get('check_global', True):
    # Rule 5: only the explicit whitelist applies.
    return step_name in master_rules['supported_steps']
  if step_name in master_rules.get('supported_steps', []):
    return True  # Rule 3: the whitelist beats every blacklist.
  if step_name in master_rules.get('unsupported_steps', []):
    return False  # Rule 4: master-local blacklist.
  # Rule 2: otherwise only the global blacklist can exclude the step.
  return step_name not in rules['global'].get('unsupported_steps', [])
def EnableStrictRegexForCompileLinkFailures(wf_mastername, wf_buildername):
  """Returns True if strict regex should be used for the given builder."""
  builders = FinditConfig.Get().builders_to_trybots
  builder_config = builders.get(wf_mastername, {}).get(wf_buildername, {})
  return builder_config.get('strict_regex', False)
def ShouldSkipTestTryJobs(wf_mastername, wf_buildername):
  """Returns True if test try jobs should be skipped for the builder.

  By default test try jobs are supported, unless the master/builder
  configuration specifies to bail out.

  Args:
    wf_mastername: The mastername of a waterfall builder.
    wf_buildername: The buildername of a waterfall builder.

  Returns:
    True if test try jobs are to be skipped, False otherwise.
  """
  builders = FinditConfig.Get().builders_to_trybots
  builder_config = builders.get(wf_mastername, {}).get(wf_buildername, {})
  return builder_config.get('not_run_tests', False)
def GetTryJobSettings():
  """Returns the try job settings from the current config.

  FinditConfig.Get() is called on the class, consistent with
  GetStepsForMastersRules and GetFlakeDetectionSettings; the previous
  FinditConfig().Get() form needlessly instantiated the model first.
  """
  return FinditConfig.Get().try_job_settings
def GetSwarmingSettings():
  """Returns the swarming settings from the current config."""
  return FinditConfig.Get().swarming_settings
def GetDownloadBuildDataSettings():
  """Returns the settings for downloading build data."""
  return FinditConfig.Get().download_build_data_settings
def GetActionSettings():
  """Returns the auto-action settings from the current config."""
  return FinditConfig.Get().action_settings
def GetCheckFlakeSettings():
  """Returns the check-flake settings from the current config."""
  return FinditConfig.Get().check_flake_settings
def GetFlakeDetectionSettings():
  """Returns the flake-detection settings from the current config."""
  return FinditConfig.Get().flake_detection_settings
def GetCodeCoverageSettings():
  """Returns the code-coverage settings from the current config."""
  return FinditConfig.Get().code_coverage_settings
def GetCodeReviewSettings():
return FinditConfig().Get().code_review_settings | appengine/findit/waterfall/waterfall_config.py | """Determines support level for different steps for masters."""
from services.deps import GetOSPlatformName
from model.wf_config import FinditConfig
# Explicitly list unsupported masters. Additional work might be needed in order
# to support them.
# NOTE(review): not referenced by any function in this file's visible portion;
# presumably consumed elsewhere -- confirm before removing.
_UNSUPPORTED_MASTERS = [
    'chromium.lkgr',  # Disable as results are not showed on Sheriff-o-Matic.
    'chromium.gpu',  # Disable as too many false positives.
    'chromium.memory.fyi',
    'chromium.gpu.fyi',
    'chromium.perf',
]
def _ConvertOldMastersFormatToNew(masters_to_blacklisted_steps):
"""Converts the old masters format to the new rules dict.
Args:
masters_to_blacklisted_steps: A dict in the format:
{
'master1': ['step1', 'step2', ...],
'master2': ['step3', 'step4', ...]
}
Returns:
A dict in the latest rules dict format:
{
'supported_masters': {
'master1': {
'unsupported_steps: ['step1', 'step2', ...], (if any)
}
},
'global': {}
}
"""
supported_masters = {}
steps_for_masters_rules_in_latest_format = {
'supported_masters': supported_masters,
'global': {}
}
for master, unsupported_steps in masters_to_blacklisted_steps.iteritems():
supported_masters[master] = {}
if unsupported_steps:
supported_masters[master]['unsupported_steps'] = unsupported_steps
return steps_for_masters_rules_in_latest_format
def GetStepsForMastersRules(settings=None, version=None):
  """Returns the steps-for-masters rules dict, converting legacy config."""
  config = settings if settings is not None else FinditConfig.Get(version)
  rules = config.steps_for_masters_rules
  if not rules:
    # Fall back to the legacy masters_to_blacklisted_steps representation.
    rules = _ConvertOldMastersFormatToNew(config.masters_to_blacklisted_steps)
  return rules
def MasterIsSupported(master_name):
  """Returns ``True`` if the given master is supported, otherwise ``False``."""
  supported = GetStepsForMastersRules()['supported_masters']
  return master_name in supported
def StepIsSupportedForMaster(step_name, master_name):
  """Determines whether or not a step is supported for the given build master.

  Args:
    step_name: The name of the step to check.
    master_name: The name of the build master to check.

  Returns:
    True if Findit supports analyzing the failure, False otherwise.

  Rules:
    1. If a master is not supported, then neither are any of its steps.
    2. If a master specifies check_global = True, then all of its steps are
       supported except those blacklisted under global.
    3. If a master specifies check_global = True, but also specifies a
       supported_steps, then supported_steps overrides any blacklisted
       steps under global.
    4. If a master specifies check_global = True, but also specifies its own
       unsupported_steps, those unsupported_steps are in addition to those
       under global.
    5. If a master specifies check_global = False, then all steps under
       'supported_steps' are always supported and nothing else.
       'unsupported_steps' is not allowed.
  """
  if not MasterIsSupported(master_name):
    return False  # Rule 1.
  rules = GetStepsForMastersRules()
  master_rules = rules['supported_masters'][master_name]
  if not master_rules.get('check_global', True):
    # Rule 5: only the explicit whitelist applies.
    return step_name in master_rules['supported_steps']
  if step_name in master_rules.get('supported_steps', []):
    return True  # Rule 3: the whitelist beats every blacklist.
  if step_name in master_rules.get('unsupported_steps', []):
    return False  # Rule 4: master-local blacklist.
  # Rule 2: otherwise only the global blacklist can exclude the step.
  return step_name not in rules['global'].get('unsupported_steps', [])
def EnableStrictRegexForCompileLinkFailures(wf_mastername, wf_buildername):
  """Returns True if strict regex should be used for the given builder."""
  builders = FinditConfig.Get().builders_to_trybots
  builder_config = builders.get(wf_mastername, {}).get(wf_buildername, {})
  return builder_config.get('strict_regex', False)
def ShouldSkipTestTryJobs(wf_mastername, wf_buildername):
  """Returns True if test try jobs should be skipped for the builder.

  By default test try jobs are supported, unless the master/builder
  configuration specifies to bail out.

  Args:
    wf_mastername: The mastername of a waterfall builder.
    wf_buildername: The buildername of a waterfall builder.

  Returns:
    True if test try jobs are to be skipped, False otherwise.
  """
  builders = FinditConfig.Get().builders_to_trybots
  builder_config = builders.get(wf_mastername, {}).get(wf_buildername, {})
  return builder_config.get('not_run_tests', False)
def GetTryJobSettings():
  """Returns the try job settings from the current config.

  FinditConfig.Get() is called on the class, consistent with
  GetStepsForMastersRules and GetFlakeDetectionSettings; the previous
  FinditConfig().Get() form needlessly instantiated the model first.
  """
  return FinditConfig.Get().try_job_settings
def GetSwarmingSettings():
  """Returns the swarming settings from the current config."""
  return FinditConfig.Get().swarming_settings
def GetDownloadBuildDataSettings():
  """Returns the settings for downloading build data."""
  return FinditConfig.Get().download_build_data_settings
def GetActionSettings():
  """Returns the auto-action settings from the current config."""
  return FinditConfig.Get().action_settings
def GetCheckFlakeSettings():
  """Returns the check-flake settings from the current config."""
  return FinditConfig.Get().check_flake_settings
def GetFlakeDetectionSettings():
  """Returns the flake-detection settings from the current config."""
  return FinditConfig.Get().flake_detection_settings
def GetCodeCoverageSettings():
  """Returns the code-coverage settings from the current config."""
  return FinditConfig.Get().code_coverage_settings
def GetCodeReviewSettings():
return FinditConfig().Get().code_review_settings | 0.907985 | 0.599485 |
import numpy as np
from qutip.qip.gates import globalphase
class CircuitProcessor(object):
    """
    Base class for representation of the physical implementation of a quantum
    program/algorithm on a specified qubit system.
    """
    # NOTE(review): sub-classes are expected to supply ``self.T_list`` (pulse
    # durations) and ``self.global_phase`` -- neither is initialised here;
    # confirm against the concrete processor implementations.
    def __init__(self, N, correct_global_phase):
        """
        Parameters
        ----------
        N: Integer
            The number of qubits in the system.
        correct_global_phase: Boolean
            Check if the global phases should be included in the final result.
        """
        self.N = N
        self.correct_global_phase = correct_global_phase
    def optimize_circuit(self, qc):
        """
        Function to take a quantum circuit/algorithm and convert it into the
        optimal form/basis for the desired physical system.
        Parameters
        ----------
        qc: QubitCircuit
            Takes the quantum circuit to be implemented.
        Returns
        --------
        qc: QubitCircuit
            The optimal circuit representation.
        """
        raise NotImplementedError("Use the function in the sub-class")
    def adjacent_gates(self, qc, setup):
        """
        Function to take a quantum circuit/algorithm and convert it into the
        optimal form/basis for the desired physical system.
        Parameters
        ----------
        qc: QubitCircuit
            Takes the quantum circuit to be implemented.
        setup: String
            Takes the nature of the spin chain; linear or circular.
        Returns
        --------
        qc: QubitCircuit
            The resolved circuit representation.
        """
        raise NotImplementedError("Use the function in the sub-class")
    def load_circuit(self, qc):
        """
        Translates an abstract quantum circuit to its corresponding Hamiltonian
        for a specific model.
        Parameters
        ----------
        qc: QubitCircuit
            Takes the quantum circuit to be implemented.
        """
        raise NotImplementedError("Use the function in the sub-class")
    def get_ops_and_u(self):
        """
        Returns the Hamiltonian operators and corresponding values by stacking
        them together.
        """
        raise NotImplementedError("Use the function in the sub-class")
    def get_ops_labels(self):
        """
        Returns the Hamiltonian operators and corresponding labels by stacking
        them together.
        """
        # Intentionally a no-op in the base class; sub-classes override this
        # to provide labels for pulse_matrix()/plot_pulses().
        pass
    def eliminate_auxillary_modes(self, U):
        # Hook for sub-classes with auxiliary (non-qubit) modes; the base
        # implementation leaves the propagator untouched.
        return U
    def run(self, qc=None):
        """
        Generates the propagator matrix by running the Hamiltonian for the
        appropriate time duration for the desired physical system.
        Parameters
        ----------
        qc: QubitCircuit
            Takes the quantum circuit to be implemented.
        Returns
        --------
        U_list: list
            The propagator matrix obtained from the physical implementation.
        """
        if qc:
            self.load_circuit(qc)
        U_list = []
        H_ops, H_u = self.get_ops_and_u()
        # One propagator per time slice: U_n = exp(-1j * H_n * T_n), where H_n
        # is the amplitude-weighted sum of the control Hamiltonians.
        for n in range(len(self.T_list)):
            H = sum([H_u[n, m] * H_ops[m] for m in range(len(H_ops))])
            U = (-1j * H * self.T_list[n]).expm()
            U = self.eliminate_auxillary_modes(U)
            U_list.append(U)
        # Optionally append a pure global-phase gate so the overall phase
        # matches the ideal circuit.
        if self.correct_global_phase and self.global_phase != 0:
            U_list.append(globalphase(self.global_phase, N=self.N))
        return U_list
    def run_state(self, qc=None, states=None):
        """
        Generates the propagator matrix by running the Hamiltonian for the
        appropriate time duration for the desired physical system with the
        given initial state of the qubit register.
        Parameters
        ----------
        qc: QubitCircuit
            Takes the quantum circuit to be implemented.
        states: Qobj
            Initial state of the qubits in the register.
        Returns
        --------
        U_list: list
            The propagator matrix obtained from the physical implementation.
        """
        if states is None:
            raise NotImplementedError("Qubit state not defined.")
        if qc:
            self.load_circuit(qc)
        # NOTE(review): the initial state is prepended and the per-slice
        # propagators are appended *unapplied*; the caller appears to be
        # responsible for multiplying through the list -- confirm.
        U_list = [states]
        H_ops, H_u = self.get_ops_and_u()
        for n in range(len(self.T_list)):
            H = sum([H_u[n, m] * H_ops[m] for m in range(len(H_ops))])
            U = (-1j * H * self.T_list[n]).expm()
            U = self.eliminate_auxillary_modes(U)
            U_list.append(U)
        if self.correct_global_phase and self.global_phase != 0:
            U_list.append(globalphase(self.global_phase, N=self.N))
        return U_list
    def pulse_matrix(self):
        """
        Generates the pulse matrix for the desired physical system.
        Returns
        --------
        t, u, labels:
            Returns the total time and label for every operation.
        """
        dt = 0.01  # time-grid resolution for the piecewise-constant pulses
        H_ops, H_u = self.get_ops_and_u()
        t_tot = sum(self.T_list)
        n_t = int(np.ceil(t_tot / dt))
        n_ops = len(H_ops)
        t = np.linspace(0, t_tot, n_t)
        u = np.zeros((n_ops, n_t))
        t_start = 0
        # Fill u[m, :] with the amplitude of control m over each time slice.
        for n in range(len(self.T_list)):
            t_idx_len = int(np.floor(self.T_list[n] / dt))
            mm = 0
            for m in range(len(H_ops)):
                u[mm, t_start:(t_start + t_idx_len)] = (np.ones(t_idx_len) *
                                                        H_u[n, m])
                mm += 1
            t_start += t_idx_len
        return t, u, self.get_ops_labels()
    def plot_pulses(self):
        """
        Maps the physical interaction between the circuit components for the
        desired physical system.
        Returns
        --------
        fig, ax: Figure
            Maps the physical interaction between the circuit components.
        """
        # Imported lazily so the processor can be used without matplotlib.
        import matplotlib.pyplot as plt
        t, u, u_labels = self.pulse_matrix()
        fig, ax = plt.subplots(1, 1, figsize=(12, 6))
        for n, uu in enumerate(u):
            ax.plot(t, u[n], label=u_labels[n])
        ax.axis('tight')
        ax.set_ylim(-1.5 * 2 * np.pi, 1.5 * 2 * np.pi)
        ax.legend(loc='center left',
                  bbox_to_anchor=(1, 0.5), ncol=(1 + len(u) // 16))
        fig.tight_layout()
return fig, ax | qutip/qip/models/circuitprocessor.py | import numpy as np
from qutip.qip.gates import globalphase
class CircuitProcessor(object):
"""
Base class for representation of the physical implementation of a quantum
program/algorithm on a specified qubit system.
"""
def __init__(self, N, correct_global_phase):
"""
Parameters
----------
N: Integer
The number of qubits in the system.
correct_global_phase: Boolean
Check if the global phases should be included in the final result.
"""
self.N = N
self.correct_global_phase = correct_global_phase
def optimize_circuit(self, qc):
"""
Function to take a quantum circuit/algorithm and convert it into the
optimal form/basis for the desired physical system.
Parameters
----------
qc: QubitCircuit
Takes the quantum circuit to be implemented.
Returns
--------
qc: QubitCircuit
The optimal circuit representation.
"""
raise NotImplementedError("Use the function in the sub-class")
def adjacent_gates(self, qc, setup):
"""
Function to take a quantum circuit/algorithm and convert it into the
optimal form/basis for the desired physical system.
Parameters
----------
qc: QubitCircuit
Takes the quantum circuit to be implemented.
setup: String
Takes the nature of the spin chain; linear or circular.
Returns
--------
qc: QubitCircuit
The resolved circuit representation.
"""
raise NotImplementedError("Use the function in the sub-class")
def load_circuit(self, qc):
"""
Translates an abstract quantum circuit to its corresponding Hamiltonian
for a specific model.
Parameters
----------
qc: QubitCircuit
Takes the quantum circuit to be implemented.
"""
raise NotImplementedError("Use the function in the sub-class")
def get_ops_and_u(self):
"""
Returns the Hamiltonian operators and corresponding values by stacking
them together.
"""
raise NotImplementedError("Use the function in the sub-class")
def get_ops_labels(self):
"""
Returns the Hamiltonian operators and corresponding labels by stacking
them together.
"""
pass
def eliminate_auxillary_modes(self, U):
return U
def run(self, qc=None):
"""
Generates the propagator matrix by running the Hamiltonian for the
appropriate time duration for the desired physical system.
Parameters
----------
qc: QubitCircuit
Takes the quantum circuit to be implemented.
Returns
--------
U_list: list
The propagator matrix obtained from the physical implementation.
"""
if qc:
self.load_circuit(qc)
U_list = []
H_ops, H_u = self.get_ops_and_u()
for n in range(len(self.T_list)):
H = sum([H_u[n, m] * H_ops[m] for m in range(len(H_ops))])
U = (-1j * H * self.T_list[n]).expm()
U = self.eliminate_auxillary_modes(U)
U_list.append(U)
if self.correct_global_phase and self.global_phase != 0:
U_list.append(globalphase(self.global_phase, N=self.N))
return U_list
def run_state(self, qc=None, states=None):
"""
Generates the propagator matrix by running the Hamiltonian for the
appropriate time duration for the desired physical system with the
given initial state of the qubit register.
Parameters
----------
qc: QubitCircuit
Takes the quantum circuit to be implemented.
states: Qobj
Initial state of the qubits in the register.
Returns
--------
U_list: list
The propagator matrix obtained from the physical implementation.
"""
if states is None:
raise NotImplementedError("Qubit state not defined.")
if qc:
self.load_circuit(qc)
U_list = [states]
H_ops, H_u = self.get_ops_and_u()
for n in range(len(self.T_list)):
H = sum([H_u[n, m] * H_ops[m] for m in range(len(H_ops))])
U = (-1j * H * self.T_list[n]).expm()
U = self.eliminate_auxillary_modes(U)
U_list.append(U)
if self.correct_global_phase and self.global_phase != 0:
U_list.append(globalphase(self.global_phase, N=self.N))
return U_list
def pulse_matrix(self):
"""
Generates the pulse matrix for the desired physical system.
Returns
--------
t, u, labels:
Returns the total time and label for every operation.
"""
dt = 0.01
H_ops, H_u = self.get_ops_and_u()
t_tot = sum(self.T_list)
n_t = int(np.ceil(t_tot / dt))
n_ops = len(H_ops)
t = np.linspace(0, t_tot, n_t)
u = np.zeros((n_ops, n_t))
t_start = 0
for n in range(len(self.T_list)):
t_idx_len = int(np.floor(self.T_list[n] / dt))
mm = 0
for m in range(len(H_ops)):
u[mm, t_start:(t_start + t_idx_len)] = (np.ones(t_idx_len) *
H_u[n, m])
mm += 1
t_start += t_idx_len
return t, u, self.get_ops_labels()
def plot_pulses(self):
"""
Maps the physical interaction between the circuit components for the
desired physical system.
Returns
--------
fig, ax: Figure
Maps the physical interaction between the circuit components.
"""
import matplotlib.pyplot as plt
t, u, u_labels = self.pulse_matrix()
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
for n, uu in enumerate(u):
ax.plot(t, u[n], label=u_labels[n])
ax.axis('tight')
ax.set_ylim(-1.5 * 2 * np.pi, 1.5 * 2 * np.pi)
ax.legend(loc='center left',
bbox_to_anchor=(1, 0.5), ncol=(1 + len(u) // 16))
fig.tight_layout()
return fig, ax | 0.956912 | 0.764716 |
import argparse
parser = argparse.ArgumentParser(description=
"""
This script train PACIFIC using fasta reads.
The model, training and validation
plots will be generated.
Also a model, tokenizer and label_maker will be generated
""")
OPTIONAL = parser._action_groups.pop()
REQUIRED = parser.add_argument_group('required arguments')
#Inputs
REQUIRED.add_argument("--Coronaviridae_reads",
help="file path to folder containing Coronaviridae fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Influenza_reads",
help="file path to folder containing Influenza fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Metapneumovirus_reads",
help="file path to folder containing Metapneumovirus fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Rhinovirus_reads",
help="file path to folder containing Rhinovirus fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Sars_cov_2_reads",
help="file path to folder containing SARS-CoV-2 fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Human_reads",
help="file path to folder containing Human fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("-t", "--tokenizer",
help="Tokenizer file path",
metavar='\b',
required=True)
REQUIRED.add_argument("--new_species_path",
help='coma separated path to the folder or folders containing new species of virus to train',
required=True,
)
REQUIRED.add_argument("--new_species_name",
help='coma separated name of the new species to train',
required=True,
)
#arguments
OPTIONAL.add_argument("--out_folder",
help='path to the output folder',
default="./")
OPTIONAL.add_argument("--k_mers",
help='K-mer number use to train the model',
default=9,
type=int)
OPTIONAL.add_argument("--model_name",
help='Name used to save the model',
default="PACIFIC")
OPTIONAL.add_argument("--GPU",
help='If True PACIFIC will be train using CuDNNLSTM',
default=False,
)
OPTIONAL.add_argument("--file_type",
help='fasta or fastq training files format (all files should have same format)',
default='fasta',
)
OPTIONAL.add_argument("--accuracy_limit",
help='Stop the training when all individual classes accuracies reaches that level',
default=0.999,
)
parser._action_groups.append(OPTIONAL)
ARGS = parser.parse_args()
# Inputs
CORONAVIRIDAE_READS = ARGS.Coronaviridae_reads
INFLUENZA_READS = ARGS.Influenza_reads
METAPMEUMOVIRUS_READS = ARGS.Metapneumovirus_reads
RHINOVIRUS_READS = ARGS.Rhinovirus_reads
SARS_COV_2_READS = ARGS.Sars_cov_2_reads
HUMAN_READS = ARGS.Human_reads
NEW_SPECIES_PATH = ARGS.new_species_path
NEW_SPECIES_NAME = ARGS.new_species_name
TOKENIZER = ARGS.tokenizer
# Arguments
OUT_FOLDER = ARGS.out_folder
K_MERS = ARGS.k_mers
MODEL_NAME = ARGS.model_name
GPU = ARGS.GPU
FILE_TYPE = ARGS.file_type
ACCURACY_LIMIT = ARGS.accuracy_limit
from Bio import SeqIO
import random
import os
from sklearn.preprocessing import LabelBinarizer
import numpy as np
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Bidirectional, Conv1D, CuDNNLSTM
from keras.layers import Dropout, Activation, MaxPooling1D
import tensorflow as tf
from numpy.random import seed
import pickle
from datetime import datetime
from sklearn.utils import shuffle
from keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
import keras
tf.random.set_seed(42)
def prepare_read(trancriptome, file_type):
'''
function will take tranciprtome and make reads
'''
fasta_sequences = SeqIO.parse(open(trancriptome),file_type)
sequences = []
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
sequences.append(sequence)
return sequences
def process_reads(sequences, length, kmer):
'''
'''
r_reads = []
for i in enumerate(sequences):
# check the reads does not contain weird characters
if all(c in 'AGCT' for c in i[1].upper()):
r_reads.append(' '.join(i[1][x:x+kmer].upper() for x in range(len(i[1]) - kmer + 1)))
return r_reads
def main(directory, size_lenght, k_mer_size, file_type):
'''
'''
files = os.listdir(directory)
reads = []
for file in files:
all_transcripts = prepare_read(directory+'/'+file, file_type)
reads += process_reads(all_transcripts,
size_lenght,
k_mer_size)
return reads
def accuracy(labels, predictions):
'''
calculate accuracy
'''
if labels.shape != predictions.shape:
print('labels and predictions does not have same dimentions')
return False
correct = 0
for i in range(len(labels)):
if labels[i] == predictions[i]:
correct +=1
return correct/len(labels)
if __name__ == '__main__':
seed_value = 42
random.seed(seed_value)# 3. Set `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)# 4. Set `tensorflow` pseudo-random generator at a fixed value
# Create output folder if it does not exist
if os.path.isdir(OUT_FOLDER) is False:
print('Creating output folder '+OUT_FOLDER)
os.mkdir(OUT_FOLDER)
# Read lenght
read_lenght = 150
# get synthetic reads
print('Loading Coronaviridae reads')
Coronaviridae_reads = main(CORONAVIRIDAE_READS,
read_lenght,
K_MERS,
FILE_TYPE,
)
print('Loading Influenza reads')
Influenza_reads = main(INFLUENZA_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading Metapneumovirus reads')
Metapneumovirus_reads = main(METAPMEUMOVIRUS_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading Rhinovirus reads')
Rhinovirus_reads = main(RHINOVIRUS_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading SARS-CoV-2 reads')
Sars_cov_2_reads = main(SARS_COV_2_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading Human reads')
Human = main(HUMAN_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('loading new species')
NEW_SPECIES_PATH = NEW_SPECIES_PATH.split(',')
NEW_SPECIES_NAME = NEW_SPECIES_NAME.split(',')
new_species_sequences = {}
for i in enumerate(NEW_SPECIES_PATH):
new_species_sequences[NEW_SPECIES_NAME[i[0]]] = main(i[1],
read_lenght,
K_MERS,
FILE_TYPE
)
total_sequences = Coronaviridae_reads + \
Influenza_reads +\
Metapneumovirus_reads +\
Rhinovirus_reads +\
Sars_cov_2_reads +\
Human
for i in enumerate(new_species_sequences.keys()):
total_sequences += new_species_sequences[i[1]]
labels_to_fit = ['Coronaviridae','Influenza',"Metapneumovirus","Rhinovirus","Sars_cov_2", 'Human']
for i in enumerate(NEW_SPECIES_NAME):
labels_to_fit += [i[1]]
label_maker = LabelBinarizer()
transfomed_label = label_maker.fit(labels_to_fit)
# save label_maker
print('Saving object to convert output to labels '+ OUT_FOLDER+'/label_maker.'+MODEL_NAME+'.pickle')
with open(OUT_FOLDER+'/label_maker.'+MODEL_NAME+'.pickle', 'wb') as handle:
pickle.dump(label_maker, handle, protocol=pickle.HIGHEST_PROTOCOL)
labels = list(np.repeat('Coronaviridae',len(Coronaviridae_reads))) + \
list(np.repeat('Influenza',len(Influenza_reads))) + \
list(np.repeat('Metapneumovirus',len(Metapneumovirus_reads))) + \
list(np.repeat('Rhinovirus',len(Rhinovirus_reads))) + \
list(np.repeat('Sars_cov_2',len(Sars_cov_2_reads))) + \
list(np.repeat('Human',len(Human)))
for i in enumerate(NEW_SPECIES_NAME):
labels += list(np.repeat(i[1], len(new_species_sequences[i[1]])))
labels_proces = label_maker.transform(labels)
# Import the tokenizer already trainned
with open(TOKENIZER, 'rb') as handle:
tokenizer = pickle.load(handle)
tokenizer.fit_on_texts(total_sequences)
print('Converting reads into k-mers of lenght '+str(K_MERS))
sequences_preproces = tokenizer.texts_to_sequences(total_sequences)
# pad sequences
sequences_preproces = pad_sequences(sequences_preproces, maxlen = 142, padding = 'post')
print('Saving tokenizer object '+ OUT_FOLDER+'/tokenizer.'+MODEL_NAME+'.pickle')
with open(OUT_FOLDER+'/tokenizer.'+MODEL_NAME+'.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
sequences_preproces, labels_proces = shuffle(sequences_preproces, labels_proces)
number_labels = 6 + len(NEW_SPECIES_NAME)
max_features = len(tokenizer.word_index)+1
# Convolution
kernel_size = 3
filters = 128
pool_size = 3
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
epochs = 1
# Define the model the model
model = Sequential()
model.add(Embedding(max_features, 100, input_length=sequences_preproces.shape[1]))
model.add(Dropout(0.20))
model.add(Conv1D(filters,
kernel_size,
padding='same',
activation='relu',
strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Dropout(0.1))
if GPU == True:
model.add(Bidirectional(CuDNNLSTM(lstm_output_size)))
else:
model.add(Bidirectional(LSTM(lstm_output_size)))
model.add(Dropout(0.1))
model.add(Dense(50))
model.add(Dense(6))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['binary_accuracy',
'categorical_accuracy',
])
model.load_weights('/media/labuser/Data/pacific/model/pacific.01.pacific_9mers_nonGPU.h5')
#Delete last layers to change the number of neurons
model.pop()
model.pop()
model.add(Dense(len(labels_to_fit)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='Adam',
metrics=['binary_accuracy',
'categorical_accuracy',
])
model.summary()
# training time
#now = datetime.now().time() # time object
histories = []
print('Train...')
for epoch in range(epochs):
print("epoch %d" %epoch)
#train in batches of 200k sequences
for chunks in range(0, len(sequences_preproces), 200000):
start, end = chunks, chunks+200000
if end > len(sequences_preproces):
end = len(sequences_preproces)
print('chunk: ',start, end)
training_batch = sequences_preproces[start:end]
labels_batch = labels_proces[start:end]
X_train,X_test,y_train,y_test = train_test_split(training_batch,
labels_batch,
test_size=0.10,
random_state=42)
chunk_history = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=1,
validation_data=(X_test, y_test)
)
inverser = label_maker.inverse_transform(y_test)
accuracies_per_class = []
for i in labels_to_fit:
index = np.where(inverser==i)
X_test_subselect = X_test[list(index[0])]
y_test_subselect = y_test[list(index[0])]
print('accuracy '+i)
predictions = np.where(model.predict(X_test_subselect) > 0.5, 1, 0)
right = 0
for j in enumerate(predictions):
if np.argmax(j[1]) == np.argmax(y_test_subselect[j[0]]):
right +=1
print(right/len(predictions))
print()
accuracies_per_class.append(right/len(predictions))
histories.append(chunk_history)
# check if all classes have equal or more than 0.99 accuracy
if all([i>=ACCURACY_LIMIT for i in accuracies_per_class]):
#now = datetime.now().time() # time object
print()
break
# save keras model
model.save(OUT_FOLDER+'/'+MODEL_NAME+".h5")
print("Saved model to disk")
#### plot the accuracies and losses
bi_acc = []
cat_acc = []
loss = []
val_bi_acc = []
val_cat_acc = []
val_loss = []
for i in histories:
bi_acc.append(i.history['binary_accuracy'][0])
cat_acc.append(i.history['categorical_accuracy'][0])
loss.append(i.history['loss'][0])
val_bi_acc.append(i.history['val_binary_accuracy'][0])
val_cat_acc.append(i.history['val_categorical_accuracy'][0])
val_loss.append(i.history['val_loss'][0])
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(bi_acc), palette="tab10", linewidth=2.5, label='Binary accuracy')
sns.lineplot(x=np.arange(len(histories)), y=np.array(cat_acc), palette="tab10", linewidth=2.5, label='Categorical accuracy')
plt.ylabel('Accuracies')
plt.ylabel('Accuracies')
plt.savefig(OUT_FOLDER+'/trainning_accuracy_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0)
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(loss), palette="tab10", linewidth=2.5, label='loss')
plt.ylabel('Loss')
plt.savefig(OUT_FOLDER+'/training_loss_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0)
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(val_bi_acc), palette="tab10", linewidth=2.5, label='Validation binary accuracy')
sns.lineplot(x=np.arange(len(histories)), y=np.array(val_cat_acc), palette="tab10", linewidth=2.5, label='Validation categorical accuracy')
plt.ylabel('Percentage of predicted reads')
plt.savefig(OUT_FOLDER+'/val_training_accuracy_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0)
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(val_loss), palette="tab10", linewidth=2.5, label='Validation loss')
plt.ylabel('Loss')
plt.savefig(OUT_FOLDER+'/val_loss_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0) | scripts/transfer_learning_pacific.py | import argparse
parser = argparse.ArgumentParser(description=
"""
This script train PACIFIC using fasta reads.
The model, training and validation
plots will be generated.
Also a model, tokenizer and label_maker will be generated
""")
OPTIONAL = parser._action_groups.pop()
REQUIRED = parser.add_argument_group('required arguments')
#Inputs
REQUIRED.add_argument("--Coronaviridae_reads",
help="file path to folder containing Coronaviridae fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Influenza_reads",
help="file path to folder containing Influenza fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Metapneumovirus_reads",
help="file path to folder containing Metapneumovirus fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Rhinovirus_reads",
help="file path to folder containing Rhinovirus fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Sars_cov_2_reads",
help="file path to folder containing SARS-CoV-2 fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("--Human_reads",
help="file path to folder containing Human fasta files to train PACIFIC",
required=True)
REQUIRED.add_argument("-t", "--tokenizer",
help="Tokenizer file path",
metavar='\b',
required=True)
REQUIRED.add_argument("--new_species_path",
help='coma separated path to the folder or folders containing new species of virus to train',
required=True,
)
REQUIRED.add_argument("--new_species_name",
help='coma separated name of the new species to train',
required=True,
)
#arguments
OPTIONAL.add_argument("--out_folder",
help='path to the output folder',
default="./")
OPTIONAL.add_argument("--k_mers",
help='K-mer number use to train the model',
default=9,
type=int)
OPTIONAL.add_argument("--model_name",
help='Name used to save the model',
default="PACIFIC")
OPTIONAL.add_argument("--GPU",
help='If True PACIFIC will be train using CuDNNLSTM',
default=False,
)
OPTIONAL.add_argument("--file_type",
help='fasta or fastq training files format (all files should have same format)',
default='fasta',
)
OPTIONAL.add_argument("--accuracy_limit",
help='Stop the training when all individual classes accuracies reaches that level',
default=0.999,
)
parser._action_groups.append(OPTIONAL)
ARGS = parser.parse_args()
# Inputs
CORONAVIRIDAE_READS = ARGS.Coronaviridae_reads
INFLUENZA_READS = ARGS.Influenza_reads
METAPMEUMOVIRUS_READS = ARGS.Metapneumovirus_reads
RHINOVIRUS_READS = ARGS.Rhinovirus_reads
SARS_COV_2_READS = ARGS.Sars_cov_2_reads
HUMAN_READS = ARGS.Human_reads
NEW_SPECIES_PATH = ARGS.new_species_path
NEW_SPECIES_NAME = ARGS.new_species_name
TOKENIZER = ARGS.tokenizer
# Arguments
OUT_FOLDER = ARGS.out_folder
K_MERS = ARGS.k_mers
MODEL_NAME = ARGS.model_name
GPU = ARGS.GPU
FILE_TYPE = ARGS.file_type
ACCURACY_LIMIT = ARGS.accuracy_limit
from Bio import SeqIO
import random
import os
from sklearn.preprocessing import LabelBinarizer
import numpy as np
from keras.preprocessing.text import Tokenizer
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Bidirectional, Conv1D, CuDNNLSTM
from keras.layers import Dropout, Activation, MaxPooling1D
import tensorflow as tf
from numpy.random import seed
import pickle
from datetime import datetime
from sklearn.utils import shuffle
from keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
import keras
tf.random.set_seed(42)
def prepare_read(trancriptome, file_type):
'''
function will take tranciprtome and make reads
'''
fasta_sequences = SeqIO.parse(open(trancriptome),file_type)
sequences = []
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
sequences.append(sequence)
return sequences
def process_reads(sequences, length, kmer):
'''
'''
r_reads = []
for i in enumerate(sequences):
# check the reads does not contain weird characters
if all(c in 'AGCT' for c in i[1].upper()):
r_reads.append(' '.join(i[1][x:x+kmer].upper() for x in range(len(i[1]) - kmer + 1)))
return r_reads
def main(directory, size_lenght, k_mer_size, file_type):
'''
'''
files = os.listdir(directory)
reads = []
for file in files:
all_transcripts = prepare_read(directory+'/'+file, file_type)
reads += process_reads(all_transcripts,
size_lenght,
k_mer_size)
return reads
def accuracy(labels, predictions):
'''
calculate accuracy
'''
if labels.shape != predictions.shape:
print('labels and predictions does not have same dimentions')
return False
correct = 0
for i in range(len(labels)):
if labels[i] == predictions[i]:
correct +=1
return correct/len(labels)
if __name__ == '__main__':
seed_value = 42
random.seed(seed_value)# 3. Set `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)# 4. Set `tensorflow` pseudo-random generator at a fixed value
# Create output folder if it does not exist
if os.path.isdir(OUT_FOLDER) is False:
print('Creating output folder '+OUT_FOLDER)
os.mkdir(OUT_FOLDER)
# Read lenght
read_lenght = 150
# get synthetic reads
print('Loading Coronaviridae reads')
Coronaviridae_reads = main(CORONAVIRIDAE_READS,
read_lenght,
K_MERS,
FILE_TYPE,
)
print('Loading Influenza reads')
Influenza_reads = main(INFLUENZA_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading Metapneumovirus reads')
Metapneumovirus_reads = main(METAPMEUMOVIRUS_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading Rhinovirus reads')
Rhinovirus_reads = main(RHINOVIRUS_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading SARS-CoV-2 reads')
Sars_cov_2_reads = main(SARS_COV_2_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('Loading Human reads')
Human = main(HUMAN_READS,
read_lenght,
K_MERS,
FILE_TYPE
)
print('loading new species')
NEW_SPECIES_PATH = NEW_SPECIES_PATH.split(',')
NEW_SPECIES_NAME = NEW_SPECIES_NAME.split(',')
new_species_sequences = {}
for i in enumerate(NEW_SPECIES_PATH):
new_species_sequences[NEW_SPECIES_NAME[i[0]]] = main(i[1],
read_lenght,
K_MERS,
FILE_TYPE
)
total_sequences = Coronaviridae_reads + \
Influenza_reads +\
Metapneumovirus_reads +\
Rhinovirus_reads +\
Sars_cov_2_reads +\
Human
for i in enumerate(new_species_sequences.keys()):
total_sequences += new_species_sequences[i[1]]
labels_to_fit = ['Coronaviridae','Influenza',"Metapneumovirus","Rhinovirus","Sars_cov_2", 'Human']
for i in enumerate(NEW_SPECIES_NAME):
labels_to_fit += [i[1]]
label_maker = LabelBinarizer()
transfomed_label = label_maker.fit(labels_to_fit)
# save label_maker
print('Saving object to convert output to labels '+ OUT_FOLDER+'/label_maker.'+MODEL_NAME+'.pickle')
with open(OUT_FOLDER+'/label_maker.'+MODEL_NAME+'.pickle', 'wb') as handle:
pickle.dump(label_maker, handle, protocol=pickle.HIGHEST_PROTOCOL)
labels = list(np.repeat('Coronaviridae',len(Coronaviridae_reads))) + \
list(np.repeat('Influenza',len(Influenza_reads))) + \
list(np.repeat('Metapneumovirus',len(Metapneumovirus_reads))) + \
list(np.repeat('Rhinovirus',len(Rhinovirus_reads))) + \
list(np.repeat('Sars_cov_2',len(Sars_cov_2_reads))) + \
list(np.repeat('Human',len(Human)))
for i in enumerate(NEW_SPECIES_NAME):
labels += list(np.repeat(i[1], len(new_species_sequences[i[1]])))
labels_proces = label_maker.transform(labels)
# Import the tokenizer already trainned
with open(TOKENIZER, 'rb') as handle:
tokenizer = pickle.load(handle)
tokenizer.fit_on_texts(total_sequences)
print('Converting reads into k-mers of lenght '+str(K_MERS))
sequences_preproces = tokenizer.texts_to_sequences(total_sequences)
# pad sequences
sequences_preproces = pad_sequences(sequences_preproces, maxlen = 142, padding = 'post')
print('Saving tokenizer object '+ OUT_FOLDER+'/tokenizer.'+MODEL_NAME+'.pickle')
with open(OUT_FOLDER+'/tokenizer.'+MODEL_NAME+'.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
sequences_preproces, labels_proces = shuffle(sequences_preproces, labels_proces)
number_labels = 6 + len(NEW_SPECIES_NAME)
max_features = len(tokenizer.word_index)+1
# Convolution
kernel_size = 3
filters = 128
pool_size = 3
# LSTM
lstm_output_size = 70
# Training
batch_size = 30
epochs = 1
# Define the model the model
model = Sequential()
model.add(Embedding(max_features, 100, input_length=sequences_preproces.shape[1]))
model.add(Dropout(0.20))
model.add(Conv1D(filters,
kernel_size,
padding='same',
activation='relu',
strides=1))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(Dropout(0.1))
if GPU == True:
model.add(Bidirectional(CuDNNLSTM(lstm_output_size)))
else:
model.add(Bidirectional(LSTM(lstm_output_size)))
model.add(Dropout(0.1))
model.add(Dense(50))
model.add(Dense(6))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['binary_accuracy',
'categorical_accuracy',
])
model.load_weights('/media/labuser/Data/pacific/model/pacific.01.pacific_9mers_nonGPU.h5')
#Delete last layers to change the number of neurons
model.pop()
model.pop()
model.add(Dense(len(labels_to_fit)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='Adam',
metrics=['binary_accuracy',
'categorical_accuracy',
])
model.summary()
# training time
#now = datetime.now().time() # time object
histories = []
print('Train...')
for epoch in range(epochs):
print("epoch %d" %epoch)
#train in batches of 200k sequences
for chunks in range(0, len(sequences_preproces), 200000):
start, end = chunks, chunks+200000
if end > len(sequences_preproces):
end = len(sequences_preproces)
print('chunk: ',start, end)
training_batch = sequences_preproces[start:end]
labels_batch = labels_proces[start:end]
X_train,X_test,y_train,y_test = train_test_split(training_batch,
labels_batch,
test_size=0.10,
random_state=42)
chunk_history = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=1,
validation_data=(X_test, y_test)
)
inverser = label_maker.inverse_transform(y_test)
accuracies_per_class = []
for i in labels_to_fit:
index = np.where(inverser==i)
X_test_subselect = X_test[list(index[0])]
y_test_subselect = y_test[list(index[0])]
print('accuracy '+i)
predictions = np.where(model.predict(X_test_subselect) > 0.5, 1, 0)
right = 0
for j in enumerate(predictions):
if np.argmax(j[1]) == np.argmax(y_test_subselect[j[0]]):
right +=1
print(right/len(predictions))
print()
accuracies_per_class.append(right/len(predictions))
histories.append(chunk_history)
# check if all classes have equal or more than 0.99 accuracy
if all([i>=ACCURACY_LIMIT for i in accuracies_per_class]):
#now = datetime.now().time() # time object
print()
break
# save keras model
model.save(OUT_FOLDER+'/'+MODEL_NAME+".h5")
print("Saved model to disk")
#### plot the accuracies and losses
bi_acc = []
cat_acc = []
loss = []
val_bi_acc = []
val_cat_acc = []
val_loss = []
for i in histories:
bi_acc.append(i.history['binary_accuracy'][0])
cat_acc.append(i.history['categorical_accuracy'][0])
loss.append(i.history['loss'][0])
val_bi_acc.append(i.history['val_binary_accuracy'][0])
val_cat_acc.append(i.history['val_categorical_accuracy'][0])
val_loss.append(i.history['val_loss'][0])
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(bi_acc), palette="tab10", linewidth=2.5, label='Binary accuracy')
sns.lineplot(x=np.arange(len(histories)), y=np.array(cat_acc), palette="tab10", linewidth=2.5, label='Categorical accuracy')
plt.ylabel('Accuracies')
plt.ylabel('Accuracies')
plt.savefig(OUT_FOLDER+'/trainning_accuracy_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0)
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(loss), palette="tab10", linewidth=2.5, label='loss')
plt.ylabel('Loss')
plt.savefig(OUT_FOLDER+'/training_loss_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0)
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(val_bi_acc), palette="tab10", linewidth=2.5, label='Validation binary accuracy')
sns.lineplot(x=np.arange(len(histories)), y=np.array(val_cat_acc), palette="tab10", linewidth=2.5, label='Validation categorical accuracy')
plt.ylabel('Percentage of predicted reads')
plt.savefig(OUT_FOLDER+'/val_training_accuracy_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0)
f, ax = plt.subplots( figsize=(13,9))
sns.lineplot(x=np.arange(len(histories)), y=np.array(val_loss), palette="tab10", linewidth=2.5, label='Validation loss')
plt.ylabel('Loss')
plt.savefig(OUT_FOLDER+'/val_loss_'+MODEL_NAME+'.pdf',
format='pdf',
dpi=1200,
bbox_inches='tight', pad_inches=0) | 0.625324 | 0.26421 |
import unittest
import re
from typing import Pattern, Dict, List
import sqlalchemy
import sqlalchemy.orm
from qaqa_bot import model, game
from .util import CONFIG, create_sample_users
class FullGameTests(unittest.TestCase):
    """End-to-end tests driving the GameServer through complete games.

    Each test creates a fresh database schema with the sample users from
    ``create_sample_users`` and checks the outgoing messages (chat id and
    text pattern) after every interaction.

    Chat-id convention used throughout: group chats are 21/22 and the
    private chat of user ``n`` is ``10 + n`` (Michael=1/11, Jenny=2/12,
    Lukas=3/13, Jannik=4/14).
    """

    def setUp(self) -> None:
        """Create the database schema, sample users and a GameServer."""
        # Setup database schema
        engine = sqlalchemy.create_engine(CONFIG['database']['connection'], isolation_level='SERIALIZABLE', echo=True)
        model.Base.metadata.create_all(engine)
        create_sample_users(engine)
        self.game_server = game.GameServer(CONFIG, engine)

    # Pattern the bot's acknowledgement of a submitted text must match.
    TEXT_SUBMIT_RESPONSE = r"🆗"

    def test_leave_game(self) -> None:
        """Players may leave before/after start; the last two may not."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let all users join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        self.game_server.join_game(21, 3)
        self.game_server.join_game(21, 4)
        # Set rounds
        self.game_server.set_rounds(21, 2)
        # Lukas leaves the game
        msgs = self.game_server.leave_game(21, 3)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("👋 Bye!")})
        # Start game
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("ask a question") for i in (11, 12, 14)}})
        # Jannik leaves the game
        msgs = self.game_server.leave_game(21, 4)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("👋 Bye!"),
                                    14: re.compile("No answer required")})
        # Jenny cannot leave the game
        # NOTE(review): the comment says Jenny (user 2) but user id 4
        # (Jannik, who already left) is passed — confirm intent.
        msgs = self.game_server.leave_game(21, 4)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("one of the last two participants")})

    def test_simple_game(self) -> None:
        """Two players plus a late joiner play a full synchronous game."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let Michael and Jenny join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        # Set rounds
        self.game_server.set_rounds(21, 2)
        # Start game
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("ask a question") for i in (11, 12)}})
        # Write questions
        msgs = self.game_server.submit_text(11, 6, "Question 1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Lukas joins late
        msgs = self.game_server.join_game(21, 3)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("Welcome Lukas"),
                                    13: re.compile("ask a question")})
        # Lukas submits a deliberate typo and fixes it via message edits.
        msgs = self.game_server.submit_text(13, 7, "Quetsion 3")
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.edit_submitted_message(13, 7, "Quetion 3")
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 8, "Question 2")
        self.assertMessagesCorrect(msgs, {11: re.compile(r"(?s)answer.*?Quetion 3"),
                                          12: re.compile(r"(?s)answer.*?Question 1|"
                                                         + self.TEXT_SUBMIT_RESPONSE),
                                          13: re.compile(r"(?s)answer.*?Question 2")})
        msgs = self.game_server.edit_submitted_message(13, 7, "Question 3")
        self.assertMessagesCorrect(msgs,
                                   {11: re.compile(r"Question 3|updated"),
                                    13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Jannik wants to join too, but it's too late
        msgs = self.game_server.join_game(21, 4)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("already started")})
        # Write answers
        msgs = self.game_server.submit_text(11, 9, "Answer 1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Editing the question is no longer accepted once it was passed on.
        msgs = self.game_server.edit_submitted_message(13, 7, "Question Q3")
        self.assertMessagesCorrect(msgs, {13: re.compile(r"not accepted")})
        msgs = self.game_server.submit_text(13, 10, "Answer 3")
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.get_group_status(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile(r"(?s)game is on.*3 sheets.*waiting for Jenny.*Michael.*"
                                                   r"Synchronous: yes")})
        msgs = self.game_server.submit_text(12, 11, "Answer 2")
        self.assertMessagesCorrect(
            msgs,
            {12: re.compile(self.TEXT_SUBMIT_RESPONSE),
             21: re.compile("example.com:9090/game/")})
        msgs = self.game_server.edit_submitted_message(13, 10, "Answer A3")
        self.assertMessagesCorrect(msgs, {13: re.compile(r"not accepted")})

    def test_asynchronous_game(self):
        """In asynchronous mode sheets are passed on immediately."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let Michael, Jenny and Lukas join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        self.game_server.join_game(21, 3)
        # Set settings
        self.game_server.set_rounds(21, 3)
        self.game_server.set_synchronous(21, False)
        # Start game
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("ask a question") for i in (11, 12, 13)}})
        # Let Michael write question
        msgs = self.game_server.submit_text(11, 1, "Question 1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 2, "Question 2")
        self.assertMessagesCorrect(msgs,
                                   {12: re.compile(r"(?s)answer.*?Question 1|" + self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 3, "Answer 2")
        self.assertMessagesCorrect(msgs, {12: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(13, 4, "Question 3")
        self.assertMessagesCorrect(msgs,
                                   {11: re.compile(r"(?s)answer.*?Question 3"),
                                    13: re.compile(r"(?s)answer.*?Question 2|" + self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(13, 5, "Answer 3")
        self.assertMessagesCorrect(msgs,
                                   {13: re.compile(r"(?s)ask a question.*?Answer 2|" + self.TEXT_SUBMIT_RESPONSE)})

    def test_parallel_games(self):
        """One user plays two games at once; sheets queue per user."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let Michael, Jenny join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        # … and another game in the "Serious Group" chat (chat_id=22)
        self.game_server.new_game(22, "Serious Group")
        # Lukas joins both games
        self.game_server.join_game(22, 3)
        self.game_server.join_game(21, 3)
        # The first Game is started (in synchronous mode)
        self.game_server.set_rounds(21, 3)
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("(?s)ask a question.*?Funny Group") for i in (11, 12, 13)}})
        # Michael writes the first question
        msgs = self.game_server.submit_text(11, 12, "Question A1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Jannik and Michael join the second game
        self.game_server.join_game(22, 4)
        self.game_server.join_game(22, 1)
        # The second Game is started (in asynchronous mode)
        self.game_server.set_rounds(22, 3)
        self.game_server.set_synchronous(22, False)
        msgs = self.game_server.start_game(22)
        # Only Michael and Jannik should be asked for a question, Lukas is still working on a question for the first
        # game
        self.assertMessagesCorrect(msgs,
                                   {22: re.compile("📝|Let's go!"),
                                    **{i: re.compile("(?s)ask a question.*?Serious Group") for i in (11, 14)}})
        # We now have the following Sheets:
        # Michael: {G2: }
        # Jenny: {G1: }, {G1: "Question A1" (waiting)}
        # Lukas: {G1: }, {G2: }
        # Jannik: {G2: }
        # Lukas submits two questions
        msgs = self.game_server.submit_text(13, 13, "Question A3")
        # The first question waits for the synchronous game …
        self.assertMessagesCorrect(msgs,
                                   {13: re.compile(r"(?s)ask a question.*?Serious Group|" + self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(13, 14, "Question B3")
        # … the second question is put on Jannik's stack, but he's still working on a question
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Michael submits one question. He should not get the question in Game 1
        msgs = self.game_server.submit_text(11, 15, "Question B1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE),
                                          13: re.compile(r"(?s)answer.*?Question B1")})
        # We now have the following Sheets:
        # Michael: {G1: "Question A3" (waiting)},
        # Jenny: {G1: }, {G1: "Question A1" (waiting)}
        # Lukas: {G2: "Question B1"}
        # Jannik: {G2: }, {G2: "Question B3"},
        # Now, Jenny questions/answers two sheets, the first one triggers a new round in Game 1:
        msgs = self.game_server.submit_text(12, 20, "Question A2")
        self.assertMessagesCorrect(msgs, {11: re.compile(r"(?s)answer.*?Question A3"),
                                          12: re.compile(r"(?s)answer.*?Question A1|" +
                                                         self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 21, "Answer A2")
        self.assertMessagesCorrect(msgs, {12: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # We now have the following Sheets:
        # Michael: {G1: "Question A3"},
        # Jenny: --
        # Lukas: {G2: "Question B1"}, {G1: "Question A2"}, {G1: "Question A1", "Answer A2" (waiting)}
        # Jannik: {G2: }, {G2: "Question B3"},
        msgs = self.game_server.get_user_status(13)
        self.assertMessagesCorrect(msgs,
                                   {13: re.compile(r"(?s)Serious Group.*Funny Group.*2 pending sheets|Question B1")})
        # Now, let's try to stop Game 2 with all sheets answered
        msgs = self.game_server.stop_game(22)
        self.assertMessagesCorrect(msgs,
                                   {14: re.compile(r"No new question required|Question B3")})
        msgs = self.game_server.submit_text(14, 16, "Answer B4")
        self.assertMessagesCorrect(msgs, {14: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(11, 17, "Answer A1")
        self.assertMessagesCorrect(msgs,
                                   {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # We now have the following Sheets:
        # Michael: {G2: "Question B3", "Answer B4"}
        # Jenny: {G1: "Question A3", "Answer A1" (waiting)}
        # Lukas: {G2: "Question B1"}, {G1: "Question A2"}, {G1: "Question A1", "Answer A2" (waiting)},
        #        {G2: "Question B4", "Answer B1"}
        # Jannik: --
        msgs = self.game_server.submit_text(13, 18, "Answer B3")
        self.assertMessagesCorrect(
            msgs,
            {13: re.compile(r"(?s)answer.*Question A2|" + self.TEXT_SUBMIT_RESPONSE),
             22: re.compile("example.com:9090/game/")})
        msgs = self.game_server.submit_text(13, 19, "Answer A3")
        self.assertMessagesCorrect(msgs, {11: re.compile(r"(?s)ask.*?Answer A3"),
                                          12: re.compile(r"(?s)ask.*?Answer A1"),
                                          13: re.compile(r"(?s)ask.*?Answer A2|"
                                                         + self.TEXT_SUBMIT_RESPONSE)})

    def test_shuffle_players(self) -> None:
        """Shuffling the player list changes the reported order."""
        self.game_server.new_game(21, "Funny Group")
        # Let all users join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        self.game_server.join_game(21, 3)
        self.game_server.join_game(21, 4)
        # Player order should be order of joining
        msgs = self.game_server.get_group_status(21)
        self.assertMessagesCorrect(msgs, {21: re.compile(r"(?s)Michael.*Jenny.*Lukas.*Jannik")})
        # After shuffling ...
        msgs = self.game_server.shuffle_players(21)
        self.assertMessagesCorrect(msgs, {21: re.compile(r"🆗")})
        # ... player order should not be the same.
        # NOTE(review): a random shuffle of 4 players can land on the
        # original order (p = 1/24), so this assertion is theoretically
        # flaky unless shuffle_players guarantees a different order.
        msgs = self.game_server.get_group_status(21)
        self.assertMessagesCorrect(msgs, {21: re.compile(r"(?s)Michael(?!.*Jenny.*Lukas.*Jannik)")})

    def assertMessagesCorrect(self, messages: List[game.TranslatedMessage], expected: Dict[int, Pattern]) -> None:
        """Assert every message matches its chat's pattern and that every
        expected chat received at least one message."""
        for message in messages:
            self.assertIn(message.chat_id, expected, f"Message \"{message.text}\" to chat id {message.chat_id}")
            self.assertRegex(message.text, expected[message.chat_id])
        for chat in expected:
            self.assertIn(chat, list(m.chat_id for m in messages), f"No message to chat {chat} found")
import unittest
import re
from typing import Pattern, Dict, List
import sqlalchemy
import sqlalchemy.orm
from qaqa_bot import model, game
from .util import CONFIG, create_sample_users
class FullGameTests(unittest.TestCase):
    """End-to-end tests driving the GameServer through complete games.

    Each test creates a fresh database schema with the sample users from
    ``create_sample_users`` and checks the outgoing messages (chat id and
    text pattern) after every interaction.

    Chat-id convention used throughout: group chats are 21/22 and the
    private chat of user ``n`` is ``10 + n`` (Michael=1/11, Jenny=2/12,
    Lukas=3/13, Jannik=4/14).
    """

    def setUp(self) -> None:
        """Create the database schema, sample users and a GameServer."""
        # Setup database schema
        engine = sqlalchemy.create_engine(CONFIG['database']['connection'], isolation_level='SERIALIZABLE', echo=True)
        model.Base.metadata.create_all(engine)
        create_sample_users(engine)
        self.game_server = game.GameServer(CONFIG, engine)

    # Pattern the bot's acknowledgement of a submitted text must match.
    TEXT_SUBMIT_RESPONSE = r"🆗"

    def test_leave_game(self) -> None:
        """Players may leave before/after start; the last two may not."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let all users join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        self.game_server.join_game(21, 3)
        self.game_server.join_game(21, 4)
        # Set rounds
        self.game_server.set_rounds(21, 2)
        # Lukas leaves the game
        msgs = self.game_server.leave_game(21, 3)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("👋 Bye!")})
        # Start game
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("ask a question") for i in (11, 12, 14)}})
        # Jannik leaves the game
        msgs = self.game_server.leave_game(21, 4)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("👋 Bye!"),
                                    14: re.compile("No answer required")})
        # Jenny cannot leave the game
        # NOTE(review): the comment says Jenny (user 2) but user id 4
        # (Jannik, who already left) is passed — confirm intent.
        msgs = self.game_server.leave_game(21, 4)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("one of the last two participants")})

    def test_simple_game(self) -> None:
        """Two players plus a late joiner play a full synchronous game."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let Michael and Jenny join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        # Set rounds
        self.game_server.set_rounds(21, 2)
        # Start game
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("ask a question") for i in (11, 12)}})
        # Write questions
        msgs = self.game_server.submit_text(11, 6, "Question 1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Lukas joins late
        msgs = self.game_server.join_game(21, 3)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("Welcome Lukas"),
                                    13: re.compile("ask a question")})
        # Lukas submits a deliberate typo and fixes it via message edits.
        msgs = self.game_server.submit_text(13, 7, "Quetsion 3")
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.edit_submitted_message(13, 7, "Quetion 3")
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 8, "Question 2")
        self.assertMessagesCorrect(msgs, {11: re.compile(r"(?s)answer.*?Quetion 3"),
                                          12: re.compile(r"(?s)answer.*?Question 1|"
                                                         + self.TEXT_SUBMIT_RESPONSE),
                                          13: re.compile(r"(?s)answer.*?Question 2")})
        msgs = self.game_server.edit_submitted_message(13, 7, "Question 3")
        self.assertMessagesCorrect(msgs,
                                   {11: re.compile(r"Question 3|updated"),
                                    13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Jannik wants to join too, but it's too late
        msgs = self.game_server.join_game(21, 4)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("already started")})
        # Write answers
        msgs = self.game_server.submit_text(11, 9, "Answer 1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Editing the question is no longer accepted once it was passed on.
        msgs = self.game_server.edit_submitted_message(13, 7, "Question Q3")
        self.assertMessagesCorrect(msgs, {13: re.compile(r"not accepted")})
        msgs = self.game_server.submit_text(13, 10, "Answer 3")
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.get_group_status(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile(r"(?s)game is on.*3 sheets.*waiting for Jenny.*Michael.*"
                                                   r"Synchronous: yes")})
        msgs = self.game_server.submit_text(12, 11, "Answer 2")
        self.assertMessagesCorrect(
            msgs,
            {12: re.compile(self.TEXT_SUBMIT_RESPONSE),
             21: re.compile("example.com:9090/game/")})
        msgs = self.game_server.edit_submitted_message(13, 10, "Answer A3")
        self.assertMessagesCorrect(msgs, {13: re.compile(r"not accepted")})

    def test_asynchronous_game(self):
        """In asynchronous mode sheets are passed on immediately."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let Michael, Jenny and Lukas join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        self.game_server.join_game(21, 3)
        # Set settings
        self.game_server.set_rounds(21, 3)
        self.game_server.set_synchronous(21, False)
        # Start game
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("ask a question") for i in (11, 12, 13)}})
        # Let Michael write question
        msgs = self.game_server.submit_text(11, 1, "Question 1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 2, "Question 2")
        self.assertMessagesCorrect(msgs,
                                   {12: re.compile(r"(?s)answer.*?Question 1|" + self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 3, "Answer 2")
        self.assertMessagesCorrect(msgs, {12: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(13, 4, "Question 3")
        self.assertMessagesCorrect(msgs,
                                   {11: re.compile(r"(?s)answer.*?Question 3"),
                                    13: re.compile(r"(?s)answer.*?Question 2|" + self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(13, 5, "Answer 3")
        self.assertMessagesCorrect(msgs,
                                   {13: re.compile(r"(?s)ask a question.*?Answer 2|" + self.TEXT_SUBMIT_RESPONSE)})

    def test_parallel_games(self):
        """One user plays two games at once; sheets queue per user."""
        # Create new game in "Funny Group" chat (chat_id=21)
        self.game_server.new_game(21, "Funny Group")
        # Let Michael, Jenny join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        # … and another game in the "Serious Group" chat (chat_id=22)
        self.game_server.new_game(22, "Serious Group")
        # Lukas joins both games
        self.game_server.join_game(22, 3)
        self.game_server.join_game(21, 3)
        # The first Game is started (in synchronous mode)
        self.game_server.set_rounds(21, 3)
        msgs = self.game_server.start_game(21)
        self.assertMessagesCorrect(msgs,
                                   {21: re.compile("📝|Let's go!"),
                                    **{i: re.compile("(?s)ask a question.*?Funny Group") for i in (11, 12, 13)}})
        # Michael writes the first question
        msgs = self.game_server.submit_text(11, 12, "Question A1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Jannik and Michael join the second game
        self.game_server.join_game(22, 4)
        self.game_server.join_game(22, 1)
        # The second Game is started (in asynchronous mode)
        self.game_server.set_rounds(22, 3)
        self.game_server.set_synchronous(22, False)
        msgs = self.game_server.start_game(22)
        # Only Michael and Jannik should be asked for a question, Lukas is still working on a question for the first
        # game
        self.assertMessagesCorrect(msgs,
                                   {22: re.compile("📝|Let's go!"),
                                    **{i: re.compile("(?s)ask a question.*?Serious Group") for i in (11, 14)}})
        # We now have the following Sheets:
        # Michael: {G2: }
        # Jenny: {G1: }, {G1: "Question A1" (waiting)}
        # Lukas: {G1: }, {G2: }
        # Jannik: {G2: }
        # Lukas submits two questions
        msgs = self.game_server.submit_text(13, 13, "Question A3")
        # The first question waits for the synchronous game …
        self.assertMessagesCorrect(msgs,
                                   {13: re.compile(r"(?s)ask a question.*?Serious Group|" + self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(13, 14, "Question B3")
        # … the second question is put on Jannik's stack, but he's still working on a question
        self.assertMessagesCorrect(msgs, {13: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # Michael submits one question. He should not get the question in Game 1
        msgs = self.game_server.submit_text(11, 15, "Question B1")
        self.assertMessagesCorrect(msgs, {11: re.compile(self.TEXT_SUBMIT_RESPONSE),
                                          13: re.compile(r"(?s)answer.*?Question B1")})
        # We now have the following Sheets:
        # Michael: {G1: "Question A3" (waiting)},
        # Jenny: {G1: }, {G1: "Question A1" (waiting)}
        # Lukas: {G2: "Question B1"}
        # Jannik: {G2: }, {G2: "Question B3"},
        # Now, Jenny questions/answers two sheets, the first one triggers a new round in Game 1:
        msgs = self.game_server.submit_text(12, 20, "Question A2")
        self.assertMessagesCorrect(msgs, {11: re.compile(r"(?s)answer.*?Question A3"),
                                          12: re.compile(r"(?s)answer.*?Question A1|" +
                                                         self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(12, 21, "Answer A2")
        self.assertMessagesCorrect(msgs, {12: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # We now have the following Sheets:
        # Michael: {G1: "Question A3"},
        # Jenny: --
        # Lukas: {G2: "Question B1"}, {G1: "Question A2"}, {G1: "Question A1", "Answer A2" (waiting)}
        # Jannik: {G2: }, {G2: "Question B3"},
        msgs = self.game_server.get_user_status(13)
        self.assertMessagesCorrect(msgs,
                                   {13: re.compile(r"(?s)Serious Group.*Funny Group.*2 pending sheets|Question B1")})
        # Now, let's try to stop Game 2 with all sheets answered
        msgs = self.game_server.stop_game(22)
        self.assertMessagesCorrect(msgs,
                                   {14: re.compile(r"No new question required|Question B3")})
        msgs = self.game_server.submit_text(14, 16, "Answer B4")
        self.assertMessagesCorrect(msgs, {14: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        msgs = self.game_server.submit_text(11, 17, "Answer A1")
        self.assertMessagesCorrect(msgs,
                                   {11: re.compile(self.TEXT_SUBMIT_RESPONSE)})
        # We now have the following Sheets:
        # Michael: {G2: "Question B3", "Answer B4"}
        # Jenny: {G1: "Question A3", "Answer A1" (waiting)}
        # Lukas: {G2: "Question B1"}, {G1: "Question A2"}, {G1: "Question A1", "Answer A2" (waiting)},
        #        {G2: "Question B4", "Answer B1"}
        # Jannik: --
        msgs = self.game_server.submit_text(13, 18, "Answer B3")
        self.assertMessagesCorrect(
            msgs,
            {13: re.compile(r"(?s)answer.*Question A2|" + self.TEXT_SUBMIT_RESPONSE),
             22: re.compile("example.com:9090/game/")})
        msgs = self.game_server.submit_text(13, 19, "Answer A3")
        self.assertMessagesCorrect(msgs, {11: re.compile(r"(?s)ask.*?Answer A3"),
                                          12: re.compile(r"(?s)ask.*?Answer A1"),
                                          13: re.compile(r"(?s)ask.*?Answer A2|"
                                                         + self.TEXT_SUBMIT_RESPONSE)})

    def test_shuffle_players(self) -> None:
        """Shuffling the player list changes the reported order."""
        self.game_server.new_game(21, "Funny Group")
        # Let all users join
        self.game_server.join_game(21, 1)
        self.game_server.join_game(21, 2)
        self.game_server.join_game(21, 3)
        self.game_server.join_game(21, 4)
        # Player order should be order of joining
        msgs = self.game_server.get_group_status(21)
        self.assertMessagesCorrect(msgs, {21: re.compile(r"(?s)Michael.*Jenny.*Lukas.*Jannik")})
        # After shuffling ...
        msgs = self.game_server.shuffle_players(21)
        self.assertMessagesCorrect(msgs, {21: re.compile(r"🆗")})
        # ... player order should not be the same.
        # NOTE(review): a random shuffle of 4 players can land on the
        # original order (p = 1/24), so this assertion is theoretically
        # flaky unless shuffle_players guarantees a different order.
        msgs = self.game_server.get_group_status(21)
        self.assertMessagesCorrect(msgs, {21: re.compile(r"(?s)Michael(?!.*Jenny.*Lukas.*Jannik)")})

    def assertMessagesCorrect(self, messages: List[game.TranslatedMessage], expected: Dict[int, Pattern]) -> None:
        """Assert every message matches its chat's pattern and that every
        expected chat received at least one message."""
        for message in messages:
            self.assertIn(message.chat_id, expected, f"Message \"{message.text}\" to chat id {message.chat_id}")
            self.assertRegex(message.text, expected[message.chat_id])
        for chat in expected:
            self.assertIn(chat, list(m.chat_id for m in messages), f"No message to chat {chat} found")
# (key, name, shortname, unit, filename) for every model field.
# Keeping this as a flat table removes ~200 lines of repetitive dict
# assignments while preserving key order and values exactly.
_FIELD_DEFS = [
    ('bathymetry_2d', 'Bathymetry', 'Bathymetry', 'm', 'bathymetry2d'),
    ('z_coord_3d', 'Mesh z coordinates', 'Z coordinates', 'm', 'ZCoord3d'),
    ('z_coord_ref_3d', 'Static mesh z coordinates', 'Z coordinates', 'm', 'ZCoordRef3d'),
    ('uv_2d', 'Depth averaged velocity', 'Depth averaged velocity', 'm s-1', 'Velocity2d'),
    ('sediment_2d', 'Sediment', 'Sediment', '', 'Sediment2d'),
    ('uv_dav_2d', 'Depth averaged velocity', 'Depth averaged velocity', 'm s-1', 'DAVelocity2d'),
    ('split_residual_2d', 'Momentum eq. residual for mode splitting', 'Momentum residual', 'm s-2', 'SplitResidual2d'),
    ('uv_dav_3d', 'Depth averaged velocity', 'Depth averaged velocity', 'm s-1', 'DAVelocity3d'),
    ('elev_2d', 'Water elevation', 'Elevation', 'm', 'Elevation2d'),
    ('elev_domain_2d', 'Surface elevation of domain', 'Elevation', 'm', 'ElevationDomain2d'),
    ('elev_cg_2d', 'Water elevation CG', 'Elevation', 'm', 'ElevationCG2d'),
    ('uv_3d', 'Horizontal velocity', 'Horizontal velocity', 'm s-1', 'Velocity3d'),
    ('w_3d', 'Vertical velocity', 'Vertical velocity', 'm s-1', 'VertVelo3d'),
    ('w_mesh_3d', 'Mesh velocity', 'Mesh velocity', 'm s-1', 'MeshVelo3d'),
    ('salt_3d', 'Water salinity', 'Salinity', 'psu', 'Salinity3d'),
    ('temp_3d', 'Water temperature', 'Temperature', 'C', 'Temperature3d'),
    ('density_3d', 'Water density', 'Density', 'kg m-3', 'Density3d'),
    ('eddy_visc_3d', 'Eddy Viscosity', 'Eddy Viscosity', 'm2 s-1', 'EddyVisc3d'),
    ('eddy_diff_3d', 'Eddy diffusivity', 'Eddy diffusivity', 'm2 s-1', 'EddyDiff3d'),
    ('shear_freq_3d', 'Vertical shear frequency squared', 'Vertical shear frequency squared', 's-2', 'ShearFreq3d'),
    ('buoy_freq_3d', 'Buoyancy frequency squared', 'Buoyancy frequency squared', 's-2', 'BuoyFreq3d'),
    ('tke_3d', 'Turbulent Kinetic Energy', 'Turbulent Kinetic Energy', 'm2 s-2', 'TurbKEnergy3d'),
    ('psi_3d', 'Turbulence psi variable', 'Turbulence psi variable', '', 'TurbPsi3d'),
    ('eps_3d', 'TKE dissipation rate', 'TKE dissipation rate', 'm2 s-3', 'TurbEps3d'),
    ('len_3d', 'Turbulent length scale', 'Turbulent length scale', 'm', 'TurbLen3d'),
    ('baroc_head_3d', 'Baroclinic head', 'Baroclinic head', 'm', 'BaroHead3d'),
    ('int_pg_3d', 'Internal pressure gradient', 'Int. Pressure gradient', 'm s-2', 'IntPG3d'),
    ('smag_visc_3d', 'Smagorinsky viscosity', 'Smagorinsky viscosity', 'm2 s-1', 'SmagViscosity3d'),
    ('max_h_diff', 'Maximum stable horizontal diffusivity', 'Maximum horizontal diffusivity', 'm2 s-1', 'MaxHDiffusivity3d'),
    ('v_elem_size_3d', 'Element size in vertical direction', 'Vertical element size', 'm', 'VElemSize3d'),
    ('v_elem_size_2d', 'Element size in vertical direction', 'Vertical element size', 'm', 'VElemSize2d'),
    ('h_elem_size_3d', 'Element size in horizontal direction', 'Horizontal element size', 'm', 'h_elem_size_3d'),
    ('h_elem_size_2d', 'Element size in horizontal direction', 'Horizontal element size', 'm', 'h_elem_size_2d'),
    ('coriolis_2d', 'Coriolis parameter', 'Coriolis parameter', 's-1', 'coriolis_2d'),
    ('coriolis_3d', 'Coriolis parameter', 'Coriolis parameter', 's-1', 'coriolis_3d'),
    ('wind_stress_3d', 'Wind stress', 'Wind stress', 'Pa', 'wind_stress_3d'),
    ('hcc_metric_3d', 'HCC mesh quality', 'HCC metric', '-', 'HCCMetric3d'),
    ('q_2d', 'Non-hydrostatic pressure at bottom', 'NH pressure', 'Pa', 'NHPressure2d'),
    ('w_2d', 'Depth averaged vertical velocity', 'Depth averaged vertical velocity', 'm s-1', 'VertVelo2d'),
]

field_metadata = {
    key: {'name': name, 'shortname': shortname, 'unit': unit, 'filename': filename}
    for key, name, shortname, unit, filename in _FIELD_DEFS
}
"""
Dictionary that contains the meta data of each field.
Required meta data entries are:
- **name**: human readable description
- **shortname**: description used in visualization etc
- **unit**: SI unit of the field
- **filename**: filename for output files
The naming convention for field keys is snake_case: ``field_name_3d``
"""
"""
Dictionary that contains the meta data of each field.
Required meta data entries are:
- **name**: human readable description
- **shortname**: description used in visualization etc
- **unit**: SI unit of the field
- **filename**: filename for output files
The naming convention for field keys is snake_case: ``field_name_3d``
"""
field_metadata['bathymetry_2d'] = {
'name': 'Bathymetry',
'shortname': 'Bathymetry',
'unit': 'm',
'filename': 'bathymetry2d',
}
field_metadata['z_coord_3d'] = {
'name': 'Mesh z coordinates',
'shortname': 'Z coordinates',
'unit': 'm',
'filename': 'ZCoord3d',
}
field_metadata['z_coord_ref_3d'] = {
'name': 'Static mesh z coordinates',
'shortname': 'Z coordinates',
'unit': 'm',
'filename': 'ZCoordRef3d',
}
field_metadata['uv_2d'] = {
'name': 'Depth averaged velocity',
'shortname': 'Depth averaged velocity',
'unit': 'm s-1',
'filename': 'Velocity2d',
}
field_metadata['sediment_2d'] = {
'name': 'Sediment',
'shortname': 'Sediment',
'unit': '',
'filename': 'Sediment2d',
}
field_metadata['uv_dav_2d'] = {
'name': 'Depth averaged velocity',
'shortname': 'Depth averaged velocity',
'unit': 'm s-1',
'filename': 'DAVelocity2d',
}
field_metadata['split_residual_2d'] = {
'name': 'Momentum eq. residual for mode splitting',
'shortname': 'Momentum residual',
'unit': 'm s-2',
'filename': 'SplitResidual2d',
}
field_metadata['uv_dav_3d'] = {
'name': 'Depth averaged velocity',
'shortname': 'Depth averaged velocity',
'unit': 'm s-1',
'filename': 'DAVelocity3d',
}
field_metadata['elev_2d'] = {
'name': 'Water elevation',
'shortname': 'Elevation',
'unit': 'm',
'filename': 'Elevation2d',
}
field_metadata['elev_domain_2d'] = {
'name': 'Surface elevation of domain',
'shortname': 'Elevation',
'unit': 'm',
'filename': 'ElevationDomain2d',
}
field_metadata['elev_cg_2d'] = {
'name': 'Water elevation CG',
'shortname': 'Elevation',
'unit': 'm',
'filename': 'ElevationCG2d',
}
field_metadata['uv_3d'] = {
'name': 'Horizontal velocity',
'shortname': 'Horizontal velocity',
'unit': 'm s-1',
'filename': 'Velocity3d',
}
field_metadata['w_3d'] = {
'name': 'Vertical velocity',
'shortname': 'Vertical velocity',
'unit': 'm s-1',
'filename': 'VertVelo3d',
}
field_metadata['w_mesh_3d'] = {
'name': 'Mesh velocity',
'shortname': 'Mesh velocity',
'unit': 'm s-1',
'filename': 'MeshVelo3d',
}
field_metadata['salt_3d'] = {
'name': 'Water salinity',
'shortname': 'Salinity',
'unit': 'psu',
'filename': 'Salinity3d',
}
field_metadata['temp_3d'] = {
'name': 'Water temperature',
'shortname': 'Temperature',
'unit': 'C',
'filename': 'Temperature3d',
}
field_metadata['density_3d'] = {
'name': 'Water density',
'shortname': 'Density',
'unit': 'kg m-3',
'filename': 'Density3d',
}
field_metadata['eddy_visc_3d'] = {
'name': 'Eddy Viscosity',
'shortname': 'Eddy Viscosity',
'unit': 'm2 s-1',
'filename': 'EddyVisc3d',
}
field_metadata['eddy_diff_3d'] = {
'name': 'Eddy diffusivity',
'shortname': 'Eddy diffusivity',
'unit': 'm2 s-1',
'filename': 'EddyDiff3d',
}
field_metadata['shear_freq_3d'] = {
'name': 'Vertical shear frequency squared',
'shortname': 'Vertical shear frequency squared',
'unit': 's-2',
'filename': 'ShearFreq3d',
}
field_metadata['buoy_freq_3d'] = {
'name': 'Buoyancy frequency squared',
'shortname': 'Buoyancy frequency squared',
'unit': 's-2',
'filename': 'BuoyFreq3d',
}
field_metadata['tke_3d'] = {
'name': 'Turbulent Kinetic Energy',
'shortname': 'Turbulent Kinetic Energy',
'unit': 'm2 s-2',
'filename': 'TurbKEnergy3d',
}
field_metadata['psi_3d'] = {
'name': 'Turbulence psi variable',
'shortname': 'Turbulence psi variable',
'unit': '',
'filename': 'TurbPsi3d',
}
field_metadata['eps_3d'] = {
'name': 'TKE dissipation rate',
'shortname': 'TKE dissipation rate',
'unit': 'm2 s-3',
'filename': 'TurbEps3d',
}
field_metadata['len_3d'] = {
'name': 'Turbulent length scale',
'shortname': 'Turbulent length scale',
'unit': 'm',
'filename': 'TurbLen3d',
}
field_metadata['baroc_head_3d'] = {
'name': 'Baroclinic head',
'shortname': 'Baroclinic head',
'unit': 'm',
'filename': 'BaroHead3d',
}
field_metadata['int_pg_3d'] = {
'name': 'Internal pressure gradient',
'shortname': 'Int. Pressure gradient',
'unit': 'm s-2',
'filename': 'IntPG3d',
}
field_metadata['smag_visc_3d'] = {
'name': 'Smagorinsky viscosity',
'shortname': 'Smagorinsky viscosity',
'unit': 'm2 s-1',
'filename': 'SmagViscosity3d',
}
field_metadata['max_h_diff'] = {
'name': 'Maximum stable horizontal diffusivity',
'shortname': 'Maximum horizontal diffusivity',
'unit': 'm2 s-1',
'filename': 'MaxHDiffusivity3d',
}
field_metadata['v_elem_size_3d'] = {
'name': 'Element size in vertical direction',
'shortname': 'Vertical element size',
'unit': 'm',
'filename': 'VElemSize3d',
}
field_metadata['v_elem_size_2d'] = {
'name': 'Element size in vertical direction',
'shortname': 'Vertical element size',
'unit': 'm',
'filename': 'VElemSize2d',
}
field_metadata['h_elem_size_3d'] = {
'name': 'Element size in horizontal direction',
'shortname': 'Horizontal element size',
'unit': 'm',
'filename': 'h_elem_size_3d',
}
field_metadata['h_elem_size_2d'] = {
'name': 'Element size in horizontal direction',
'shortname': 'Horizontal element size',
'unit': 'm',
'filename': 'h_elem_size_2d',
}
field_metadata['coriolis_2d'] = {
'name': 'Coriolis parameter',
'shortname': 'Coriolis parameter',
'unit': 's-1',
'filename': 'coriolis_2d',
}
field_metadata['coriolis_3d'] = {
'name': 'Coriolis parameter',
'shortname': 'Coriolis parameter',
'unit': 's-1',
'filename': 'coriolis_3d',
}
field_metadata['wind_stress_3d'] = {
'name': 'Wind stress',
'shortname': 'Wind stress',
'unit': 'Pa',
'filename': 'wind_stress_3d',
}
field_metadata['hcc_metric_3d'] = {
'name': 'HCC mesh quality',
'shortname': 'HCC metric',
'unit': '-',
'filename': 'HCCMetric3d',
}
field_metadata['q_2d'] = {
'name': 'Non-hydrostatic pressure at bottom',
'shortname': 'NH pressure',
'unit': 'Pa',
'filename': 'NHPressure2d',
}
field_metadata['w_2d'] = {
'name': 'Depth averaged vertical velocity',
'shortname': 'Depth averaged vertical velocity',
'unit': 'm s-1',
'filename': 'VertVelo2d',
} | 0.779993 | 0.594316 |
import os
import argparse
import numpy as np
import pandas as pd
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def processLine(line):
# Identify if the line of log contains important information
if 'Sweep Count' in line:
return 'swp_count'
if 'Config' in line:
return 'c_start'
if '\n' == line:
return 'c_end'
if ' Epoch: ' in line:
return 'data'
if ' Test PER: ' in line:
return 'test_err'
def getStringBetween(line, start_str, end_str):
# Extract the substring between the start and end string
start_index = line.find(start_str) + len(start_str)
end_index = line.find(end_str)
return line[start_index: end_index]
def extractTestErr(line):
return float(getStringBetween(line, 'PER: ', '%'))
def extractTrainLoss(line):
return float(getStringBetween(line, 'Train Loss: ', '\tTrain PER'))
def extractValLoss(line):
return float(getStringBetween(line, 'Val Loss: ', '\tVal PER'))
def extractTrainErr(line):
return float(getStringBetween(line, 'Train PER: ', '\tVal Loss'))
def extractValErr(line):
return float(getStringBetween(line, 'Val PER: ', '%\n'))
def readLog(file):
# Reads the log to extract all the important information
expts = []
curr_expt = None
lowest_val_err = 100
in_config = False
for line in file:
out = processLine(line)
if not out and not in_config: continue
if out == 'swp_count':
# marks the start of an experiment sweep
curr_expt = {}
lowest_val_err = 100
curr_expt['train_data'] = []
elif out == 'test_err':
# marks the end of an experiment sweep. record the test error
curr_expt['test_err'] = extractTestErr(line)
curr_expt['lowest_val_err'] = lowest_val_err
#append to expt list
expts.append(curr_expt)
elif out == 'c_start':
# marks the start of reading the config text
in_config = True
curr_expt['config_text'] = ''
elif out == 'c_end':
# marks the end of reading the config text
in_config = False
elif in_config:
# record the text
curr_expt['config_text'] += line
if 'l2_regularizer' in line:
curr_expt['l2_regularizer'] = float(getStringBetween(line, 'l2_regularizer: ', '\n'))
elif 'dropout' in line:
curr_expt['dropout'] = float(getStringBetween(line, 'dropout: ', '\n'))
elif 'learning_rate' in line:
curr_expt['learning_rate'] = float(getStringBetween(line, 'learning_rate: ', '\n'))
elif out == 'data':
curr_expt['train_data'].append({
'train_loss': extractTrainLoss(line),
'val_loss': extractValLoss(line),
'train_err': extractTrainErr(line),
'val_err': extractValErr(line)
})
lowest_val_err = min(lowest_val_err, extractValErr(line))
return expts
def getXYZ(data, a_name, x_name, y_name, z_name):
# extracts the best validation error and their respective hyperparameters
a = []
x = []
y = []
z = []
for expt in data:
a.append(expt[a_name])
x.append(expt[x_name])
y.append(expt[y_name])
z.append(expt[z_name])
return [np.asarray(ls) for ls in [a, x, y, z]]
def main():
parser = argparse.ArgumentParser(description='Parse the log to get data for plotting')
parser.add_argument('--file', '-f', type=str, help='name of the log file relative to the project root directory', required=True)
args = parser.parse_args()
with open(os.path.join(ROOT_DIR, args.file), 'r') as file:
expt_list = readLog(file)
a, x, y, z = getXYZ(expt_list, 'dropout', 'l2_regularizer', 'learning_rate', 'lowest_val_err')
df = pd.DataFrame({
'dropout': a,
'l2_regularizer': x,
'learning_rate': y,
'lowest_val_err': z
})
df.to_csv(os.path.join(ROOT_DIR, os.path.dirname(args.file), 'sweeps.csv'))
if __name__ == '__main__':
main() | utils/log_to_csv.py | import os
import argparse
import numpy as np
import pandas as pd
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def processLine(line):
# Identify if the line of log contains important information
if 'Sweep Count' in line:
return 'swp_count'
if 'Config' in line:
return 'c_start'
if '\n' == line:
return 'c_end'
if ' Epoch: ' in line:
return 'data'
if ' Test PER: ' in line:
return 'test_err'
def getStringBetween(line, start_str, end_str):
# Extract the substring between the start and end string
start_index = line.find(start_str) + len(start_str)
end_index = line.find(end_str)
return line[start_index: end_index]
def extractTestErr(line):
return float(getStringBetween(line, 'PER: ', '%'))
def extractTrainLoss(line):
return float(getStringBetween(line, 'Train Loss: ', '\tTrain PER'))
def extractValLoss(line):
return float(getStringBetween(line, 'Val Loss: ', '\tVal PER'))
def extractTrainErr(line):
return float(getStringBetween(line, 'Train PER: ', '\tVal Loss'))
def extractValErr(line):
return float(getStringBetween(line, 'Val PER: ', '%\n'))
def readLog(file):
# Reads the log to extract all the important information
expts = []
curr_expt = None
lowest_val_err = 100
in_config = False
for line in file:
out = processLine(line)
if not out and not in_config: continue
if out == 'swp_count':
# marks the start of an experiment sweep
curr_expt = {}
lowest_val_err = 100
curr_expt['train_data'] = []
elif out == 'test_err':
# marks the end of an experiment sweep. record the test error
curr_expt['test_err'] = extractTestErr(line)
curr_expt['lowest_val_err'] = lowest_val_err
#append to expt list
expts.append(curr_expt)
elif out == 'c_start':
# marks the start of reading the config text
in_config = True
curr_expt['config_text'] = ''
elif out == 'c_end':
# marks the end of reading the config text
in_config = False
elif in_config:
# record the text
curr_expt['config_text'] += line
if 'l2_regularizer' in line:
curr_expt['l2_regularizer'] = float(getStringBetween(line, 'l2_regularizer: ', '\n'))
elif 'dropout' in line:
curr_expt['dropout'] = float(getStringBetween(line, 'dropout: ', '\n'))
elif 'learning_rate' in line:
curr_expt['learning_rate'] = float(getStringBetween(line, 'learning_rate: ', '\n'))
elif out == 'data':
curr_expt['train_data'].append({
'train_loss': extractTrainLoss(line),
'val_loss': extractValLoss(line),
'train_err': extractTrainErr(line),
'val_err': extractValErr(line)
})
lowest_val_err = min(lowest_val_err, extractValErr(line))
return expts
def getXYZ(data, a_name, x_name, y_name, z_name):
# extracts the best validation error and their respective hyperparameters
a = []
x = []
y = []
z = []
for expt in data:
a.append(expt[a_name])
x.append(expt[x_name])
y.append(expt[y_name])
z.append(expt[z_name])
return [np.asarray(ls) for ls in [a, x, y, z]]
def main():
parser = argparse.ArgumentParser(description='Parse the log to get data for plotting')
parser.add_argument('--file', '-f', type=str, help='name of the log file relative to the project root directory', required=True)
args = parser.parse_args()
with open(os.path.join(ROOT_DIR, args.file), 'r') as file:
expt_list = readLog(file)
a, x, y, z = getXYZ(expt_list, 'dropout', 'l2_regularizer', 'learning_rate', 'lowest_val_err')
df = pd.DataFrame({
'dropout': a,
'l2_regularizer': x,
'learning_rate': y,
'lowest_val_err': z
})
df.to_csv(os.path.join(ROOT_DIR, os.path.dirname(args.file), 'sweeps.csv'))
if __name__ == '__main__':
main() | 0.327561 | 0.196094 |
from enum import IntFlag
from typing import cast, List, Tuple, Iterable, TextIO
from itertools import takewhile
from qcodes import VisaInstrument, InstrumentChannel, ChannelList
from qcodes.utils.validators import Enum, Numbers
from qcodes.instrument.group_parameter import GroupParameter, Group
def read_curve_file(curve_file: TextIO) -> dict:
"""
Read a curve file with extension .330
The file format of this file is shown in test_lakeshore_file_parser.py
in the test module
The output is a dictionary with keys: "metadata" and "data".
The metadata dictionary contains the first n lines of the curve file which
are in the format "item: value". The data dictionary contains the actual
curve data.
"""
def split_data_line(line: str, parser: type = str) -> List[str]:
return [parser(i) for i in line.split(" ") if i != ""]
def strip(strings: Iterable[str]) -> Tuple:
return tuple(s.strip() for s in strings)
lines = iter(curve_file.readlines())
# Meta data lines contain a colon
metadata_lines = takewhile(lambda s: ":" in s, lines)
# Data from the file is collected in the following dict
file_data = dict()
# Capture meta data
parsed_lines = [strip(line.split(":")) for line in metadata_lines]
file_data["metadata"] = {key: value for key, value in parsed_lines}
# After meta data we have a data header
header_items = strip(split_data_line(next(lines)))
# After that we have the curve data
data = [
split_data_line(line, parser=float)
for line in lines if line.strip() != ""
]
file_data["data"] = dict(
zip(header_items, zip(*data))
)
return file_data
def get_sanitize_data(file_data: dict) -> dict:
"""
Data as found in the curve files are slightly different then
the dictionary as expected by the 'upload_curve' method of the
driver
"""
data_dict = dict(file_data["data"])
# We do not need the index column
del data_dict["No."]
# Rename the 'Units' column to the appropriate name
# Look up under the 'Data Format' entry to find what units we have
data_format = file_data['metadata']['Data Format']
# This is a string in the form '4 (Log Ohms/Kelvin)'
data_format_int = int(data_format.split()[0])
correct_name = Model_325_Curve.valid_sensor_units[data_format_int - 1]
# Rename the column
data_dict[correct_name] = data_dict["Units"]
del data_dict["Units"]
return data_dict
class Status(IntFlag):
sensor_units_overrang = 128
sensor_units_zero = 64
temp_overrange = 32
temp_underrange = 16
invalid_reading = 1
class Model_325_Curve(InstrumentChannel):
valid_sensor_units = ["mV", "V", "Ohm", "log Ohm"]
temperature_key = "Temperature (K)"
def __init__(self, parent: 'Model_325', index: int) -> None:
self._index = index
name = f"curve_{index}"
super().__init__(parent, name)
self.add_parameter(
"serial_number",
parameter_class=GroupParameter
)
self.add_parameter(
"format",
val_mapping={
f"{unt}/K": i+1 for i, unt in enumerate(self.valid_sensor_units)
},
parameter_class=GroupParameter
)
self.add_parameter(
"limit_value",
parameter_class=GroupParameter
)
self.add_parameter(
"coefficient",
val_mapping={
"negative": 1,
"positive": 2
},
parameter_class=GroupParameter
)
self.add_parameter(
"curve_name",
parameter_class=GroupParameter
)
Group(
[
self.curve_name, self.serial_number, self.format,
self.limit_value, self.coefficient
],
set_cmd=f"CRVHDR {self._index}, {{curve_name}}, "
f"{{serial_number}}, {{format}}, {{limit_value}}, "
f"{{coefficient}}",
get_cmd=f"CRVHDR? {self._index}"
)
def get_data(self) -> dict:
curve = [
float(a) for point_index in range(1, 200)
for a in self.ask(f"CRVPT? {self._index}, {point_index}").split(",")
]
d = {self.temperature_key: curve[1::2]}
sensor_unit = self.format().split("/")[0]
d[sensor_unit] = curve[::2]
return d
@classmethod
def validate_datadict(cls, data_dict: dict) -> str:
"""
A data dict has two keys, one of which is 'Temperature (K)'. The other
contains the units in which the curve is defined and must be one of:
'mV', 'V', 'Ohm' or 'log Ohm'
This method validates this and returns the sensor unit encountered in
the data dict
"""
if cls.temperature_key not in data_dict:
raise ValueError(f"At least {cls.temperature_key} needed in the "
f"data dictionary")
sensor_units = [i for i in data_dict.keys() if i != cls.temperature_key]
if len(sensor_units) != 1:
raise ValueError(
"Data dictionary should have one other key, other then "
"'Temperature (K)'"
)
sensor_unit = sensor_units[0]
if sensor_unit not in cls.valid_sensor_units:
raise ValueError(
f"Sensor unit {sensor_unit} invalid. This needs to be one of "
f"{', '.join(cls.valid_sensor_units)}"
)
data_size = len(data_dict[cls.temperature_key])
if data_size != len(data_dict[sensor_unit]) or data_size > 200:
raise ValueError("The length of the temperature axis should be "
"the same as the length of the sensor axis and "
"should not exceed 200 in size")
return sensor_unit
def set_data(self, data_dict: dict, sensor_unit: str = None) -> None:
"""
Set the curve data according to the values found the the dictionary.
Args:
data_dict (dict): See `validate_datadict` to see the format of this
dictionary
sensor_unit (str): If None, the data dict is validated and the
units are extracted.
"""
if sensor_unit is None:
sensor_unit = self.validate_datadict(data_dict)
temperature_values = data_dict[self.temperature_key]
sensor_values = data_dict[sensor_unit]
for value_index, (temperature_value, sensor_value) in \
enumerate(zip(temperature_values, sensor_values)):
cmd_str = f"CRVPT {self._index}, {value_index + 1}, " \
f"{sensor_value:3.3f}, {temperature_value:3.3f}"
self.write(cmd_str)
class Model_325_Sensor(InstrumentChannel):
"""
A single sensor of a Lakeshore 325.
Args:
parent (Model_325): The instrument this heater belongs to
name (str)
inp (str): Either "A" or "B"
"""
def __init__(self, parent: 'Model_325', name: str, inp: str) -> None:
if inp not in ["A", "B"]:
raise ValueError("Please either specify input 'A' or 'B'")
super().__init__(parent, name)
self._input = inp
self.add_parameter(
'temperature',
get_cmd='KRDG? {}'.format(self._input),
get_parser=float,
label='Temperature',
unit='K'
)
self.add_parameter(
'status',
get_cmd='RDGST? {}'.format(self._input),
get_parser=lambda status: self.decode_sensor_status(int(status)),
label='Sensor_Status'
)
self.add_parameter(
"type",
val_mapping={
"Silicon diode": 0,
"GaAlAs diode": 1,
"100 Ohm platinum/250": 2,
"100 Ohm platinum/500": 3,
"1000 Ohm platinum": 4,
"NTC RTD": 5,
"Thermocouple 25mV": 6,
"Thermocouple 50 mV": 7,
"2.5 V, 1 mA": 8,
"7.5 V, 1 mA": 9
},
parameter_class=GroupParameter
)
self.add_parameter(
"compensation",
vals=Enum(0, 1),
parameter_class=GroupParameter
)
Group(
[self.type, self.compensation],
set_cmd=f"INTYPE {self._input}, {{type}}, {{compensation}}",
get_cmd=f"INTYPE? {self._input}"
)
self.add_parameter(
"curve_index",
set_cmd=f"INCRV {self._input}, {{}}",
get_cmd=f"INCRV? {self._input}",
get_parser=int,
vals=Numbers(min_value=1, max_value=35)
)
@staticmethod
def decode_sensor_status(sum_of_codes: int) -> str:
total_status = Status(sum_of_codes)
if sum_of_codes == 0:
return 'OK'
status_messages = [st.name.replace('_', ' ') for st in Status
if st in total_status]
return ", ".join(status_messages)
@property
def curve(self) -> Model_325_Curve:
parent = cast(Model_325, self.parent)
return Model_325_Curve(parent, self.curve_index())
class Model_325_Heater(InstrumentChannel):
"""
Heater control for the Lakeshore 325.
Args:
parent (Model_325): The instrument this heater belongs to
name (str)
loop (int): Either 1 or 2
"""
def __init__(self, parent: 'Model_325', name: str, loop: int) -> None:
if loop not in [1, 2]:
raise ValueError("Please either specify loop 1 or 2")
super().__init__(parent, name)
self._loop = loop
self.add_parameter(
"control_mode",
get_cmd=f"CMODE? {self._loop}",
set_cmd=f"CMODE {self._loop},{{}}",
val_mapping={
"Manual PID": "1",
"Zone": "2",
"Open Loop": "3",
"AutoTune PID": "4",
"AutoTune PI": "5",
"AutoTune P": "6"
}
)
self.add_parameter(
"input_channel",
vals=Enum("A", "B"),
parameter_class=GroupParameter
)
self.add_parameter(
"unit",
val_mapping={
"Kelvin": "1",
"Celsius": "2",
"Sensor Units": "3"
},
parameter_class=GroupParameter
)
self.add_parameter(
'powerup_enable',
val_mapping={True: 1, False: 0},
parameter_class=GroupParameter
)
self.add_parameter(
"output_metric",
val_mapping={
"current": "1",
"power": "2",
},
parameter_class=GroupParameter
)
Group(
[self.input_channel, self.unit, self.powerup_enable,
self.output_metric],
set_cmd=f"CSET {self._loop}, {{input_channel}}, {{unit}}, "
f"{{powerup_enable}}, {{output_metric}}",
get_cmd=f"CSET? {self._loop}"
)
self.add_parameter(
'P',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
self.add_parameter(
'I',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
self.add_parameter(
'D',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
Group(
[self.P, self.I, self.D],
set_cmd=f'PID {self._loop}, {{P}}, {{I}}, {{D}}',
get_cmd=f'PID? {self._loop}'
)
if self._loop == 1:
valid_output_ranges = Enum(0, 1, 2)
else:
valid_output_ranges = Enum(0, 1)
self.add_parameter(
'output_range',
vals=valid_output_ranges,
set_cmd=f'RANGE {self._loop}, {{}}',
get_cmd=f'RANGE? {self._loop}',
val_mapping={
"Off": '0',
"Low (2.5W)": '1',
"High (25W)": '2'
}
)
self.add_parameter(
'setpoint',
vals=Numbers(0, 400),
get_parser=float,
set_cmd=f'SETP {self._loop}, {{}}',
get_cmd=f'SETP? {self._loop}'
)
self.add_parameter(
"ramp_state",
vals=Enum(0, 1),
parameter_class=GroupParameter
)
self.add_parameter(
"ramp_rate",
vals=Numbers(0, 100 / 60 * 1E3),
unit="mK/s",
parameter_class=GroupParameter,
get_parser=lambda v: float(v) / 60 * 1E3, # We get values in K/min,
set_parser=lambda v: v * 60 * 1E-3 # Convert to K/min
)
Group(
[self.ramp_state, self.ramp_rate],
set_cmd=f"RAMP {self._loop}, {{ramp_state}}, {{ramp_rate}}",
get_cmd=f"RAMP? {self._loop}"
)
self.add_parameter(
"is_ramping",
get_cmd=f"RAMPST? {self._loop}"
)
self.add_parameter(
"resistance",
get_cmd=f"HTRRES? {self._loop}",
set_cmd=f"HTRRES {self._loop}, {{}}",
val_mapping={
25: 1,
50: 2,
},
label='Resistance',
unit="Ohm"
)
self.add_parameter(
"heater_output",
get_cmd=f"HTR? {self._loop}",
get_parser=float,
label='Heater Output',
unit="%"
)
class Model_325(VisaInstrument):
"""
Lakeshore Model 325 Temperature Controller Driver
"""
def __init__(self, name: str, address: str, **kwargs) -> None:
super().__init__(name, address, terminator="\r\n", **kwargs)
sensors = ChannelList(
self, "sensor", Model_325_Sensor, snapshotable=False)
for inp in ['A', 'B']:
sensor = Model_325_Sensor(self, 'sensor_{}'.format(inp), inp)
sensors.append(sensor)
self.add_submodule('sensor_{}'.format(inp), sensor)
sensors.lock()
self.add_submodule("sensor", sensors)
heaters = ChannelList(
self, "heater", Model_325_Heater, snapshotable=False)
for loop in [1, 2]:
heater = Model_325_Heater(self, 'heater_{}'.format(loop), loop)
heaters.append(heater)
self.add_submodule('heater_{}'.format(loop), heater)
heaters.lock()
self.add_submodule("heater", heaters)
curves = ChannelList(
self, "curve", Model_325_Curve, snapshotable=False
)
for curve_index in range(1, 35):
curve = Model_325_Curve(self, curve_index)
curves.append(curve)
self.add_submodule("curve", curves)
self.connect_message()
def upload_curve(
self, index: int, name: str, serial_number: str, data_dict: dict
) -> None:
"""
Upload a curve to the given index
Args:
index (int): The index to upload the curve to. We can only use
indices reserved for user defined curves, 21-35
name (str)
serial_number (str)
data_dict (dict): A dictionary containing the curve data
"""
if index not in range(21, 36):
raise ValueError("index value should be between 21 and 35")
sensor_unit = Model_325_Curve.validate_datadict(data_dict)
curve = self.curve[index - 1]
curve.curve_name(name)
curve.serial_number(serial_number)
curve.format(f"{sensor_unit}/K")
curve.set_data(data_dict, sensor_unit=sensor_unit)
def upload_curve_from_file(self, index: int, file_path: str) -> None:
"""
Upload a curve from a curve file. Note that we only support
curve files with extension .330
"""
if not file_path.endswith(".330"):
raise ValueError("Only curve files with extension .330 are supported")
with open(file_path, "r") as curve_file:
file_data = read_curve_file(curve_file)
data_dict = get_sanitize_data(file_data)
name = file_data["metadata"]["Sensor Model"]
serial_number = file_data["metadata"]["Serial Number"]
self.upload_curve(index, name, serial_number, data_dict) | qcodes/instrument_drivers/Lakeshore/Model_325.py | from enum import IntFlag
from typing import cast, List, Tuple, Iterable, TextIO
from itertools import takewhile
from qcodes import VisaInstrument, InstrumentChannel, ChannelList
from qcodes.utils.validators import Enum, Numbers
from qcodes.instrument.group_parameter import GroupParameter, Group
def read_curve_file(curve_file: TextIO) -> dict:
"""
Read a curve file with extension .330
The file format of this file is shown in test_lakeshore_file_parser.py
in the test module
The output is a dictionary with keys: "metadata" and "data".
The metadata dictionary contains the first n lines of the curve file which
are in the format "item: value". The data dictionary contains the actual
curve data.
"""
def split_data_line(line: str, parser: type = str) -> List[str]:
return [parser(i) for i in line.split(" ") if i != ""]
def strip(strings: Iterable[str]) -> Tuple:
return tuple(s.strip() for s in strings)
lines = iter(curve_file.readlines())
# Meta data lines contain a colon
metadata_lines = takewhile(lambda s: ":" in s, lines)
# Data from the file is collected in the following dict
file_data = dict()
# Capture meta data
parsed_lines = [strip(line.split(":")) for line in metadata_lines]
file_data["metadata"] = {key: value for key, value in parsed_lines}
# After meta data we have a data header
header_items = strip(split_data_line(next(lines)))
# After that we have the curve data
data = [
split_data_line(line, parser=float)
for line in lines if line.strip() != ""
]
file_data["data"] = dict(
zip(header_items, zip(*data))
)
return file_data
def get_sanitize_data(file_data: dict) -> dict:
"""
Data as found in the curve files are slightly different then
the dictionary as expected by the 'upload_curve' method of the
driver
"""
data_dict = dict(file_data["data"])
# We do not need the index column
del data_dict["No."]
# Rename the 'Units' column to the appropriate name
# Look up under the 'Data Format' entry to find what units we have
data_format = file_data['metadata']['Data Format']
# This is a string in the form '4 (Log Ohms/Kelvin)'
data_format_int = int(data_format.split()[0])
correct_name = Model_325_Curve.valid_sensor_units[data_format_int - 1]
# Rename the column
data_dict[correct_name] = data_dict["Units"]
del data_dict["Units"]
return data_dict
class Status(IntFlag):
sensor_units_overrang = 128
sensor_units_zero = 64
temp_overrange = 32
temp_underrange = 16
invalid_reading = 1
class Model_325_Curve(InstrumentChannel):
valid_sensor_units = ["mV", "V", "Ohm", "log Ohm"]
temperature_key = "Temperature (K)"
def __init__(self, parent: 'Model_325', index: int) -> None:
self._index = index
name = f"curve_{index}"
super().__init__(parent, name)
self.add_parameter(
"serial_number",
parameter_class=GroupParameter
)
self.add_parameter(
"format",
val_mapping={
f"{unt}/K": i+1 for i, unt in enumerate(self.valid_sensor_units)
},
parameter_class=GroupParameter
)
self.add_parameter(
"limit_value",
parameter_class=GroupParameter
)
self.add_parameter(
"coefficient",
val_mapping={
"negative": 1,
"positive": 2
},
parameter_class=GroupParameter
)
self.add_parameter(
"curve_name",
parameter_class=GroupParameter
)
Group(
[
self.curve_name, self.serial_number, self.format,
self.limit_value, self.coefficient
],
set_cmd=f"CRVHDR {self._index}, {{curve_name}}, "
f"{{serial_number}}, {{format}}, {{limit_value}}, "
f"{{coefficient}}",
get_cmd=f"CRVHDR? {self._index}"
)
def get_data(self) -> dict:
curve = [
float(a) for point_index in range(1, 200)
for a in self.ask(f"CRVPT? {self._index}, {point_index}").split(",")
]
d = {self.temperature_key: curve[1::2]}
sensor_unit = self.format().split("/")[0]
d[sensor_unit] = curve[::2]
return d
@classmethod
def validate_datadict(cls, data_dict: dict) -> str:
"""
A data dict has two keys, one of which is 'Temperature (K)'. The other
contains the units in which the curve is defined and must be one of:
'mV', 'V', 'Ohm' or 'log Ohm'
This method validates this and returns the sensor unit encountered in
the data dict
"""
if cls.temperature_key not in data_dict:
raise ValueError(f"At least {cls.temperature_key} needed in the "
f"data dictionary")
sensor_units = [i for i in data_dict.keys() if i != cls.temperature_key]
if len(sensor_units) != 1:
raise ValueError(
"Data dictionary should have one other key, other then "
"'Temperature (K)'"
)
sensor_unit = sensor_units[0]
if sensor_unit not in cls.valid_sensor_units:
raise ValueError(
f"Sensor unit {sensor_unit} invalid. This needs to be one of "
f"{', '.join(cls.valid_sensor_units)}"
)
data_size = len(data_dict[cls.temperature_key])
if data_size != len(data_dict[sensor_unit]) or data_size > 200:
raise ValueError("The length of the temperature axis should be "
"the same as the length of the sensor axis and "
"should not exceed 200 in size")
return sensor_unit
def set_data(self, data_dict: dict, sensor_unit: str = None) -> None:
"""
Set the curve data according to the values found the the dictionary.
Args:
data_dict (dict): See `validate_datadict` to see the format of this
dictionary
sensor_unit (str): If None, the data dict is validated and the
units are extracted.
"""
if sensor_unit is None:
sensor_unit = self.validate_datadict(data_dict)
temperature_values = data_dict[self.temperature_key]
sensor_values = data_dict[sensor_unit]
for value_index, (temperature_value, sensor_value) in \
enumerate(zip(temperature_values, sensor_values)):
cmd_str = f"CRVPT {self._index}, {value_index + 1}, " \
f"{sensor_value:3.3f}, {temperature_value:3.3f}"
self.write(cmd_str)
class Model_325_Sensor(InstrumentChannel):
"""
A single sensor of a Lakeshore 325.
Args:
parent (Model_325): The instrument this heater belongs to
name (str)
inp (str): Either "A" or "B"
"""
def __init__(self, parent: 'Model_325', name: str, inp: str) -> None:
if inp not in ["A", "B"]:
raise ValueError("Please either specify input 'A' or 'B'")
super().__init__(parent, name)
self._input = inp
self.add_parameter(
'temperature',
get_cmd='KRDG? {}'.format(self._input),
get_parser=float,
label='Temperature',
unit='K'
)
self.add_parameter(
'status',
get_cmd='RDGST? {}'.format(self._input),
get_parser=lambda status: self.decode_sensor_status(int(status)),
label='Sensor_Status'
)
self.add_parameter(
"type",
val_mapping={
"Silicon diode": 0,
"GaAlAs diode": 1,
"100 Ohm platinum/250": 2,
"100 Ohm platinum/500": 3,
"1000 Ohm platinum": 4,
"NTC RTD": 5,
"Thermocouple 25mV": 6,
"Thermocouple 50 mV": 7,
"2.5 V, 1 mA": 8,
"7.5 V, 1 mA": 9
},
parameter_class=GroupParameter
)
self.add_parameter(
"compensation",
vals=Enum(0, 1),
parameter_class=GroupParameter
)
Group(
[self.type, self.compensation],
set_cmd=f"INTYPE {self._input}, {{type}}, {{compensation}}",
get_cmd=f"INTYPE? {self._input}"
)
self.add_parameter(
"curve_index",
set_cmd=f"INCRV {self._input}, {{}}",
get_cmd=f"INCRV? {self._input}",
get_parser=int,
vals=Numbers(min_value=1, max_value=35)
)
@staticmethod
def decode_sensor_status(sum_of_codes: int) -> str:
total_status = Status(sum_of_codes)
if sum_of_codes == 0:
return 'OK'
status_messages = [st.name.replace('_', ' ') for st in Status
if st in total_status]
return ", ".join(status_messages)
@property
def curve(self) -> Model_325_Curve:
parent = cast(Model_325, self.parent)
return Model_325_Curve(parent, self.curve_index())
class Model_325_Heater(InstrumentChannel):
"""
Heater control for the Lakeshore 325.
Args:
parent (Model_325): The instrument this heater belongs to
name (str)
loop (int): Either 1 or 2
"""
def __init__(self, parent: 'Model_325', name: str, loop: int) -> None:
if loop not in [1, 2]:
raise ValueError("Please either specify loop 1 or 2")
super().__init__(parent, name)
self._loop = loop
self.add_parameter(
"control_mode",
get_cmd=f"CMODE? {self._loop}",
set_cmd=f"CMODE {self._loop},{{}}",
val_mapping={
"Manual PID": "1",
"Zone": "2",
"Open Loop": "3",
"AutoTune PID": "4",
"AutoTune PI": "5",
"AutoTune P": "6"
}
)
self.add_parameter(
"input_channel",
vals=Enum("A", "B"),
parameter_class=GroupParameter
)
self.add_parameter(
"unit",
val_mapping={
"Kelvin": "1",
"Celsius": "2",
"Sensor Units": "3"
},
parameter_class=GroupParameter
)
self.add_parameter(
'powerup_enable',
val_mapping={True: 1, False: 0},
parameter_class=GroupParameter
)
self.add_parameter(
"output_metric",
val_mapping={
"current": "1",
"power": "2",
},
parameter_class=GroupParameter
)
Group(
[self.input_channel, self.unit, self.powerup_enable,
self.output_metric],
set_cmd=f"CSET {self._loop}, {{input_channel}}, {{unit}}, "
f"{{powerup_enable}}, {{output_metric}}",
get_cmd=f"CSET? {self._loop}"
)
self.add_parameter(
'P',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
self.add_parameter(
'I',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
self.add_parameter(
'D',
vals=Numbers(0, 1000),
get_parser=float,
parameter_class=GroupParameter
)
Group(
[self.P, self.I, self.D],
set_cmd=f'PID {self._loop}, {{P}}, {{I}}, {{D}}',
get_cmd=f'PID? {self._loop}'
)
if self._loop == 1:
valid_output_ranges = Enum(0, 1, 2)
else:
valid_output_ranges = Enum(0, 1)
self.add_parameter(
'output_range',
vals=valid_output_ranges,
set_cmd=f'RANGE {self._loop}, {{}}',
get_cmd=f'RANGE? {self._loop}',
val_mapping={
"Off": '0',
"Low (2.5W)": '1',
"High (25W)": '2'
}
)
self.add_parameter(
'setpoint',
vals=Numbers(0, 400),
get_parser=float,
set_cmd=f'SETP {self._loop}, {{}}',
get_cmd=f'SETP? {self._loop}'
)
self.add_parameter(
"ramp_state",
vals=Enum(0, 1),
parameter_class=GroupParameter
)
self.add_parameter(
"ramp_rate",
vals=Numbers(0, 100 / 60 * 1E3),
unit="mK/s",
parameter_class=GroupParameter,
get_parser=lambda v: float(v) / 60 * 1E3, # We get values in K/min,
set_parser=lambda v: v * 60 * 1E-3 # Convert to K/min
)
Group(
[self.ramp_state, self.ramp_rate],
set_cmd=f"RAMP {self._loop}, {{ramp_state}}, {{ramp_rate}}",
get_cmd=f"RAMP? {self._loop}"
)
self.add_parameter(
"is_ramping",
get_cmd=f"RAMPST? {self._loop}"
)
self.add_parameter(
"resistance",
get_cmd=f"HTRRES? {self._loop}",
set_cmd=f"HTRRES {self._loop}, {{}}",
val_mapping={
25: 1,
50: 2,
},
label='Resistance',
unit="Ohm"
)
self.add_parameter(
"heater_output",
get_cmd=f"HTR? {self._loop}",
get_parser=float,
label='Heater Output',
unit="%"
)
class Model_325(VisaInstrument):
"""
Lakeshore Model 325 Temperature Controller Driver
"""
def __init__(self, name: str, address: str, **kwargs) -> None:
super().__init__(name, address, terminator="\r\n", **kwargs)
sensors = ChannelList(
self, "sensor", Model_325_Sensor, snapshotable=False)
for inp in ['A', 'B']:
sensor = Model_325_Sensor(self, 'sensor_{}'.format(inp), inp)
sensors.append(sensor)
self.add_submodule('sensor_{}'.format(inp), sensor)
sensors.lock()
self.add_submodule("sensor", sensors)
heaters = ChannelList(
self, "heater", Model_325_Heater, snapshotable=False)
for loop in [1, 2]:
heater = Model_325_Heater(self, 'heater_{}'.format(loop), loop)
heaters.append(heater)
self.add_submodule('heater_{}'.format(loop), heater)
heaters.lock()
self.add_submodule("heater", heaters)
curves = ChannelList(
self, "curve", Model_325_Curve, snapshotable=False
)
for curve_index in range(1, 35):
curve = Model_325_Curve(self, curve_index)
curves.append(curve)
self.add_submodule("curve", curves)
self.connect_message()
def upload_curve(
self, index: int, name: str, serial_number: str, data_dict: dict
) -> None:
"""
Upload a curve to the given index
Args:
index (int): The index to upload the curve to. We can only use
indices reserved for user defined curves, 21-35
name (str)
serial_number (str)
data_dict (dict): A dictionary containing the curve data
"""
if index not in range(21, 36):
raise ValueError("index value should be between 21 and 35")
sensor_unit = Model_325_Curve.validate_datadict(data_dict)
curve = self.curve[index - 1]
curve.curve_name(name)
curve.serial_number(serial_number)
curve.format(f"{sensor_unit}/K")
curve.set_data(data_dict, sensor_unit=sensor_unit)
def upload_curve_from_file(self, index: int, file_path: str) -> None:
"""
Upload a curve from a curve file. Note that we only support
curve files with extension .330
"""
if not file_path.endswith(".330"):
raise ValueError("Only curve files with extension .330 are supported")
with open(file_path, "r") as curve_file:
file_data = read_curve_file(curve_file)
data_dict = get_sanitize_data(file_data)
name = file_data["metadata"]["Sensor Model"]
serial_number = file_data["metadata"]["Serial Number"]
self.upload_curve(index, name, serial_number, data_dict) | 0.912302 | 0.53279 |
"""Step functions for optimization methods."""
from absl import logging
import jax
from jax import numpy as jnp
from jax.flatten_util import ravel_pytree
def gradient_descent_step(data, loss_f, model_param, options):
"""Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
# Handle deep nets
grad, unravel_fn = ravel_pytree(grad)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad
return unravel_fn(model_param), options
def backtracking(next_candidate, stop_cond, step_size_init, options, verbose=0):
"""Backtracking line search.
Args:
next_candidate: a function generating a candidate from a step size.
stop_cond: a function determining whether to stop or not from a step size
and a candidate.
step_size_init: the initial step size to try.
options: a dictionary containing line search specific options.
verbose: whether to enable verbose output or not.
Returns:
step_size, next_candidate
"""
max_iter = options.get('max_linesearch', 20)
step_factor = options.get('step_factor', 0.5)
step_size = step_size_init
next_iter = next_candidate(step_size)
for it in range(max_iter):
if stop_cond(step_size, next_iter):
break
step_size *= step_factor
next_iter = next_candidate(step_size)
if it == max_iter - 1 and verbose:
print('Line search did not converge.')
return step_size, next_iter
def gradient_descent_line_search_step(
data, loss_f, model_param, options):
"""Gradient Descent optimization with line search step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and updated step size.
"""
options = dict(options)
beta = options.get('beta', 0.9)
beta_prime = options.get('beta_prime', 1e-4)
step_size = options.get('step_size', 10000.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
inputs, labels = data[0], data[1]
loss_with_data_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(loss_with_data_f)
value, grad = value_and_grad_f(model_param)
# Maximum learning rate allowed from Theorem 5 in Gunasekar et al. 2017
if options['bound_step']:
# Bound by dual of L2
b_const = jnp.max(jnp.linalg.norm(inputs, ord=2, axis=0))
step_size = min(step_size, 1 / (b_const * b_const * value))
grad, unravel_fn = ravel_pytree(grad)
x, unravel_fn = ravel_pytree(model_param)
# If we normalize step_size will be harder to tune.
direction = -grad
# TODO(fartash): consider using the condition in FISTA
def next_candidate(step_size):
next_iter = x + step_size * direction
next_value, next_grad = value_and_grad_f(unravel_fn(next_iter))
next_grad, _ = ravel_pytree(next_grad)
return next_iter, next_value, next_grad
def stop_cond(step_size, res):
_, next_value, next_grad = res
gd = jnp.sum(grad * direction)
# Strong Wolfe condition.
cond1 = next_value <= value + beta_prime * step_size * gd
cond2 = jnp.sum(jnp.abs(next_grad * direction)) >= beta * gd
return cond1 and cond2
step_size, res = backtracking(
next_candidate, stop_cond, step_size, options=options)
next_param = res[0]
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(next_param), options
def coordinate_descent_step(data, loss_f, model_param, options):
"""Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
grad_max = grad * (jnp.abs(grad) == jnp.abs(grad).max())
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_max
return unravel_fn(model_param), options
def coordinate_descent_line_search_step(data, loss_f, model_param, options):
"""Coordinate Descent with line search optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
options = dict(options)
beta = options.get('beta', 0.9)
beta_prime = options.get('beta_prime', 1e-4)
step_size = options.get('step_size', 10000.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
inputs, labels = data[0], data[1]
loss_with_data_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(loss_with_data_f)
value, grad = value_and_grad_f(model_param)
grad_max = grad * (jnp.abs(grad) == jnp.abs(grad).max())
# Maximum learning rate allowed from Theorem 5 in Gunasekar et al. 2017
if options['bound_step']:
# Bound by dual of L1
b_const = jnp.max(jnp.linalg.norm(inputs, ord=jnp.inf, axis=0))
step_size = min(step_size, 1 / (b_const * b_const * value))
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
x, unravel_fn = ravel_pytree(model_param)
# If we normalize step_size will be harder to tune.
direction = -grad_max
# TODO(fartash): consider using the condition in FISTA
def next_candidate(step_size):
next_iter = x + step_size * direction
next_value, next_grad = value_and_grad_f(unravel_fn(next_iter))
next_grad, _ = ravel_pytree(next_grad)
return next_iter, next_value, next_grad
def stop_cond(step_size, res):
_, next_value, next_grad = res
gd = jnp.sum(grad * direction)
# Strong Wolfe condition.
cond1 = next_value <= value + beta_prime * step_size * gd
cond2 = jnp.sum(jnp.abs(next_grad * direction)) >= beta * gd
return cond1 and cond2
step_size, res = backtracking(
next_candidate, stop_cond, step_size, options=options)
next_param = res[0]
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(next_param), options
def coordinate_descent_topk_step(data, loss_f, model_param, options, k=2):
"""Coordinate Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
k: An integere for the number of topk elements.
Returns:
Updated model parameters and options.
"""
# TODO(fartash): add k to config.py to be part of options.
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
_, coords = jax.lax.top_k(jnp.abs(grad.T), k)
grad_max = 0 * grad
grad_max = jax.ops.index_update(grad_max, coords, grad[coords])
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_max
return unravel_fn(model_param), options
def sign_gradient_descent_step(data, loss_f, model_param, options):
"""Sign Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
grad_sign = jnp.abs(grad).sum() * jnp.sign(grad)
# Handle deep nets
grad_sign, unravel_fn = ravel_pytree(grad_sign)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_sign
return unravel_fn(model_param), options
def fista_step(data, loss_and_prox_op, model_param, options):
"""Fista optimization step for solving regularized problem.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_and_prox_op: Tuple of (loss_f, prox_g)
loss_f is the loss function that takes in model_param, inputs, and labels.
prox_g is the proximity operator for g.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and updated step size.
"""
options = dict(options)
step_size = options.get('step_size', 1.0)
acceleration = options.get('acceleration', True)
t = options.get('t', 1.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
loss_f, prox_g = loss_and_prox_op
inputs, labels = data[0], data[1]
fun_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(fun_f)
x, unravel_fn = ravel_pytree(model_param)
y = options.get('y', x)
value_f, grad_f = value_and_grad_f(unravel_fn(y))
grad_f, unravel_fn = ravel_pytree(grad_f)
def next_candidate(step_size):
return prox_g(y - grad_f * step_size, step_size)
def stop_cond(step_size, next_iter):
diff = next_iter - y
sqdist = jnp.sum(diff**2)
# We do not compute the non-smooth term (g in the paper)
# as it cancels out from value_F and value_Q.
value_bigf = fun_f(next_iter)
value_bigq = value_f + jnp.sum(diff * grad_f) + 0.5 / step_size * sqdist
return value_bigf <= value_bigq
x_old = x
step_size, x = backtracking(next_candidate, stop_cond, step_size, options)
# Acceleration.
if acceleration:
t_next = (1 + jnp.sqrt(1 + 4 * t**2)) / 2.
y = x + (t - 1) / t_next * (x - x_old)
t = t_next
options['y'] = y
options['t'] = t
else:
y = x
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(x), options
def get_optimizer_step(options):
"""Return an optimizer given its name."""
name = options['name']
if name == 'gd' or name == 'cvxpy': # TODO(fartash): do cvxpy the right way
return gradient_descent_step, options
if name == 'gd_ls':
return gradient_descent_line_search_step, options
if name == 'cd':
return coordinate_descent_step, options
if name == 'cd_ls':
return coordinate_descent_line_search_step, options
if name == 'signgd':
return sign_gradient_descent_step, options
if name == 'cdk':
return coordinate_descent_topk_step, options
if name == 'fista':
return fista_step, options
raise Exception('Invalid optimizer.') | robust_optim/optim.py |
"""Step functions for optimization methods."""
from absl import logging
import jax
from jax import numpy as jnp
from jax.flatten_util import ravel_pytree
def gradient_descent_step(data, loss_f, model_param, options):
"""Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
# Handle deep nets
grad, unravel_fn = ravel_pytree(grad)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad
return unravel_fn(model_param), options
def backtracking(next_candidate, stop_cond, step_size_init, options, verbose=0):
"""Backtracking line search.
Args:
next_candidate: a function generating a candidate from a step size.
stop_cond: a function determining whether to stop or not from a step size
and a candidate.
step_size_init: the initial step size to try.
options: a dictionary containing line search specific options.
verbose: whether to enable verbose output or not.
Returns:
step_size, next_candidate
"""
max_iter = options.get('max_linesearch', 20)
step_factor = options.get('step_factor', 0.5)
step_size = step_size_init
next_iter = next_candidate(step_size)
for it in range(max_iter):
if stop_cond(step_size, next_iter):
break
step_size *= step_factor
next_iter = next_candidate(step_size)
if it == max_iter - 1 and verbose:
print('Line search did not converge.')
return step_size, next_iter
def gradient_descent_line_search_step(
data, loss_f, model_param, options):
"""Gradient Descent optimization with line search step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and updated step size.
"""
options = dict(options)
beta = options.get('beta', 0.9)
beta_prime = options.get('beta_prime', 1e-4)
step_size = options.get('step_size', 10000.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
inputs, labels = data[0], data[1]
loss_with_data_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(loss_with_data_f)
value, grad = value_and_grad_f(model_param)
# Maximum learning rate allowed from Theorem 5 in Gunasekar et al. 2017
if options['bound_step']:
# Bound by dual of L2
b_const = jnp.max(jnp.linalg.norm(inputs, ord=2, axis=0))
step_size = min(step_size, 1 / (b_const * b_const * value))
grad, unravel_fn = ravel_pytree(grad)
x, unravel_fn = ravel_pytree(model_param)
# If we normalize step_size will be harder to tune.
direction = -grad
# TODO(fartash): consider using the condition in FISTA
def next_candidate(step_size):
next_iter = x + step_size * direction
next_value, next_grad = value_and_grad_f(unravel_fn(next_iter))
next_grad, _ = ravel_pytree(next_grad)
return next_iter, next_value, next_grad
def stop_cond(step_size, res):
_, next_value, next_grad = res
gd = jnp.sum(grad * direction)
# Strong Wolfe condition.
cond1 = next_value <= value + beta_prime * step_size * gd
cond2 = jnp.sum(jnp.abs(next_grad * direction)) >= beta * gd
return cond1 and cond2
step_size, res = backtracking(
next_candidate, stop_cond, step_size, options=options)
next_param = res[0]
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(next_param), options
def coordinate_descent_step(data, loss_f, model_param, options):
"""Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
grad_max = grad * (jnp.abs(grad) == jnp.abs(grad).max())
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_max
return unravel_fn(model_param), options
def coordinate_descent_line_search_step(data, loss_f, model_param, options):
"""Coordinate Descent with line search optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
options = dict(options)
beta = options.get('beta', 0.9)
beta_prime = options.get('beta_prime', 1e-4)
step_size = options.get('step_size', 10000.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
inputs, labels = data[0], data[1]
loss_with_data_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(loss_with_data_f)
value, grad = value_and_grad_f(model_param)
grad_max = grad * (jnp.abs(grad) == jnp.abs(grad).max())
# Maximum learning rate allowed from Theorem 5 in Gunasekar et al. 2017
if options['bound_step']:
# Bound by dual of L1
b_const = jnp.max(jnp.linalg.norm(inputs, ord=jnp.inf, axis=0))
step_size = min(step_size, 1 / (b_const * b_const * value))
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
x, unravel_fn = ravel_pytree(model_param)
# If we normalize step_size will be harder to tune.
direction = -grad_max
# TODO(fartash): consider using the condition in FISTA
def next_candidate(step_size):
next_iter = x + step_size * direction
next_value, next_grad = value_and_grad_f(unravel_fn(next_iter))
next_grad, _ = ravel_pytree(next_grad)
return next_iter, next_value, next_grad
def stop_cond(step_size, res):
_, next_value, next_grad = res
gd = jnp.sum(grad * direction)
# Strong Wolfe condition.
cond1 = next_value <= value + beta_prime * step_size * gd
cond2 = jnp.sum(jnp.abs(next_grad * direction)) >= beta * gd
return cond1 and cond2
step_size, res = backtracking(
next_candidate, stop_cond, step_size, options=options)
next_param = res[0]
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(next_param), options
def coordinate_descent_topk_step(data, loss_f, model_param, options, k=2):
"""Coordinate Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
k: An integere for the number of topk elements.
Returns:
Updated model parameters and options.
"""
# TODO(fartash): add k to config.py to be part of options.
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
_, coords = jax.lax.top_k(jnp.abs(grad.T), k)
grad_max = 0 * grad
grad_max = jax.ops.index_update(grad_max, coords, grad[coords])
# Handle deep nets
grad_max, unravel_fn = ravel_pytree(grad_max)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_max
return unravel_fn(model_param), options
def sign_gradient_descent_step(data, loss_f, model_param, options):
"""Sign Gradient Descent optimization step.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_f: The loss function that takes in model_param, inputs, and labels.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and options.
"""
dloss_dw = jax.grad(loss_f, argnums=0)
inputs, labels = data[0], data[1]
grad = dloss_dw(model_param, inputs, labels)
grad_sign = jnp.abs(grad).sum() * jnp.sign(grad)
# Handle deep nets
grad_sign, unravel_fn = ravel_pytree(grad_sign)
model_param, unravel_fn = ravel_pytree(model_param)
model_param -= options['lr'] * grad_sign
return unravel_fn(model_param), options
def fista_step(data, loss_and_prox_op, model_param, options):
"""Fista optimization step for solving regularized problem.
Args:
data: A tuple of inputs and labels passed to the loss function.
loss_and_prox_op: Tuple of (loss_f, prox_g)
loss_f is the loss function that takes in model_param, inputs, and labels.
prox_g is the proximity operator for g.
model_param: Current model parameters to be passed to loss_f.
options: A dictionary of optimizer specific hyper-parameters.
Returns:
Updated model parameters and updated step size.
"""
options = dict(options)
step_size = options.get('step_size', 1.0)
acceleration = options.get('acceleration', True)
t = options.get('t', 1.0)
verbose = options.get('verbose', False)
reuse_last_step = options.get('reuse_last_step', False)
loss_f, prox_g = loss_and_prox_op
inputs, labels = data[0], data[1]
fun_f = lambda param: loss_f(param, inputs, labels)
value_and_grad_f = jax.value_and_grad(fun_f)
x, unravel_fn = ravel_pytree(model_param)
y = options.get('y', x)
value_f, grad_f = value_and_grad_f(unravel_fn(y))
grad_f, unravel_fn = ravel_pytree(grad_f)
def next_candidate(step_size):
return prox_g(y - grad_f * step_size, step_size)
def stop_cond(step_size, next_iter):
diff = next_iter - y
sqdist = jnp.sum(diff**2)
# We do not compute the non-smooth term (g in the paper)
# as it cancels out from value_F and value_Q.
value_bigf = fun_f(next_iter)
value_bigq = value_f + jnp.sum(diff * grad_f) + 0.5 / step_size * sqdist
return value_bigf <= value_bigq
x_old = x
step_size, x = backtracking(next_candidate, stop_cond, step_size, options)
# Acceleration.
if acceleration:
t_next = (1 + jnp.sqrt(1 + 4 * t**2)) / 2.
y = x + (t - 1) / t_next * (x - x_old)
t = t_next
options['y'] = y
options['t'] = t
else:
y = x
if reuse_last_step:
options['step_size'] = step_size
if verbose:
logging.info('Step size: %f', step_size)
return unravel_fn(x), options
def get_optimizer_step(options):
"""Return an optimizer given its name."""
name = options['name']
if name == 'gd' or name == 'cvxpy': # TODO(fartash): do cvxpy the right way
return gradient_descent_step, options
if name == 'gd_ls':
return gradient_descent_line_search_step, options
if name == 'cd':
return coordinate_descent_step, options
if name == 'cd_ls':
return coordinate_descent_line_search_step, options
if name == 'signgd':
return sign_gradient_descent_step, options
if name == 'cdk':
return coordinate_descent_topk_step, options
if name == 'fista':
return fista_step, options
raise Exception('Invalid optimizer.') | 0.933749 | 0.730843 |
from __future__ import division
import tensorflow as tf
from ACNET4_test import ACNet
import numpy as np
import json
import os
import time
import pickle
import tensorflow.contrib.layers as layers
import matplotlib.pyplot as plt
import threading
from datetime import datetime
import copy
import scipy.signal as signal
import sys
from NewAgentInitObs import StateMaskingObs as TrafficLightObs
from flatland.envs.rail_env import RailEnv
from flatland.utils.rendertools import RenderTool
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.core.grid.grid_utils import distance_on_rail as manhattan_distance
import imageio
import random
# NOTE(review): not referenced in this chunk; presumably the directory for
# saved/pre-generated environments — confirm against later code.
environment_path = "saved_environments"
class FLATLAND(object):
    '''
    Evaluation driver for a trained ACNet policy on Flatland rail
    environments: runs multiple instances of the trained network in a
    single environment, pruning invalid actions per agent and optionally
    rendering episodes to GIFs.
    '''
def __init__(self, model_path, obs_size, TEST_FLATLAND_ENVIRONMENTS,saveGIF,gifs_path):
self.obs_size = obs_size
self.ADDITIONAL_INPUT = 6
self.TEST_FLATLAND_ENVIRONMENTS = TEST_FLATLAND_ENVIRONMENTS
self.PRUNE_ACTIONS = True
self.saveGIF = saveGIF
self.SAVEGIFFREQUENCY = 5
self.SKIPLARGE = True
self.gifs_path = gifs_path
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.network = ACNet("global", 4, None, False, "global", obs_size)
self.episode_count =0
# load the weights from the checkpoint (only the global ones!)
ckpt = tf.train.get_checkpoint_state(model_path)
saver = tf.train.Saver()
saver.restore(self.sess, ckpt.model_checkpoint_path)
def StateClassifier(self, agent_pos, agent_dir):
"""
returns 0 : No decision point
returns 1 : Stopping point (Decision at next cell)
returns 2 : At decision point currently (More than 1 available transition)
returns 3 : MUST STOP point - Agent Ahead
returns 4 : MUST STOP point + Stopping Point
returns None: invalid cell
"""
avb_moves = self.env.rail.get_transitions(*agent_pos, agent_dir)
move2grid = np.array([[[0, -1], [-1, 0], [0, +1]], [[-1, 0], [0, +1], [+1, 0]], [[0, +1], [+1, 0], [0, -1]],
[[+1, 0], [0, -1], [-1, 0]]]) # Obtained from collidingagent code
trans2act = np.array([[2, 3, 0, 1], [1, 2, 3, 0], [0, 1, 2, 3], [
3, 0, 1, 2]]) # Maps transition to an action
# next_dir_grid = np.array([-1,0,1]) # Maps action to a change in agent direction
if sum(avb_moves) > 1: # This is definitely a decision junction since more than 1 move possible
return 2
elif sum(avb_moves) == 1:
# Get the available transition to next cell
avbmove = avb_moves.index(1)
# Get the corresponding action for that transition
action = trans2act[agent_dir][avbmove]
if action == 0:
next_pos = agent_pos + move2grid[(agent_dir + 2) % 4][
1] # This is a dead end, so turn around and move forward
else:
next_pos = agent_pos + move2grid[agent_dir][action - 1]
# next_dir = (agent_dir + (next_dir_grid[action-1]) )%4
sumnextcell = 0 # How many possible transitions at next cell
for i in range(0, 4):
new_avb_moves = self.env.rail.get_transitions(*next_pos, i)
sumnextcell += sum(new_avb_moves)
# Also have to check whether the junction is occupied
Occupied = False
for k in range(len(self.env.agents)):
if self.env.agents[k].position is None:
if self.env.dones[k]:
my_pos = (-3, -3)
else:
my_pos = (-3,-3)
else:
my_pos = self.env.agents[k].position
if my_pos[0] == next_pos[0] and my_pos[1] == next_pos[1]:
Occupied = True
break
if (sumnextcell > 2) and Occupied:
return 4 # The agent is currently at a MUST STOP point
elif (sumnextcell > 2) and (not Occupied): # The agent is at a stopping point
return 1
elif (sumnextcell <= 2) and Occupied:
return 3 # The agent is at a MUST STOP point
else:
return 0 # The agent is at a no decision point
else:
# print("The agent is at an impossible cell") # This happen when checking stopping agents
# print("agent_dir:", agent_dir, " agent_pos:", agent_pos)
return None
def _NextValidActions(self,obs,agentID):
"""
returns list of valid actions
List[0]= LEFT , List[1] = Straight , List[2] = Right , List[3] = Stop
If at NO decision point, just go forward [0]
If at stopping point, look 1 timestep into future and conclude : returns [0,3](stop,go) or [3](stop)
If at junction, get valid directions to go in. No stopping allowed here
If no available direction at junction : return [3](stop) , This means we're screwed
"""
currentobs = obs[0:self.obs_size-self.ADDITIONAL_INPUT]
traffic_signal = obs[self.obs_size-self.ADDITIONAL_INPUT]
homo_junctions = obs[(self.obs_size-3) :self.obs_size]
if traffic_signal == -1:
validactions = [3]
return validactions
currentobs = np.reshape(currentobs, (3, -1))
if self.env.agents[agentID].position is None:
if self.env.dones[agentID]:
actual_dir = self.env.agents[agentID].old_direction
actual_pos = self.env.agents[agentID].target
else:
actual_dir = self.env.agents[agentID].initial_direction
actual_pos = self.env.agents[agentID].initial_position
else:
actual_dir = self.env.agents[agentID].direction
actual_pos = self.env.agents[agentID].position
state = self.StateClassifier(actual_pos, actual_dir)
# currentobs = joint_observations[self.metaAgentID][self.agentID]
if state in [3, 4]: # Must Stop Point
validactions = [3]
return validactions
elif state == 0: # Currently at NO decision point
validactions = [1]
return validactions
elif state == 1: # Currently at stopping point
SolExist = [currentobs[0][0], currentobs[1][0], currentobs[2][0]] # Imagine we are at decision junction
agentsblocking = [currentobs[0][2], currentobs[1][2], currentobs[2][2]]
agentsblockingjunction = [currentobs[0][3], currentobs[1][3], currentobs[2][3]]
agentsdiff = [currentobs[0][4], currentobs[1][4], currentobs[2][4]]
for i in range(0, 3):
# Check if there is any available non-blocked path which leads to a solution
if (SolExist[i] == 1) and (agentsblocking[i] == 0):
validactions = [1, 3] # If there is such a path, then going forward allowed
return validactions
if homo_junctions.count(1) >=2 :
for i in range(0,3) :
if (SolExist[i] == 1) and (agentsblockingjunction[i] == 0) and homo_junctions[i]== 1 :
validactions = [1, 3]
return validactions
validactions = [3] # If there is no such path, only stopping allowed
return validactions
else: # Currently at junction
SolExist = [currentobs[0][0], currentobs[1][0], currentobs[2][0]]
agentsblocking = [currentobs[0][2], currentobs[1][2], currentobs[2][2]]
agentsblockingjunction = [currentobs[0][3], currentobs[1][3], currentobs[2][3]]
agentsdiff = [currentobs[0][4], currentobs[1][4], currentobs[2][4]]
# stoppingoccupied = [currentobs[0][ENTRY_PER_COLUMN - 1],
# currentobs[1][ENTRY_PER_COLUMN - 1],
# currentobs[2][ENTRY_PER_COLUMN - 1]]
validactions = []
for i in range(0, 3):
if (SolExist[i] == 1) and (agentsblocking[i] == 0): # and stoppingoccupied[i]==False:
validactions.append(i)
if validactions:
return validactions
else:
if homo_junctions.count(1) >=2 :
for i in range(0,3) :
if (SolExist[i] == 1) and (agentsblockingjunction[i] == 0) and homo_junctions[i]== 1 :
validactions.append(i)
break
if validactions:
return validactions
return [3]
def getparams(self, size):
tid = np.random.randint(0, 50)
seed = tid * 19997 + 997
random.seed(seed)
nSize = int((size-20)/5)
nr_cities = 2 + nSize // 2 + random.randint(0, 2)
# , 10 + random.randint(0, 10))
nr_trains = min(nr_cities * 5, 5 + random.randint(0, 5))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, nSize)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
def make_gif(self,images, fname, duration=2, true_image=False,salience=False,salIMGS=None):
imageio.mimwrite(fname,images,subrectangles=True)
print("\nwrote gif")
def set_env(self, num_agents, id, width, height, max_cities=None, max_rails=None):
if not TEST_FLATLAND_ENVIRONMENTS:
if id % 10 == 0:
while True:
try:
seed, nr_trains, nr_cities,\
max_rails_between_city, max_rails_in_cities, _, _, _ = self.getparams(
width)
print('size:', width)
print('cities:', nr_cities)
print('agents', num_agents)
print('seed', seed)
gameEnv = RailEnv(width=width, height=width, rail_generator=sparse_rail_generator(
max_num_cities=nr_cities, max_rails_between_cities=max_rails_between_city,
max_rails_in_city=max_rails_in_cities, seed=seed, grid_mode=False),
schedule_generator=sparse_schedule_generator(), obs_builder_object=TrafficLightObs(),
number_of_agents=num_agents)
gameEnv.global_reward = 20
gameEnv.step_penalty = -0.3
self.env = gameEnv
obs = self.env.reset(True, True)
break
except Exception:
pass
else:
obs = self.env.reset(True, True)
print('Reset Successfully')
self.episode_count +=1
if self.saveGIF and self.episode_count % self.SAVEGIFFREQUENCY ==0 :
self.env_renderer = RenderTool(self.env)
self.env_renderer.render_env(show=False, frames=False, show_observations=False)
self.episode_frames = [self.env_renderer.get_image()]
return obs
else:
if id == 0:
while True:
try:
tid = np.random.randint(0, 50)
seed = tid * 19997 + 997
gameEnv = RailEnv(width=width, height=height, rail_generator=sparse_rail_generator(
max_num_cities=max_cities, max_rails_between_cities=2,
max_rails_in_city=max_rails, seed=seed, grid_mode=False),
schedule_generator=sparse_schedule_generator(), obs_builder_object=TrafficLightObs(),
number_of_agents=num_agents)
gameEnv.global_reward = 20
gameEnv.step_penalty = -0.3
self.env = gameEnv
obs = self.env.reset(True, True)
break
except Exception:
pass
else:
obs = self.env.reset(True, True)
print('Reset Successfully')
self.episode_count +=1
if self.saveGIF and self.episode_count % self.SAVEGIFFREQUENCY ==0 :
self.env_renderer = RenderTool(self.env)
self.env_renderer.render_env(show=False, frames=False, show_observations=False)
self.episode_frames = [self.env_renderer.get_image()]
return obs
def check_action(self, obs, id, done_tag):
if done_tag == 1:
return 0
initialization = obs[self.obs_size - 5]
previous_pos = self.env.agents[id].position if self.env.agents[id].position \
else self.env.agents[id].initial_position
previous_dir = self.env.agents[id].direction if self.env.agents[id].direction is not None else \
self.env.agents[id].initial_direction
state = self.StateClassifier(previous_pos, previous_dir)
if initialization == 1:
return 0
elif initialization == 0 and self.initialized[id] == 0:
self.initialized[id] = 1
return 2
elif state == 0: # no decision point
return 2
elif state in [3, 4]:
return 4
else:
return -1
def step_all_parallel(self, step, all_obs):
'''advances the state of the environment by a single step across all agents'''
joint_actions = {}
if step == 0:
for agent in range(0, len(self.env.agents)):
o = all_obs[0][agent]
s_feed = np.reshape(o, (1, self.obs_size))
a = self.check_action(o, agent, self.agent_done[agent])
if a == -1:
a_dist = self.sess.run([self.network.policy], feed_dict={
self.network.inputs: s_feed})
a = np.random.choice(
range(a_dist.shape[1]), p=a_dist.ravel()) + 1
joint_actions[agent] = a
else:
if len(self.env.agents) < 81 or ((len(self.env.agents)==100) and ((self.env.height+self.env.width)==200)) or self.SKIP_LARGE == False:
observations =[]
for i in range(0,len(self.env.agents)) :
observations.append(all_obs[i])
s_feed = np.reshape(observations, (len(self.env.agents), self.obs_size))
action_set = self.sess.run([self.network.policy], feed_dict={
self.network.inputs: s_feed})
for agent in range(0, len(self.env.agents)):
o = all_obs[agent]
#s_feed = np.reshape(o, (1, self.obs_size))
a = self.check_action(o, agent, self.agent_done[agent])
if a == -1:
# a_dist = self.sess.run([self.network.policy], feed_dict={
# self.network.inputs: s_feed})
a_dist = action_set[0][agent]
a_dist = np.array(a_dist)
#a_dist = a_dist[0]
if self.PRUNE_ACTIONS :
validactions = self._NextValidActions(o,agent)
if not (np.argmax(a_dist.flatten()) in validactions):
a = np.random.choice(validactions) + 1
else :
a = np.argmax(a_dist.flatten()) + 1
else :
a = np.argmax(a_dist.flatten()) + 1 # a = np.random.choice(range(a_dist.shape[1]), p=a_dist.ravel()) + 1
joint_actions[agent] = a
starttime = time.time()
all_obs, _, all_done, _ = self.env.step(joint_actions)
self.timeobs += round((time.time()-starttime), 2)
return all_obs, all_done
    def find_path(self, all_obs, max_step=384):
        '''Run one full episode until every agent is done or max_step steps.

        Returns (solution, all_done, num_done, timeobs): solution is a
        step-indexed array of every agent's grid position, all_done says
        whether all agents finished, num_done counts finished agents and
        timeobs is the accumulated env.step wall time.
        '''
        solution = []
        step = 0
        # Per-agent episode bookkeeping consumed by check_action/step_all_parallel.
        self.initialized = [0 for i in range(len(self.env.agents))]
        self.agent_done = [0 for i in range(len(self.env.agents))]
        self.timeobs =0
        while(not self.env.dones["__all__"] and step < max_step):
            # Snapshot every agent's current (or initial, if unplaced) position.
            timestep = []
            for agent in range(0, len(self.env.agents)):
                position = self.env.agents[agent].position if self.env.agents[agent].position is not None else \
                    self.env.agents[agent].initial_position
                timestep.append(position)
            solution.append(np.array(timestep))
            all_obs, all_done = self.step_all_parallel(step, all_obs)
            for agent in range(0, len(self.env.agents)):
                self.agent_done[agent] = all_done[agent]
            step += 1
            if self.saveGIF and self.episode_count% self.SAVEGIFFREQUENCY ==0 :
                self.env_renderer.render_env(show=False, frames=False, show_observations=False)
                self.episode_frames.append(self.env_renderer.get_image())
        if self.saveGIF and self.episode_count% self.SAVEGIFFREQUENCY ==0 :
            time_per_step = 0.1
            images = np.array(self.episode_frames)
            self.make_gif(images, '{}/test_episode_{:d}_{:d}_{:s}.gif'.format(self.gifs_path,self.episode_count,step,("_success" if self.env.dones["__all__"] else "")))
        print('step', step)
        print('Done', self.agent_done.count(1), '/', len(self.env.agents))
        # NOTE(review): this final loop appends to `timestep`, but `solution`
        # already holds a *copy* (np.array) of it, so these appends are never
        # read; it also raises NameError if the while loop body never ran.
        for agent in range(0, len(self.env.agents)):
            position = self.env.agents[agent].position if self.env.agents[
                agent].position is not None else self.env.agents[agent].initial_position
            timestep.append(position)
        all_done = self.agent_done.count(1) == len(self.env.agents)
        return np.array(solution), all_done, self.agent_done.count(1), self.timeobs
def make_name(num_agents, size, id, extension, dirname, extra=""):
    """Build the output file path for one test run.

    Without ``extra`` the extension fills the id slot of the template; with
    ``extra`` the tag is inserted before the extension.
    """
    if extra == "":
        stem = "{}_agents_{}_size_{}_id_{}".format(num_agents, size, id, extension)
    else:
        stem = "{}_agents_{}_size_{}_id_{}{}".format(num_agents, size, id, extra, extension)
    return dirname + '/' + stem
def run_simulations(next, flatland_test):
    """Run one full episode described by the tuple *next* and summarize it.

    *next* is ``(num_agents, id, width, height, max_cities, max_rails)``;
    *flatland_test* must provide ``set_env`` and ``find_path``.  Returns a
    dict with keys Successful_Agents, Observetime, finished, time, length.
    """
    num_agents, ep_id, width, height, max_cities, max_rails = next
    all_obs = flatland_test.set_env(
        num_agents, ep_id, width, height, max_cities, max_rails)
    started = time.time()
    print('Starting test ({},{},{},{})'.format(num_agents, width, height, ep_id))
    # Step budget scales with grid size and agent density.
    step_budget = int(8 * (height + width + (num_agents / max_cities))) - 2
    path, all_done, num_done, obs_time = flatland_test.find_path(all_obs, step_budget)
    return {
        'Successful_Agents': num_done,
        'Observetime': obs_time,
        'finished': bool(all_done),
        'time': round(time.time() - started, 2),
        'length': len(path),
    }
if __name__ == "__main__":
def getfilename() :
today = datetime.today()
d1 = today.strftime("%d-%m")
now = datetime.now()
current_time = now.strftime("%H")
filename = "Flatland_Test" + "_"+ d1 + "_" + current_time + ".txt"
return filename
obs_size = TrafficLightObs.OBS_SIZE
num_agents = 4
num_iterations = 10
min_grid_size = 30
max_grid_size = 80
max_agents = 128
TEST_FLATLAND_ENVIRONMENTS = True
saveGIF = False
filename = str(getfilename())
flatland_environments = [[50, 5, 25, 25, 2, 3, 50], [50, 10, 30, 30, 2, 3, 100], [50, 20, 30, 30, 3, 3, 200], [40, 50, 20, 35, 3, 3, 500],
[30, 80, 35, 20, 5, 3, 800], [30, 80, 35, 35, 5, 4, 800], [
30, 80, 40, 60, 9, 4, 800], [30, 80, 60, 40, 13, 4, 800],
[20, 80, 60, 60, 17, 4, 800], [20, 100, 80, 120, 21, 4, 1000], [
20, 100, 100, 80, 25, 4, 1000], [10, 200, 100, 100, 29, 4, 2000],
[10, 200, 150, 150, 33, 4, 2000], [10, 400, 150, 150, 37, 4, 4000]]
flatland_test = FLATLAND('newmod', obs_size,
TEST_FLATLAND_ENVIRONMENTS,saveGIF,'./gifs_SMObs')
summary_file = open(filename, "w+")
summary_file.write("Summary of Flatland Testing")
summary_file.write("\n")
summary_file.close()
if not TEST_FLATLAND_ENVIRONMENTS:
while num_agents <= max_agents:
num_agents *= 2
print("Starting tests for %d agents" % num_agents)
for size in range(min_grid_size, max_grid_size, 5):
summary_file = open(filename, "a+")
if size != 30:
successful_rate = round(
(100*total_completed/(num_iterations*num_agents)), 2)
episode_success_rate = round(
(100*success_count/num_iterations), 2)
summary_file.write(
"Agent Success Rate: {}".format(successful_rate))
summary_file.write("\n")
summary_file.write(
"Episode Success Rate: {}".format(episode_success_rate))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Size: {} Agents: {}".format(size, num_agents))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
total_completed = 0
success_count = 0
total_time = 0
for iter in range(num_iterations):
results = run_simulations(
(num_agents, iter, size, size, 3, None), flatland_test)
total_completed += results['Successful_Agents']
if results['finished'] == True:
success_count += 1
summary_file = open(filename, "a+")
summary_file.write(" Finished: {} CompletedAgents: {} TimeTaken: {} Length: {} ".format(results['finished'],
results['Successful_Agents'], results['time'], results['length']))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
else:
total_done = 0
total_agents = 0
TOTAL_TIME = 0
for index in range(len(flatland_environments)):
summary_file = open(filename, "a+")
if index != 0:
num_iterations = flatland_environments[index-1][0]
successful_rate = round(
(100*total_completed/(num_iterations*num_agents)), 2)
episode_success_rate = round(
(100*success_count/num_iterations), 2)
total_done += total_completed
total_agents += num_iterations*num_agents
summary_file.write(
"Agent Success Rate: {}".format(successful_rate))
summary_file.write("\n")
summary_file.write(
"Episode Success Rate: {}".format(episode_success_rate))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Taken : {} Minutes".format(round((total_time/60),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Time Taken : {} Seconds".format(round((total_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Observation Time : {} Seconds".format(round((obs_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Elapsed so Far: {} Minutes".format(round((TOTAL_TIME/60),2)))
summary_file.write("\n")
summary_file.write("\n")
num_agents = flatland_environments[index][1]
width = flatland_environments[index][2]
height = flatland_environments[index][3]
max_cities = flatland_environments[index][4]
max_rails = flatland_environments[index][5]
summary_file.write("Environment: {} Agents: {} Width: {} Height: {}".format(index,num_agents,width,height))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
total_completed = 0
total_time = 0
success_count = 0
obs_time = 0
for iter in range(flatland_environments[index][0]):
results = run_simulations(
(num_agents, iter, width, height, max_cities, max_rails), flatland_test)
total_completed += results['Successful_Agents']
total_time+= results['time']
TOTAL_TIME += results['time']
obs_time += results['Observetime']
if results['finished'] == True:
success_count += 1
summary_file = open(filename, "a+")
summary_file.write(" Finished: {} CompletedAgents: {} TimeTaken: {} Length: {} ".format(results['finished'],
results['Successful_Agents'], results['time'], results['length']))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
summary_file = open(filename, "a+")
num_iterations = flatland_environments[index-1][0]
successful_rate = round(
(100*total_completed/(num_iterations*num_agents)), 2)
episode_success_rate = round(
(100*success_count/num_iterations), 2)
total_done += total_completed
total_agents += num_iterations*num_agents
summary_file.write(
"Agent Success Rate: {}".format(successful_rate))
summary_file.write("\n")
summary_file.write(
"Episode Success Rate: {}".format(episode_success_rate))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Taken : {} Minutes".format(round((total_time/60),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Time Taken : {} Seconds".format(round((total_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Observation Time : {} Seconds".format(round((obs_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Elapsed so Far: {} Minutes".format(round((TOTAL_TIME/60),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"AVERAGE SUCCESS RATE: {}".format((100*total_done)/total_agents))
summary_file.close()
print("finished all tests!") | flatland_testing.py | from __future__ import division
import tensorflow as tf
from ACNET4_test import ACNet
import numpy as np
import json
import os
import time
import pickle
import tensorflow.contrib.layers as layers
import matplotlib.pyplot as plt
import threading
from datetime import datetime
import copy
import scipy.signal as signal
import sys
from NewAgentInitObs import StateMaskingObs as TrafficLightObs
from flatland.envs.rail_env import RailEnv
from flatland.utils.rendertools import RenderTool
from flatland.envs.rail_generators import sparse_rail_generator
from flatland.envs.schedule_generators import sparse_schedule_generator
from flatland.core.grid.grid_utils import distance_on_rail as manhattan_distance
import imageio
import random
environment_path = "saved_environments"
class FLATLAND(object):
'''
This class provides functionality for running multiple instances of the
trained network in a single environment
'''
def __init__(self, model_path, obs_size, TEST_FLATLAND_ENVIRONMENTS,saveGIF,gifs_path):
self.obs_size = obs_size
self.ADDITIONAL_INPUT = 6
self.TEST_FLATLAND_ENVIRONMENTS = TEST_FLATLAND_ENVIRONMENTS
self.PRUNE_ACTIONS = True
self.saveGIF = saveGIF
self.SAVEGIFFREQUENCY = 5
self.SKIPLARGE = True
self.gifs_path = gifs_path
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
self.network = ACNet("global", 4, None, False, "global", obs_size)
self.episode_count =0
# load the weights from the checkpoint (only the global ones!)
ckpt = tf.train.get_checkpoint_state(model_path)
saver = tf.train.Saver()
saver.restore(self.sess, ckpt.model_checkpoint_path)
    def StateClassifier(self, agent_pos, agent_dir):
        """
        returns 0 : No decision point
        returns 1 : Stopping point (Decision at next cell)
        returns 2 : At decision point currently (More than 1 available transition)
        returns 3 : MUST STOP point - Agent Ahead
        returns 4 : MUST STOP point + Stopping Point
        returns None: invalid cell
        """
        # Transitions available from this cell when facing agent_dir.
        avb_moves = self.env.rail.get_transitions(*agent_pos, agent_dir)
        move2grid = np.array([[[0, -1], [-1, 0], [0, +1]], [[-1, 0], [0, +1], [+1, 0]], [[0, +1], [+1, 0], [0, -1]],
                              [[+1, 0], [0, -1], [-1, 0]]]) # Obtained from collidingagent code
        trans2act = np.array([[2, 3, 0, 1], [1, 2, 3, 0], [0, 1, 2, 3], [
            3, 0, 1, 2]]) # Maps transition to an action
        # next_dir_grid = np.array([-1,0,1]) # Maps action to a change in agent direction
        if sum(avb_moves) > 1: # This is definitely a decision junction since more than 1 move possible
            return 2
        elif sum(avb_moves) == 1:
            # Get the available transition to next cell
            avbmove = avb_moves.index(1)
            # Get the corresponding action for that transition
            action = trans2act[agent_dir][avbmove]
            if action == 0:
                next_pos = agent_pos + move2grid[(agent_dir + 2) % 4][
                    1] # This is a dead end, so turn around and move forward
            else:
                next_pos = agent_pos + move2grid[agent_dir][action - 1]
            # next_dir = (agent_dir + (next_dir_grid[action-1]) )%4
            sumnextcell = 0 # How many possible transitions at next cell
            for i in range(0, 4):
                new_avb_moves = self.env.rail.get_transitions(*next_pos, i)
                sumnextcell += sum(new_avb_moves)
            # Also have to check whether the junction is occupied
            Occupied = False
            for k in range(len(self.env.agents)):
                if self.env.agents[k].position is None:
                    # NOTE(review): both branches assign the same off-grid
                    # sentinel, so the dones[k] test below is dead code.
                    if self.env.dones[k]:
                        my_pos = (-3, -3)
                    else:
                        my_pos = (-3,-3)
                else:
                    my_pos = self.env.agents[k].position
                if my_pos[0] == next_pos[0] and my_pos[1] == next_pos[1]:
                    Occupied = True
                    break
            if (sumnextcell > 2) and Occupied:
                return 4 # The agent is currently at a MUST STOP point
            elif (sumnextcell > 2) and (not Occupied): # The agent is at a stopping point
                return 1
            elif (sumnextcell <= 2) and Occupied:
                return 3 # The agent is at a MUST STOP point
            else:
                return 0 # The agent is at a no decision point
        else:
            # print("The agent is at an impossible cell") # This happen when checking stopping agents
            # print("agent_dir:", agent_dir, " agent_pos:", agent_pos)
            return None
    def _NextValidActions(self,obs,agentID):
        """
        returns list of valid actions
        List[0]= LEFT , List[1] = Straight , List[2] = Right , List[3] = Stop
        If at NO decision point, just go forward [0]
        If at stopping point, look 1 timestep into future and conclude : returns [0,3](stop,go) or [3](stop)
        If at junction, get valid directions to go in. No stopping allowed here
        If no available direction at junction : return [3](stop) , This means we're screwed
        """
        # Split the flat observation: tree features, traffic light, and the
        # trailing 3 homogeneous-junction flags.
        currentobs = obs[0:self.obs_size-self.ADDITIONAL_INPUT]
        traffic_signal = obs[self.obs_size-self.ADDITIONAL_INPUT]
        # NOTE(review): the slicing plus .count() below assume `obs` is a plain
        # Python list (numpy arrays have no .count) -- confirm with the obs builder.
        homo_junctions = obs[(self.obs_size-3) :self.obs_size]
        if traffic_signal == -1:
            validactions = [3]
            return validactions
        currentobs = np.reshape(currentobs, (3, -1))
        # Resolve the agent's effective position/direction (placed, done, or unplaced).
        if self.env.agents[agentID].position is None:
            if self.env.dones[agentID]:
                actual_dir = self.env.agents[agentID].old_direction
                actual_pos = self.env.agents[agentID].target
            else:
                actual_dir = self.env.agents[agentID].initial_direction
                actual_pos = self.env.agents[agentID].initial_position
        else:
            actual_dir = self.env.agents[agentID].direction
            actual_pos = self.env.agents[agentID].position
        state = self.StateClassifier(actual_pos, actual_dir)
        # currentobs = joint_observations[self.metaAgentID][self.agentID]
        if state in [3, 4]: # Must Stop Point
            validactions = [3]
            return validactions
        elif state == 0: # Currently at NO decision point
            validactions = [1]
            return validactions
        elif state == 1: # Currently at stopping point
            SolExist = [currentobs[0][0], currentobs[1][0], currentobs[2][0]] # Imagine we are at decision junction
            agentsblocking = [currentobs[0][2], currentobs[1][2], currentobs[2][2]]
            agentsblockingjunction = [currentobs[0][3], currentobs[1][3], currentobs[2][3]]
            # NOTE(review): agentsdiff is computed but never used (both branches).
            agentsdiff = [currentobs[0][4], currentobs[1][4], currentobs[2][4]]
            for i in range(0, 3):
                # Check if there is any available non-blocked path which leads to a solution
                if (SolExist[i] == 1) and (agentsblocking[i] == 0):
                    validactions = [1, 3] # If there is such a path, then going forward allowed
                    return validactions
            if homo_junctions.count(1) >=2 :
                for i in range(0,3) :
                    if (SolExist[i] == 1) and (agentsblockingjunction[i] == 0) and homo_junctions[i]== 1 :
                        validactions = [1, 3]
                        return validactions
            validactions = [3] # If there is no such path, only stopping allowed
            return validactions
        else: # Currently at junction
            SolExist = [currentobs[0][0], currentobs[1][0], currentobs[2][0]]
            agentsblocking = [currentobs[0][2], currentobs[1][2], currentobs[2][2]]
            agentsblockingjunction = [currentobs[0][3], currentobs[1][3], currentobs[2][3]]
            agentsdiff = [currentobs[0][4], currentobs[1][4], currentobs[2][4]]
            # stoppingoccupied = [currentobs[0][ENTRY_PER_COLUMN - 1],
            # currentobs[1][ENTRY_PER_COLUMN - 1],
            # currentobs[2][ENTRY_PER_COLUMN - 1]]
            validactions = []
            for i in range(0, 3):
                if (SolExist[i] == 1) and (agentsblocking[i] == 0): # and stoppingoccupied[i]==False:
                    validactions.append(i)
            if validactions:
                return validactions
            else:
                # No clean branch: allow at most one homogeneous-junction branch.
                if homo_junctions.count(1) >=2 :
                    for i in range(0,3) :
                        if (SolExist[i] == 1) and (agentsblockingjunction[i] == 0) and homo_junctions[i]== 1 :
                            validactions.append(i)
                            break
                if validactions:
                    return validactions
                return [3]
def getparams(self, size):
tid = np.random.randint(0, 50)
seed = tid * 19997 + 997
random.seed(seed)
nSize = int((size-20)/5)
nr_cities = 2 + nSize // 2 + random.randint(0, 2)
# , 10 + random.randint(0, 10))
nr_trains = min(nr_cities * 5, 5 + random.randint(0, 5))
max_rails_between_cities = 2
max_rails_in_cities = 3 + random.randint(0, nSize)
malfunction_rate = 30 + random.randint(0, 100)
malfunction_min_duration = 3 + random.randint(0, 7)
malfunction_max_duration = 20 + random.randint(0, 80)
return (
seed, nr_trains, nr_cities,
max_rails_between_cities, max_rails_in_cities,
malfunction_rate, malfunction_min_duration, malfunction_max_duration
)
    def make_gif(self,images, fname, duration=2, true_image=False,salience=False,salIMGS=None):
        """Write *images* (a sequence of frames) to *fname* as an animated GIF.

        NOTE(review): duration, true_image, salience and salIMGS are accepted
        for signature compatibility but are currently unused here.
        """
        imageio.mimwrite(fname,images,subrectangles=True)
        print("\nwrote gif")
def set_env(self, num_agents, id, width, height, max_cities=None, max_rails=None):
if not TEST_FLATLAND_ENVIRONMENTS:
if id % 10 == 0:
while True:
try:
seed, nr_trains, nr_cities,\
max_rails_between_city, max_rails_in_cities, _, _, _ = self.getparams(
width)
print('size:', width)
print('cities:', nr_cities)
print('agents', num_agents)
print('seed', seed)
gameEnv = RailEnv(width=width, height=width, rail_generator=sparse_rail_generator(
max_num_cities=nr_cities, max_rails_between_cities=max_rails_between_city,
max_rails_in_city=max_rails_in_cities, seed=seed, grid_mode=False),
schedule_generator=sparse_schedule_generator(), obs_builder_object=TrafficLightObs(),
number_of_agents=num_agents)
gameEnv.global_reward = 20
gameEnv.step_penalty = -0.3
self.env = gameEnv
obs = self.env.reset(True, True)
break
except Exception:
pass
else:
obs = self.env.reset(True, True)
print('Reset Successfully')
self.episode_count +=1
if self.saveGIF and self.episode_count % self.SAVEGIFFREQUENCY ==0 :
self.env_renderer = RenderTool(self.env)
self.env_renderer.render_env(show=False, frames=False, show_observations=False)
self.episode_frames = [self.env_renderer.get_image()]
return obs
else:
if id == 0:
while True:
try:
tid = np.random.randint(0, 50)
seed = tid * 19997 + 997
gameEnv = RailEnv(width=width, height=height, rail_generator=sparse_rail_generator(
max_num_cities=max_cities, max_rails_between_cities=2,
max_rails_in_city=max_rails, seed=seed, grid_mode=False),
schedule_generator=sparse_schedule_generator(), obs_builder_object=TrafficLightObs(),
number_of_agents=num_agents)
gameEnv.global_reward = 20
gameEnv.step_penalty = -0.3
self.env = gameEnv
obs = self.env.reset(True, True)
break
except Exception:
pass
else:
obs = self.env.reset(True, True)
print('Reset Successfully')
self.episode_count +=1
if self.saveGIF and self.episode_count % self.SAVEGIFFREQUENCY ==0 :
self.env_renderer = RenderTool(self.env)
self.env_renderer.render_env(show=False, frames=False, show_observations=False)
self.episode_frames = [self.env_renderer.get_image()]
return obs
def check_action(self, obs, id, done_tag):
if done_tag == 1:
return 0
initialization = obs[self.obs_size - 5]
previous_pos = self.env.agents[id].position if self.env.agents[id].position \
else self.env.agents[id].initial_position
previous_dir = self.env.agents[id].direction if self.env.agents[id].direction is not None else \
self.env.agents[id].initial_direction
state = self.StateClassifier(previous_pos, previous_dir)
if initialization == 1:
return 0
elif initialization == 0 and self.initialized[id] == 0:
self.initialized[id] = 1
return 2
elif state == 0: # no decision point
return 2
elif state in [3, 4]:
return 4
else:
return -1
def step_all_parallel(self, step, all_obs):
'''advances the state of the environment by a single step across all agents'''
joint_actions = {}
if step == 0:
for agent in range(0, len(self.env.agents)):
o = all_obs[0][agent]
s_feed = np.reshape(o, (1, self.obs_size))
a = self.check_action(o, agent, self.agent_done[agent])
if a == -1:
a_dist = self.sess.run([self.network.policy], feed_dict={
self.network.inputs: s_feed})
a = np.random.choice(
range(a_dist.shape[1]), p=a_dist.ravel()) + 1
joint_actions[agent] = a
else:
if len(self.env.agents) < 81 or ((len(self.env.agents)==100) and ((self.env.height+self.env.width)==200)) or self.SKIP_LARGE == False:
observations =[]
for i in range(0,len(self.env.agents)) :
observations.append(all_obs[i])
s_feed = np.reshape(observations, (len(self.env.agents), self.obs_size))
action_set = self.sess.run([self.network.policy], feed_dict={
self.network.inputs: s_feed})
for agent in range(0, len(self.env.agents)):
o = all_obs[agent]
#s_feed = np.reshape(o, (1, self.obs_size))
a = self.check_action(o, agent, self.agent_done[agent])
if a == -1:
# a_dist = self.sess.run([self.network.policy], feed_dict={
# self.network.inputs: s_feed})
a_dist = action_set[0][agent]
a_dist = np.array(a_dist)
#a_dist = a_dist[0]
if self.PRUNE_ACTIONS :
validactions = self._NextValidActions(o,agent)
if not (np.argmax(a_dist.flatten()) in validactions):
a = np.random.choice(validactions) + 1
else :
a = np.argmax(a_dist.flatten()) + 1
else :
a = np.argmax(a_dist.flatten()) + 1 # a = np.random.choice(range(a_dist.shape[1]), p=a_dist.ravel()) + 1
joint_actions[agent] = a
starttime = time.time()
all_obs, _, all_done, _ = self.env.step(joint_actions)
self.timeobs += round((time.time()-starttime), 2)
return all_obs, all_done
    def find_path(self, all_obs, max_step=384):
        '''Run one full episode until every agent is done or max_step steps.

        Returns (solution, all_done, num_done, timeobs): solution is a
        step-indexed array of every agent's grid position, all_done says
        whether all agents finished, num_done counts finished agents and
        timeobs is the accumulated env.step wall time.
        '''
        solution = []
        step = 0
        # Per-agent episode bookkeeping consumed by check_action/step_all_parallel.
        self.initialized = [0 for i in range(len(self.env.agents))]
        self.agent_done = [0 for i in range(len(self.env.agents))]
        self.timeobs =0
        while(not self.env.dones["__all__"] and step < max_step):
            # Snapshot every agent's current (or initial, if unplaced) position.
            timestep = []
            for agent in range(0, len(self.env.agents)):
                position = self.env.agents[agent].position if self.env.agents[agent].position is not None else \
                    self.env.agents[agent].initial_position
                timestep.append(position)
            solution.append(np.array(timestep))
            all_obs, all_done = self.step_all_parallel(step, all_obs)
            for agent in range(0, len(self.env.agents)):
                self.agent_done[agent] = all_done[agent]
            step += 1
            if self.saveGIF and self.episode_count% self.SAVEGIFFREQUENCY ==0 :
                self.env_renderer.render_env(show=False, frames=False, show_observations=False)
                self.episode_frames.append(self.env_renderer.get_image())
        if self.saveGIF and self.episode_count% self.SAVEGIFFREQUENCY ==0 :
            time_per_step = 0.1
            images = np.array(self.episode_frames)
            self.make_gif(images, '{}/test_episode_{:d}_{:d}_{:s}.gif'.format(self.gifs_path,self.episode_count,step,("_success" if self.env.dones["__all__"] else "")))
        print('step', step)
        print('Done', self.agent_done.count(1), '/', len(self.env.agents))
        # NOTE(review): this final loop appends to `timestep`, but `solution`
        # already holds a *copy* (np.array) of it, so these appends are never
        # read; it also raises NameError if the while loop body never ran.
        for agent in range(0, len(self.env.agents)):
            position = self.env.agents[agent].position if self.env.agents[
                agent].position is not None else self.env.agents[agent].initial_position
            timestep.append(position)
        all_done = self.agent_done.count(1) == len(self.env.agents)
        return np.array(solution), all_done, self.agent_done.count(1), self.timeobs
def make_name(num_agents, size, id, extension, dirname, extra=""):
    """Build the output file path for one test run.

    Without ``extra`` the extension fills the id slot of the template; with
    ``extra`` the tag is inserted before the extension.
    """
    if extra == "":
        stem = "{}_agents_{}_size_{}_id_{}".format(num_agents, size, id, extension)
    else:
        stem = "{}_agents_{}_size_{}_id_{}{}".format(num_agents, size, id, extra, extension)
    return dirname + '/' + stem
def run_simulations(next, flatland_test):
    """Run one full episode described by the tuple *next* and summarize it.

    *next* is ``(num_agents, id, width, height, max_cities, max_rails)``;
    *flatland_test* must provide ``set_env`` and ``find_path``.  Returns a
    dict with keys Successful_Agents, Observetime, finished, time, length.
    """
    num_agents, ep_id, width, height, max_cities, max_rails = next
    all_obs = flatland_test.set_env(
        num_agents, ep_id, width, height, max_cities, max_rails)
    started = time.time()
    print('Starting test ({},{},{},{})'.format(num_agents, width, height, ep_id))
    # Step budget scales with grid size and agent density.
    step_budget = int(8 * (height + width + (num_agents / max_cities))) - 2
    path, all_done, num_done, obs_time = flatland_test.find_path(all_obs, step_budget)
    return {
        'Successful_Agents': num_done,
        'Observetime': obs_time,
        'finished': bool(all_done),
        'time': round(time.time() - started, 2),
        'length': len(path),
    }
if __name__ == "__main__":
def getfilename() :
today = datetime.today()
d1 = today.strftime("%d-%m")
now = datetime.now()
current_time = now.strftime("%H")
filename = "Flatland_Test" + "_"+ d1 + "_" + current_time + ".txt"
return filename
obs_size = TrafficLightObs.OBS_SIZE
num_agents = 4
num_iterations = 10
min_grid_size = 30
max_grid_size = 80
max_agents = 128
TEST_FLATLAND_ENVIRONMENTS = True
saveGIF = False
filename = str(getfilename())
flatland_environments = [[50, 5, 25, 25, 2, 3, 50], [50, 10, 30, 30, 2, 3, 100], [50, 20, 30, 30, 3, 3, 200], [40, 50, 20, 35, 3, 3, 500],
[30, 80, 35, 20, 5, 3, 800], [30, 80, 35, 35, 5, 4, 800], [
30, 80, 40, 60, 9, 4, 800], [30, 80, 60, 40, 13, 4, 800],
[20, 80, 60, 60, 17, 4, 800], [20, 100, 80, 120, 21, 4, 1000], [
20, 100, 100, 80, 25, 4, 1000], [10, 200, 100, 100, 29, 4, 2000],
[10, 200, 150, 150, 33, 4, 2000], [10, 400, 150, 150, 37, 4, 4000]]
flatland_test = FLATLAND('newmod', obs_size,
TEST_FLATLAND_ENVIRONMENTS,saveGIF,'./gifs_SMObs')
summary_file = open(filename, "w+")
summary_file.write("Summary of Flatland Testing")
summary_file.write("\n")
summary_file.close()
if not TEST_FLATLAND_ENVIRONMENTS:
while num_agents <= max_agents:
num_agents *= 2
print("Starting tests for %d agents" % num_agents)
for size in range(min_grid_size, max_grid_size, 5):
summary_file = open(filename, "a+")
if size != 30:
successful_rate = round(
(100*total_completed/(num_iterations*num_agents)), 2)
episode_success_rate = round(
(100*success_count/num_iterations), 2)
summary_file.write(
"Agent Success Rate: {}".format(successful_rate))
summary_file.write("\n")
summary_file.write(
"Episode Success Rate: {}".format(episode_success_rate))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Size: {} Agents: {}".format(size, num_agents))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
total_completed = 0
success_count = 0
total_time = 0
for iter in range(num_iterations):
results = run_simulations(
(num_agents, iter, size, size, 3, None), flatland_test)
total_completed += results['Successful_Agents']
if results['finished'] == True:
success_count += 1
summary_file = open(filename, "a+")
summary_file.write(" Finished: {} CompletedAgents: {} TimeTaken: {} Length: {} ".format(results['finished'],
results['Successful_Agents'], results['time'], results['length']))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
else:
total_done = 0
total_agents = 0
TOTAL_TIME = 0
for index in range(len(flatland_environments)):
summary_file = open(filename, "a+")
if index != 0:
num_iterations = flatland_environments[index-1][0]
successful_rate = round(
(100*total_completed/(num_iterations*num_agents)), 2)
episode_success_rate = round(
(100*success_count/num_iterations), 2)
total_done += total_completed
total_agents += num_iterations*num_agents
summary_file.write(
"Agent Success Rate: {}".format(successful_rate))
summary_file.write("\n")
summary_file.write(
"Episode Success Rate: {}".format(episode_success_rate))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Taken : {} Minutes".format(round((total_time/60),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Time Taken : {} Seconds".format(round((total_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Observation Time : {} Seconds".format(round((obs_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Elapsed so Far: {} Minutes".format(round((TOTAL_TIME/60),2)))
summary_file.write("\n")
summary_file.write("\n")
num_agents = flatland_environments[index][1]
width = flatland_environments[index][2]
height = flatland_environments[index][3]
max_cities = flatland_environments[index][4]
max_rails = flatland_environments[index][5]
summary_file.write("Environment: {} Agents: {} Width: {} Height: {}".format(index,num_agents,width,height))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
total_completed = 0
total_time = 0
success_count = 0
obs_time = 0
for iter in range(flatland_environments[index][0]):
results = run_simulations(
(num_agents, iter, width, height, max_cities, max_rails), flatland_test)
total_completed += results['Successful_Agents']
total_time+= results['time']
TOTAL_TIME += results['time']
obs_time += results['Observetime']
if results['finished'] == True:
success_count += 1
summary_file = open(filename, "a+")
summary_file.write(" Finished: {} CompletedAgents: {} TimeTaken: {} Length: {} ".format(results['finished'],
results['Successful_Agents'], results['time'], results['length']))
summary_file.write("\n")
summary_file.write("\n")
summary_file.close()
summary_file = open(filename, "a+")
num_iterations = flatland_environments[index-1][0]
successful_rate = round(
(100*total_completed/(num_iterations*num_agents)), 2)
episode_success_rate = round(
(100*success_count/num_iterations), 2)
total_done += total_completed
total_agents += num_iterations*num_agents
summary_file.write(
"Agent Success Rate: {}".format(successful_rate))
summary_file.write("\n")
summary_file.write(
"Episode Success Rate: {}".format(episode_success_rate))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Taken : {} Minutes".format(round((total_time/60),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Time Taken : {} Seconds".format(round((total_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Average Observation Time : {} Seconds".format(round((obs_time/num_iterations),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"Time Elapsed so Far: {} Minutes".format(round((TOTAL_TIME/60),2)))
summary_file.write("\n")
summary_file.write("\n")
summary_file.write(
"AVERAGE SUCCESS RATE: {}".format((100*total_done)/total_agents))
summary_file.close()
print("finished all tests!") | 0.421314 | 0.369059 |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnszone_binding(base_resource):
    """Binding class showing the resources that can be bound to dnszone_binding.

    Aggregates the dnskey and domain bindings that the appliance reports for
    a single DNS zone, identified by ``zonename``.
    """

    def __init__(self):
        self._zonename = ""
        # Filled in by the payload formatter when a GET response is parsed.
        self.dnszone_dnskey_binding = []
        self.dnszone_domain_binding = []

    @property
    def zonename(self):
        """Name of the zone. Mutually exclusive with the type parameter.<br/>Minimum length = 1."""
        return self._zonename

    @zonename.setter
    def zonename(self, zonename):
        """Name of the zone. Mutually exclusive with the type parameter.<br/>Minimum length = 1."""
        self._zonename = zonename

    @property
    def dnszone_dnskey_bindings(self):
        """dnskey resources that can be bound to dnszone."""
        # Fixed: __init__ stores the list on `dnszone_dnskey_binding` (no
        # leading underscore); the original returned an underscore-prefixed
        # attribute that is never assigned and always raised AttributeError.
        return self.dnszone_dnskey_binding

    @property
    def dnszone_domain_bindings(self):
        """domain resources that can be bound to dnszone."""
        # Fixed: same never-assigned underscore attribute as above.
        return self.dnszone_domain_binding

    def _get_nitro_response(self, service, response):
        """Convert a NITRO response string into resource objects.

        Raises nitro_exception for non-zero error codes (errorcode 444 also
        clears the session first); returns the parsed resource list.
        """
        result = service.payload_formatter.string_to_resource(dnszone_binding_response, response, self.__class__.__name__)
        if result.errorcode != 0:
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.dnszone_binding

    def _get_object_name(self):
        """Return the object identifier argument (the zone name), or None."""
        if self.zonename is not None:
            return str(self.zonename)
        return None

    @classmethod
    def get(self, service, zonename):
        """Fetch dnszone_binding resource(s).

        ``zonename`` may be a single name or a list of names; the result is
        correspondingly one resource or a list of resources.
        """
        if type(zonename) is not list:
            obj = dnszone_binding()
            obj.zonename = zonename
            return obj.get_resource(service)
        # Fixed: the original assigned response[i] without ever creating the
        # `response` list (UnboundLocalError for list input, and an unbound
        # `response` returned for an empty list).
        response = [None] * len(zonename)
        if zonename and len(zonename) > 0:
            obj = [dnszone_binding() for _ in range(len(zonename))]
            for i in range(len(zonename)):
                obj[i].zonename = zonename[i]
                response[i] = obj[i].get_resource(service)
        return response
class dnszone_binding_response(base_response):
    """Container for a parsed dnszone_binding GET response.

    ``length`` pre-sizes the result list with empty dnszone_binding objects
    for the payload formatter to populate.
    """

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Fixed: the original assigned this attribute twice (first [], then
        # the pre-sized list); only the effective final assignment is kept.
        self.dnszone_binding = [dnszone_binding() for _ in range(length)]
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class dnszone_binding(base_resource):
    """Binding class showing the resources that can be bound to dnszone_binding.

    Aggregates the dnskey and domain bindings that the appliance reports for
    a single DNS zone, identified by ``zonename``.
    """

    def __init__(self):
        self._zonename = ""
        # Filled in by the payload formatter when a GET response is parsed.
        self.dnszone_dnskey_binding = []
        self.dnszone_domain_binding = []

    @property
    def zonename(self):
        """Name of the zone. Mutually exclusive with the type parameter.<br/>Minimum length = 1."""
        return self._zonename

    @zonename.setter
    def zonename(self, zonename):
        """Name of the zone. Mutually exclusive with the type parameter.<br/>Minimum length = 1."""
        self._zonename = zonename

    @property
    def dnszone_dnskey_bindings(self):
        """dnskey resources that can be bound to dnszone."""
        # Fixed: __init__ stores the list on `dnszone_dnskey_binding` (no
        # leading underscore); the original returned an underscore-prefixed
        # attribute that is never assigned and always raised AttributeError.
        return self.dnszone_dnskey_binding

    @property
    def dnszone_domain_bindings(self):
        """domain resources that can be bound to dnszone."""
        # Fixed: same never-assigned underscore attribute as above.
        return self.dnszone_domain_binding

    def _get_nitro_response(self, service, response):
        """Convert a NITRO response string into resource objects.

        Raises nitro_exception for non-zero error codes (errorcode 444 also
        clears the session first); returns the parsed resource list.
        """
        result = service.payload_formatter.string_to_resource(dnszone_binding_response, response, self.__class__.__name__)
        if result.errorcode != 0:
            if result.errorcode == 444:
                service.clear_session(self)
            if result.severity:
                if result.severity == "ERROR":
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else:
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.dnszone_binding

    def _get_object_name(self):
        """Return the object identifier argument (the zone name), or None."""
        if self.zonename is not None:
            return str(self.zonename)
        return None

    @classmethod
    def get(self, service, zonename):
        """Fetch dnszone_binding resource(s).

        ``zonename`` may be a single name or a list of names; the result is
        correspondingly one resource or a list of resources.
        """
        if type(zonename) is not list:
            obj = dnszone_binding()
            obj.zonename = zonename
            return obj.get_resource(service)
        # Fixed: the original assigned response[i] without ever creating the
        # `response` list (UnboundLocalError for list input, and an unbound
        # `response` returned for an empty list).
        response = [None] * len(zonename)
        if zonename and len(zonename) > 0:
            obj = [dnszone_binding() for _ in range(len(zonename))]
            for i in range(len(zonename)):
                obj[i].zonename = zonename[i]
                response[i] = obj[i].get_resource(service)
        return response
class dnszone_binding_response(base_response):
    """Container for a parsed dnszone_binding GET response.

    ``length`` pre-sizes the result list with empty dnszone_binding objects
    for the payload formatter to populate.
    """

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Fixed: the original assigned this attribute twice (first [], then
        # the pre-sized list); only the effective final assignment is kept.
        self.dnszone_binding = [dnszone_binding() for _ in range(length)]
from tina.advans import *
# Taichi device function: 2D point-in-triangle test.
@ti.func
def inside(p, a, b, c):
    """Return True if 2D point p lies inside (or on the edges of) triangle abc.

    Uses the signs of the three edge cross products: p is inside when all
    three agree, i.e. the edges wind consistently counter-clockwise (ccw)
    or clockwise (cw) as seen from p.
    """
    u = (a - p).cross(b - p)
    v = (b - p).cross(c - p)
    w = (c - p).cross(a - p)
    ccw = u >= 0 and v >= 0 and w >= 0
    cw = u <= 0 and v <= 0 and w <= 0
    return ccw or cw
@ti.data_oriented
class MeshVoxelizer:
    """Voxelize a triangle mesh into a dense integer occupancy grid.

    For each triangle, covered (x, y) cells accumulate a signed count along
    the third axis up to the triangle surface; after all triangles, a
    non-zero count marks an interior cell (a scanline-parity trick).  The
    mesh is rasterized under two different axis orders and the results
    combined, which suppresses artifacts from triangles nearly parallel to
    a single axis.
    """

    def __init__(self, res):
        self.res = tovector(res)
        # Cell size; derived from res.x only, so a cubic grid is assumed
        # -- TODO confirm behavior for non-cubic resolutions.
        self.dx = 1 / self.res.x
        self.padding = 3  # empty border cells kept around the volume
        self.voxels = ti.field(int, self.res)  # final signed occupancy
        self.temp = ti.field(int, self.res)    # scratch grid for one pass
        #self.block = ti.root.pointer(ti.ijk, self.res // 8)
        #self.block.dense(ti.ijk, 8).place(self.voxels)  # (stray editor keycode removed)

    def voxelize(self, verts, vmin=None, vmax=None):
        """Voxelize the (n, 3, 3) triangle array `verts`.

        Coordinates are normalized into [0, 1] via vmin/vmax (computed from
        the data with a 0.1 margin when omitted).  Returns
        (vmin, vmax, normalized verts).
        """
        if vmin is None or vmax is None:
            vmin, vmax = np.min(verts) - 0.1, np.max(verts) + 0.1
        verts = (verts - vmin) / (vmax - vmin)
        # Pass 1: rasterize with the original axis order.
        self._voxelize(verts)
        self._update(None)
        # Pass 2: cyclic axis permutation; combined with pass 1 in _update.
        tmp = np.array(verts)
        tmp[..., (0, 1, 2)] = verts[..., (2, 0, 1)]
        self._voxelize(tmp)
        self._update(lambda x, y, z: (z, x, y))
        '''
        tmp = np.array(verts)
        tmp[..., (0, 1, 2)] = verts[..., (1, 2, 0)]
        self._voxelize(tmp)
        self._update(lambda x, y, z: (y, z, x))
        '''
        return vmin, vmax, verts

    @ti.kernel
    def _update(self, f: ti.template()):
        # Fold the scratch grid into `voxels` and clear it.  With f=None the
        # scratch simply becomes the result; otherwise f maps grid indices
        # through the axis permutation and the passes are combined with
        # min(), so both passes must agree for a cell to stay filled.
        for I in ti.grouped(self.temp):
            if ti.static(f is None):
                self.voxels[I] = self.temp[I]
                self.temp[I] = 0
            else:
                J = V(*f(*I))
                self.voxels[I] = min(self.voxels[I], max(0, self.temp[J]))
                self.temp[J] = 0

    @ti.kernel
    def _voxelize(self, verts: ti.ext_arr()):
        # Rasterize every triangle into the scratch grid.
        for i in range(verts.shape[0]):
            # Tiny fixed jitter avoids exactly axis-aligned degeneracies.
            jitter = V(-0.0576167239, -0.2560898629, 0.06716309129) * 1e-4
            a = V(verts[i, 0, 0], verts[i, 0, 1], verts[i, 0, 2]) + jitter
            b = V(verts[i, 1, 0], verts[i, 1, 1], verts[i, 1, 2]) + jitter
            c = V(verts[i, 2, 0], verts[i, 2, 1], verts[i, 2, 2]) + jitter
            bmin, bmax = min(a, b, c), max(a, b, c)
            # 2D bounding box of the triangle in cells, clamped to the
            # padded interior of the grid.
            pmin = max(self.padding, ifloor(bmin.xy / self.dx))
            pmax = min(self.res.xy - self.padding, ifloor(bmax.xy / self.dx) + 1)
            normal = (b - a).cross(c - a).normalized()
            if abs(normal.z) < 1e-10:
                continue  # triangle (almost) perpendicular to the scan axis
            for p in range(pmin.x, pmax.x):
                for q in range(pmin.y, pmax.y):
                    pos = (V(p, q) + 0.5) * self.dx
                    if inside(pos, a.xy, b.xy, c.xy):
                        # Height of the triangle plane above this column.
                        base = V23(pos, 0.)
                        hei = int(-normal.dot(base - a) / normal.z / self.dx)
                        hei = min(hei, self.res.x - self.padding)
                        # Front faces add, back faces subtract: interior
                        # columns end with a non-zero count.
                        inc = 1 if normal.z > 0 else -1
                        for s in range(self.padding, hei):
                            self.temp[p, q, s] += inc
# Demo: voxelize the monkey mesh and display the result as a density volume.
if __name__ == '__main__':
    ti.init(ti.cuda)
    vox = MeshVoxelizer([256] * 3)
    verts, faces = tina.readobj('assets/monkey.obj', simple=True)
    # Scale/shift the model into the unit cube expected by the voxelizer.
    verts *= 0.5
    verts += 0.5
    scene = tina.Scene(taa=True)
    volume = tina.SimpleVolume(vox.res.x)
    scene.add_object(volume)
    #model = tina.MeshToWire(tina.MeshModel('assets/monkey.obj'))
    #scene.add_object(model)
    vox.voxelize(verts[faces])
    # abs() folds the signed parity counts into a positive density.
    volume.set_volume_density(np.abs(vox.voxels.to_numpy()) * 0.05)
    gui = ti.GUI()
    while gui.running:
        scene.input(gui)
        scene.render()
        gui.set_image(scene.img)
gui.show() | melt/voxelizer.py | from tina.advans import *
# Taichi device function: 2D point-in-triangle test.
@ti.func
def inside(p, a, b, c):
    """Return True if 2D point p lies inside (or on the edges of) triangle abc.

    Uses the signs of the three edge cross products: p is inside when all
    three agree, i.e. the edges wind consistently counter-clockwise (ccw)
    or clockwise (cw) as seen from p.
    """
    u = (a - p).cross(b - p)
    v = (b - p).cross(c - p)
    w = (c - p).cross(a - p)
    ccw = u >= 0 and v >= 0 and w >= 0
    cw = u <= 0 and v <= 0 and w <= 0
    return ccw or cw
@ti.data_oriented
class MeshVoxelizer:
    """Voxelize a triangle mesh into a dense integer occupancy grid.

    For each triangle, covered (x, y) cells accumulate a signed count along
    the third axis up to the triangle surface; after all triangles, a
    non-zero count marks an interior cell (a scanline-parity trick).  The
    mesh is rasterized under two different axis orders and the results
    combined, which suppresses artifacts from triangles nearly parallel to
    a single axis.
    """

    def __init__(self, res):
        self.res = tovector(res)
        # Cell size; derived from res.x only, so a cubic grid is assumed
        # -- TODO confirm behavior for non-cubic resolutions.
        self.dx = 1 / self.res.x
        self.padding = 3  # empty border cells kept around the volume
        self.voxels = ti.field(int, self.res)  # final signed occupancy
        self.temp = ti.field(int, self.res)    # scratch grid for one pass
        #self.block = ti.root.pointer(ti.ijk, self.res // 8)
        #self.block.dense(ti.ijk, 8).place(self.voxels)  # (stray editor keycode removed)

    def voxelize(self, verts, vmin=None, vmax=None):
        """Voxelize the (n, 3, 3) triangle array `verts`.

        Coordinates are normalized into [0, 1] via vmin/vmax (computed from
        the data with a 0.1 margin when omitted).  Returns
        (vmin, vmax, normalized verts).
        """
        if vmin is None or vmax is None:
            vmin, vmax = np.min(verts) - 0.1, np.max(verts) + 0.1
        verts = (verts - vmin) / (vmax - vmin)
        # Pass 1: rasterize with the original axis order.
        self._voxelize(verts)
        self._update(None)
        # Pass 2: cyclic axis permutation; combined with pass 1 in _update.
        tmp = np.array(verts)
        tmp[..., (0, 1, 2)] = verts[..., (2, 0, 1)]
        self._voxelize(tmp)
        self._update(lambda x, y, z: (z, x, y))
        '''
        tmp = np.array(verts)
        tmp[..., (0, 1, 2)] = verts[..., (1, 2, 0)]
        self._voxelize(tmp)
        self._update(lambda x, y, z: (y, z, x))
        '''
        return vmin, vmax, verts

    @ti.kernel
    def _update(self, f: ti.template()):
        # Fold the scratch grid into `voxels` and clear it.  With f=None the
        # scratch simply becomes the result; otherwise f maps grid indices
        # through the axis permutation and the passes are combined with
        # min(), so both passes must agree for a cell to stay filled.
        for I in ti.grouped(self.temp):
            if ti.static(f is None):
                self.voxels[I] = self.temp[I]
                self.temp[I] = 0
            else:
                J = V(*f(*I))
                self.voxels[I] = min(self.voxels[I], max(0, self.temp[J]))
                self.temp[J] = 0

    @ti.kernel
    def _voxelize(self, verts: ti.ext_arr()):
        # Rasterize every triangle into the scratch grid.
        for i in range(verts.shape[0]):
            # Tiny fixed jitter avoids exactly axis-aligned degeneracies.
            jitter = V(-0.0576167239, -0.2560898629, 0.06716309129) * 1e-4
            a = V(verts[i, 0, 0], verts[i, 0, 1], verts[i, 0, 2]) + jitter
            b = V(verts[i, 1, 0], verts[i, 1, 1], verts[i, 1, 2]) + jitter
            c = V(verts[i, 2, 0], verts[i, 2, 1], verts[i, 2, 2]) + jitter
            bmin, bmax = min(a, b, c), max(a, b, c)
            # 2D bounding box of the triangle in cells, clamped to the
            # padded interior of the grid.
            pmin = max(self.padding, ifloor(bmin.xy / self.dx))
            pmax = min(self.res.xy - self.padding, ifloor(bmax.xy / self.dx) + 1)
            normal = (b - a).cross(c - a).normalized()
            if abs(normal.z) < 1e-10:
                continue  # triangle (almost) perpendicular to the scan axis
            for p in range(pmin.x, pmax.x):
                for q in range(pmin.y, pmax.y):
                    pos = (V(p, q) + 0.5) * self.dx
                    if inside(pos, a.xy, b.xy, c.xy):
                        # Height of the triangle plane above this column.
                        base = V23(pos, 0.)
                        hei = int(-normal.dot(base - a) / normal.z / self.dx)
                        hei = min(hei, self.res.x - self.padding)
                        # Front faces add, back faces subtract: interior
                        # columns end with a non-zero count.
                        inc = 1 if normal.z > 0 else -1
                        for s in range(self.padding, hei):
                            self.temp[p, q, s] += inc
# Demo: voxelize the monkey mesh and display the result as a density volume.
if __name__ == '__main__':
    ti.init(ti.cuda)
    vox = MeshVoxelizer([256] * 3)
    verts, faces = tina.readobj('assets/monkey.obj', simple=True)
    # Scale/shift the model into the unit cube expected by the voxelizer.
    verts *= 0.5
    verts += 0.5
    scene = tina.Scene(taa=True)
    volume = tina.SimpleVolume(vox.res.x)
    scene.add_object(volume)
    #model = tina.MeshToWire(tina.MeshModel('assets/monkey.obj'))
    #scene.add_object(model)
    vox.voxelize(verts[faces])
    # abs() folds the signed parity counts into a positive density.
    volume.set_volume_density(np.abs(vox.voxels.to_numpy()) * 0.05)
    gui = ti.GUI()
    while gui.running:
        scene.input(gui)
        scene.render()
        gui.set_image(scene.img)
gui.show() | 0.501709 | 0.554953 |
import base64
import warnings
from optigatrust.util.types import *
from optigatrust.util import io
__all__ = [
'read_existing',
'write_new',
]
def _break_apart(f, sep, step):
return sep.join(f[n:n + step] for n in range(0, len(f), step))
def read_existing(certid=ObjectId.IFX_CERT, to_pem=False):
    """
    This function returns an existing certificate from the OPTIGA(TM) Trust device

    :param certid:
        Should be a value from the ObjectId
    :param to_pem:
        A boolean flag to indicate whether the certificate should be returned PEM encoded
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the chip initialisation library
    :return:
        A byte string with a PEM certificate or DER encoded byte string
    """
    if not isinstance(certid, ObjectId):
        raise TypeError(
            'Certificate Slot is not correct. '
            'Supported values are in ObjectId class you used {0}'.format(certid)
        )
    if certid not in {ObjectId.IFX_CERT, ObjectId.USER_CERT_1, ObjectId.USER_CERT_2, ObjectId.USER_CERT_3}:
        warnings.warn("You are going to use an object which is outside of the standard certificate storage")
    der_cert = io.read(certid)
    # (fixed: removed a leftover debug print of the raw certificate bytes)
    if len(der_cert) == 0:
        raise ValueError(
            'Certificate Slot {0} is empty'.format(certid)
        )
    # Strip the 9-byte OPTIGA Trust TLV header (tag 0xC0 plus nested length
    # fields) that wraps the X.509 certificate inside the data object.
    if der_cert[0] == 0xC0:
        der_cert = der_cert[9:]
    if to_pem:
        pem_cert = "-----BEGIN CERTIFICATE-----\n"
        pem_cert += _break_apart(base64.b64encode(der_cert).decode(), '\n', 64)
        pem_cert += "\n-----END CERTIFICATE-----"
        return pem_cert.encode()
    return bytes(der_cert)
def _append_length(data, last=False):
data_with_length = bytearray(3)
left = len(data)
data_with_length[2] = left % 0x100
left = left >> 8
data_with_length[1] = left % 0x100
if last:
data_with_length[0] = 0xC0
else:
left = left >> 8
data_with_length[0] = left % 0x100
data_with_length.extend(data)
return data_with_length
def _strip_cert(cert):
if cert.split('\n')[0] != "-----BEGIN CERTIFICATE-----":
raise ValueError(
'Incorrect Certificate '
'Should start with "-----BEGIN CERTIFICATE-----" your starts with {0}'.format(cert.split('\n')[0])
)
raw_cert = cert.replace('-----BEGIN CERTIFICATE-----', '')
raw_cert = raw_cert.replace('-----END CERTIFICATE-----', '')
raw_cert = raw_cert.replace("\n", "")
der_cert = base64.b64decode(raw_cert)
return der_cert
def write_new(cert, certid=ObjectId.USER_CERT_1):
    """
    This function writes a new certificate into the OPTIGA(TM) Trust device

    :param cert:
        Should be a string with a PEM file with newlines separated or a bytes instance with a DER encoded certificate
    :param certid:
        Should be a value from the ObjectId
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the chip initialisation library
    :return:
        None
    """
    if not isinstance(certid, ObjectId):
        raise TypeError(
            'Certificate Slot is not correct. '
            'Supported values are in ObjectId class you used {0}'.format(certid)
        )
    if certid not in {ObjectId.IFX_CERT, ObjectId.USER_CERT_1, ObjectId.USER_CERT_2, ObjectId.USER_CERT_3}:
        warnings.warn("You are going to use an object which is outside of the standard certificate storage")
    if not isinstance(cert, (str, bytes, bytearray)):
        raise TypeError(
            'Bad certificate type should be either bytes, bytes string, or string'
        )
    if isinstance(cert, (bytes, bytearray)):
        # PEM data may arrive as bytes; anything that does not decode as
        # UTF-8 *and* start with the PEM marker is taken to be raw DER.
        # (Fixed: the original fed any UTF-8-decodable bytes straight into
        # _strip_cert, so DER bytes that happened to decode crashed with
        # ValueError instead of being written.)
        try:
            text = cert.decode("utf-8")
        except UnicodeError:
            der_cert = cert
        else:
            if text.startswith('-----BEGIN CERTIFICATE-----'):
                der_cert = _strip_cert(text)
            else:
                der_cert = cert
    else:
        der_cert = _strip_cert(cert)
    if der_cert[0] != 0x30:
        raise ValueError(
            'Incorrect Certificate '
            'Should start with 0x30 your starts with {0}'.format(der_cert[0])
        )
    # Wrap the DER certificate in the OPTIGA Trust TLV structure:
    #   [len_byte_2, len_byte_1, len_byte_0] covering certificate + two inner headers (tagged 0xC0)
    #   [len_byte_2, len_byte_1, len_byte_0] covering certificate + one inner header
    #   [len_byte_2, len_byte_1, len_byte_0] covering the certificate itself
    # and write the result into the given Object ID.
    l1_der_cert = _append_length(der_cert)
    l2_der_cert = _append_length(l1_der_cert)
    l3_der_cert = _append_length(l2_der_cert, last=True)
    io.write(l3_der_cert, certid)
import warnings
from optigatrust.util.types import *
from optigatrust.util import io
__all__ = [
'read_existing',
'write_new',
]
def _break_apart(f, sep, step):
return sep.join(f[n:n + step] for n in range(0, len(f), step))
def read_existing(certid=ObjectId.IFX_CERT, to_pem=False):
    """
    This function returns an existing certificate from the OPTIGA(TM) Trust device

    :param certid:
        Should be a value from the ObjectId
    :param to_pem:
        A boolean flag to indicate whether the certificate should be returned PEM encoded
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the chip initialisation library
    :return:
        A byte string with a PEM certificate or DER encoded byte string
    """
    if not isinstance(certid, ObjectId):
        raise TypeError(
            'Certificate Slot is not correct. '
            'Supported values are in ObjectId class you used {0}'.format(certid)
        )
    if certid not in {ObjectId.IFX_CERT, ObjectId.USER_CERT_1, ObjectId.USER_CERT_2, ObjectId.USER_CERT_3}:
        warnings.warn("You are going to use an object which is outside of the standard certificate storage")
    der_cert = io.read(certid)
    # (fixed: removed a leftover debug print of the raw certificate bytes)
    if len(der_cert) == 0:
        raise ValueError(
            'Certificate Slot {0} is empty'.format(certid)
        )
    # Strip the 9-byte OPTIGA Trust TLV header (tag 0xC0 plus nested length
    # fields) that wraps the X.509 certificate inside the data object.
    if der_cert[0] == 0xC0:
        der_cert = der_cert[9:]
    if to_pem:
        pem_cert = "-----BEGIN CERTIFICATE-----\n"
        pem_cert += _break_apart(base64.b64encode(der_cert).decode(), '\n', 64)
        pem_cert += "\n-----END CERTIFICATE-----"
        return pem_cert.encode()
    return bytes(der_cert)
def _append_length(data, last=False):
data_with_length = bytearray(3)
left = len(data)
data_with_length[2] = left % 0x100
left = left >> 8
data_with_length[1] = left % 0x100
if last:
data_with_length[0] = 0xC0
else:
left = left >> 8
data_with_length[0] = left % 0x100
data_with_length.extend(data)
return data_with_length
def _strip_cert(cert):
if cert.split('\n')[0] != "-----BEGIN CERTIFICATE-----":
raise ValueError(
'Incorrect Certificate '
'Should start with "-----BEGIN CERTIFICATE-----" your starts with {0}'.format(cert.split('\n')[0])
)
raw_cert = cert.replace('-----BEGIN CERTIFICATE-----', '')
raw_cert = raw_cert.replace('-----END CERTIFICATE-----', '')
raw_cert = raw_cert.replace("\n", "")
der_cert = base64.b64decode(raw_cert)
return der_cert
def write_new(cert, certid=ObjectId.USER_CERT_1):
    """
    This function writes a new certificate into the OPTIGA(TM) Trust device

    :param cert:
        Should be a string with a PEM file with newlines separated or a bytes instance with a DER encoded certificate
    :param certid:
        Should be a value from the ObjectId
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by the chip initialisation library
    :return:
        None
    """
    if not isinstance(certid, ObjectId):
        raise TypeError(
            'Certificate Slot is not correct. '
            'Supported values are in ObjectId class you used {0}'.format(certid)
        )
    if certid not in {ObjectId.IFX_CERT, ObjectId.USER_CERT_1, ObjectId.USER_CERT_2, ObjectId.USER_CERT_3}:
        warnings.warn("You are going to use an object which is outside of the standard certificate storage")
    if not isinstance(cert, (str, bytes, bytearray)):
        raise TypeError(
            'Bad certificate type should be either bytes, bytes string, or string'
        )
    if isinstance(cert, (bytes, bytearray)):
        # PEM data may arrive as bytes; anything that does not decode as
        # UTF-8 *and* start with the PEM marker is taken to be raw DER.
        # (Fixed: the original fed any UTF-8-decodable bytes straight into
        # _strip_cert, so DER bytes that happened to decode crashed with
        # ValueError instead of being written.)
        try:
            text = cert.decode("utf-8")
        except UnicodeError:
            der_cert = cert
        else:
            if text.startswith('-----BEGIN CERTIFICATE-----'):
                der_cert = _strip_cert(text)
            else:
                der_cert = cert
    else:
        der_cert = _strip_cert(cert)
    if der_cert[0] != 0x30:
        raise ValueError(
            'Incorrect Certificate '
            'Should start with 0x30 your starts with {0}'.format(der_cert[0])
        )
    # Wrap the DER certificate in the OPTIGA Trust TLV structure:
    #   [len_byte_2, len_byte_1, len_byte_0] covering certificate + two inner headers (tagged 0xC0)
    #   [len_byte_2, len_byte_1, len_byte_0] covering certificate + one inner header
    #   [len_byte_2, len_byte_1, len_byte_0] covering the certificate itself
    # and write the result into the given Object ID.
    l1_der_cert = _append_length(der_cert)
    l2_der_cert = _append_length(l1_der_cert)
    l3_der_cert = _append_length(l2_der_cert, last=True)
    io.write(l3_der_cert, certid)
from math import sin, cos, radians
class Pumpkin:
    """
    Store and calculate data relating to a pumpkin projectile.

    Tracks 2D position under constant gravity (9.8 m/s^2) using a fixed
    simulation step and average-velocity integration.
    """

    # Simulation step in seconds, shared by all pumpkins.
    time_interval = 0.1

    def __init__(self, angle, velocity, initial_height=0):
        self.x_position = 0
        self.y_position = initial_height
        self.highest_point = initial_height
        theta = radians(angle)  # trig functions want radians
        self.x_velocity = velocity * cos(theta)
        self.y_velocity = velocity * sin(theta)

    def update(self):
        """Advance the projectile by one time step."""
        self.x_position = self.getNextX()
        self.y_position = self.getNextY()
        self.highest_point = self.checkHighestPoint()

    def getNextX(self):
        """Horizontal position after one step (x velocity is constant)."""
        return self.x_position + self.time_interval * self.x_velocity

    def getNextY(self):
        """Vertical position after one step; also updates the y velocity."""
        new_y_velocity = self.y_velocity - self.time_interval * 9.8
        average_velocity = (self.y_velocity + new_y_velocity) / 2.0
        next_y = self.y_position + self.time_interval * average_velocity
        self.y_velocity = new_y_velocity
        return next_y

    def checkHighestPoint(self):
        """Greater of the current height and the best height seen so far."""
        return self.y_position if self.y_position > self.highest_point else self.highest_point

    def getX(self):
        """Current horizontal distance traveled."""
        return self.x_position

    def getY(self):
        """Current height."""
        return self.y_position

    def getHighestPoint(self):
        """Maximum height reached so far."""
        return self.highest_point
def main():
    """Prompt for launch parameters, simulate until landing, report stats."""
    angle = float(input("Enter the launch angle (in degrees): "))
    velocity = float(input("Enter the intial velocity (in m/s): "))
    initial_height = float(input("Enter the initial height (m): "))

    projectile = Pumpkin(angle, velocity, initial_height)
    while projectile.getY() >= 0.0:
        projectile.update()

    print("\nDistance traveled: {0:0.1f} meters".format(projectile.getX()))
    print("The highest point reached is {0:0.1f} meters\n".format(
        projectile.getHighestPoint()))
if __name__ == '__main__':
main() | examples/lecture16b.py | from math import sin, cos, radians
class Pumpkin:
    """
    Store and calculate data relating to a pumpkin projectile.

    Tracks 2D position under constant gravity (9.8 m/s^2) using a fixed
    simulation step and average-velocity integration.
    """

    # Simulation step in seconds, shared by all pumpkins.
    time_interval = 0.1

    def __init__(self, angle, velocity, initial_height=0):
        self.x_position = 0
        self.y_position = initial_height
        self.highest_point = initial_height
        theta = radians(angle)  # trig functions want radians
        self.x_velocity = velocity * cos(theta)
        self.y_velocity = velocity * sin(theta)

    def update(self):
        """Advance the projectile by one time step."""
        self.x_position = self.getNextX()
        self.y_position = self.getNextY()
        self.highest_point = self.checkHighestPoint()

    def getNextX(self):
        """Horizontal position after one step (x velocity is constant)."""
        return self.x_position + self.time_interval * self.x_velocity

    def getNextY(self):
        """Vertical position after one step; also updates the y velocity."""
        new_y_velocity = self.y_velocity - self.time_interval * 9.8
        average_velocity = (self.y_velocity + new_y_velocity) / 2.0
        next_y = self.y_position + self.time_interval * average_velocity
        self.y_velocity = new_y_velocity
        return next_y

    def checkHighestPoint(self):
        """Greater of the current height and the best height seen so far."""
        return self.y_position if self.y_position > self.highest_point else self.highest_point

    def getX(self):
        """Current horizontal distance traveled."""
        return self.x_position

    def getY(self):
        """Current height."""
        return self.y_position

    def getHighestPoint(self):
        """Maximum height reached so far."""
        return self.highest_point
def main():
    """Prompt for launch parameters, simulate until landing, report stats."""
    angle = float(input("Enter the launch angle (in degrees): "))
    velocity = float(input("Enter the intial velocity (in m/s): "))
    initial_height = float(input("Enter the initial height (m): "))

    projectile = Pumpkin(angle, velocity, initial_height)
    while projectile.getY() >= 0.0:
        projectile.update()

    print("\nDistance traveled: {0:0.1f} meters".format(projectile.getX()))
    print("The highest point reached is {0:0.1f} meters\n".format(
        projectile.getHighestPoint()))
if __name__ == '__main__':
main() | 0.876211 | 0.513059 |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SpmInterfacesHubLocatorGridRemote(RemoteModel):
    """SPM interfaces on which multiple end hosts were connected within the
    user-specified period of time.

    Grid columns:

    - ``id`` (number): internal NetMRI identifier of the grid entry.
    - ``Network`` (string): name of the associated Network View.
    - ``DeviceID`` (number): NetMRI internal identifier for the device.
    - ``DeviceType`` (string): NetMRI-determined device type.
    - ``DeviceName`` (string): NetMRI name of the device; same as
      DeviceSysName or DeviceDNSName depending on NetMRI configuration.
    - ``DeviceIPDotted`` (string): management IP address, dotted (or
      colon-delimited for IPv6) format.
    - ``DeviceIPNumeric`` (number): numerical value of the device IP address.
    - ``InterfaceID`` (number): internal NetMRI identifier for the interface.
    - ``Interface`` (string): short name of the interface as identified in
      the console.
    - ``ifIndex`` (string): SNMP interface index.
    - ``ifDescr`` (string): interface description from the device's
      configuration file.
    - ``ifAlias`` (string): interface alias.
    - ``ifType`` (string): interface type.
    - ``ifMAC`` (string): interface Media Access Controller (MAC) address.
    - ``ifOperStatus`` (string): operational status (up/down).
    - ``ifAdminStatus`` (string): configured status (up/down).
    - ``ifSpeed`` (number): operational speed in bps.
    - ``ifDuplex`` (string): operational duplex.
    - ``ifAdminDuplex`` (string): configured duplex; Auto means the device
      negotiates with the other end.
    - ``PoEPower`` (number): power draw of the supplied device in millivolts.
    - ``PoEStatus`` (string): status of the PoE connection.
    - ``VlanIndex`` (number): numerical VLAN number (VLAN ID).
    - ``VlanName`` (string): name of the VLAN on the root bridge.
    - ``VlanID`` (number): internal NetMRI identifier of the VLAN.
    - ``VTPDomain`` (string): management domain name if VLAN is VTP managed.
    - ``EndHostCount`` (number): number of end devices connected to this
      interface.
    - ``PortStatus`` (string): "Used", "Free", or "Avail".
    - ``Packets`` (number): total inbound and outbound packets.
    - ``Errors`` (number): total inbound and outbound errors.
    - ``ErrorPercentage`` (number): percentage of errors on this interface.
    - ``FirstSeen`` (datetime): when NetMRI first discovered this interface.
    - ``LastSeen`` (datetime): when NetMRI last polled data from this
      interface.
    - ``ifPortControlInd`` (bool): whether this interface is available for
      port control actions.
    - ``ifSwitchPortMgmtInd`` (bool): whether this interface is available in
      switch port management views.
    - ``ifName`` (string): short name of the switch interface.
    - ``VirtualNetworkID`` (number): internal identifier of the network the
      device is associated to.
    - ``ifIPDotted`` (string): IP address in dotted (or colon-delimited for
      IPv6) format.
    - ``ifIPNumeric`` (number): numerical value of the IP address.
    - ``VirtualNetworkMemberName`` (string): name of the VRF as configured
      on this device.
    - ``ifTrunkStatus`` (string): trunk status.
    """

    properties = (
        "id",
        "Network",
        "DeviceID",
        "DeviceType",
        "DeviceName",
        "DeviceIPDotted",
        "DeviceIPNumeric",
        "InterfaceID",
        "Interface",
        "ifIndex",
        "ifDescr",
        "ifAlias",
        "ifType",
        "ifMAC",
        "ifOperStatus",
        "ifAdminStatus",
        "ifSpeed",
        "ifDuplex",
        "ifAdminDuplex",
        "PoEPower",
        "PoEStatus",
        "VlanIndex",
        "VlanName",
        "VlanID",
        "VTPDomain",
        "EndHostCount",
        "PortStatus",
        "Packets",
        "Errors",
        "ErrorPercentage",
        "FirstSeen",
        "LastSeen",
        "ifPortControlInd",
        "ifSwitchPortMgmtInd",
        "ifName",
        "VirtualNetworkID",
        "ifIPDotted",
        "ifIPNumeric",
        "VirtualNetworkMemberName",
        "ifTrunkStatus",
    )

    @property
    @check_api_availability
    def meta(self):
        """User custom fields for this record, fetched through the broker.

        ``attribute type:`` model
        """
        return self.broker.meta(id=self.id)
from infoblox_netmri.utils.utils import check_api_availability
class SpmInterfacesHubLocatorGridRemote(RemoteModel):
"""
This table lists all SPM interfaces on which multiple end hosts were connected within the user specified period of time.
| ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
| ``DeviceID:`` The NetMRI internal identifier for the device.
| ``attribute type:`` number
| ``DeviceType:`` The NetMRI-determined device type.
| ``attribute type:`` string
| ``DeviceName:`` The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``DeviceIPDotted:`` The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``DeviceIPNumeric:`` The numerical value of the device IP address.
| ``attribute type:`` number
| ``InterfaceID:`` The internal NetMRI identifier for the interface configured with this address.
| ``attribute type:`` number
| ``Interface:`` The name of this interface. This is typically the short name of the interface as it is identified in the console.
| ``attribute type:`` string
| ``ifIndex:`` The SNMP interface index of the interface configured with this address.
| ``attribute type:`` string
| ``ifDescr:`` The description of the interface, as set in the device's configuration file.
| ``attribute type:`` string
| ``ifAlias:`` Interface alias of this interface.
| ``attribute type:`` string
| ``ifType:`` The interface type of this interface.
| ``attribute type:`` string
| ``ifMAC:`` The interface Media Access Controller (MAC) address of this interface.
| ``attribute type:`` string
| ``ifOperStatus:`` The operational status (up/down) of this interface.
| ``attribute type:`` string
| ``ifAdminStatus:`` The configured status (up/down) of this interface.
| ``attribute type:`` string
| ``ifSpeed:`` The operational speed, in bps, of this interface.
| ``attribute type:`` number
| ``ifDuplex:`` The operational duplex of this interface.
| ``attribute type:`` string
| ``ifAdminDuplex:`` Admin setting of duplex, Auto indicates the device will try to negotiate with the other end to determine.
| ``attribute type:`` string
| ``PoEPower:`` Power draw of the supplied device in millivolts.
| ``attribute type:`` number
| ``PoEStatus:`` Status of the PoE connection.
| ``attribute type:`` string
| ``VlanIndex:`` The numerical VLAN number (VLAN ID).
| ``attribute type:`` number
| ``VlanName:`` The name of the VLAN on the root bridge.
| ``attribute type:`` string
| ``VlanID:`` The internal NetMRI identifier of the VLAN.
| ``attribute type:`` number
| ``VTPDomain:`` Management domain name if VLAN is VTP managed.
| ``attribute type:`` string
| ``EndHostCount:`` The number of end devices connected to this interface.
| ``attribute type:`` number
| ``PortStatus:`` Port Status. Valid values are "Used", "Free", or "Avail".
| ``attribute type:`` string
| ``Packets:`` Total inbound and outbound packets on this interface.
| ``attribute type:`` number
| ``Errors:`` Total inbound and outbound errors on this interface.
| ``attribute type:`` number
| ``ErrorPercentage:`` Percentage of errors on this interface.
| ``attribute type:`` number
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this interface.
| ``attribute type:`` datetime
| ``LastSeen:`` The timestamp of when NetMRI last polled data from this interface.
| ``attribute type:`` datetime
| ``ifPortControlInd:`` A flag indicating whether or not this interface is available for port control actions.
| ``attribute type:`` bool
| ``ifSwitchPortMgmtInd:`` A flag indicating whether or not this interface is available in switch port management views.
| ``attribute type:`` bool
| ``ifName:`` The name of the switch interface. This is typically the short name of the interface as it is identified in the console.
| ``attribute type:`` string
| ``VirtualNetworkID:`` The internal identifier for the network which the device is associated to.
| ``attribute type:`` number
| ``ifIPDotted:`` The IP address in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``ifIPNumeric:`` The numerical value of the IP address.
| ``attribute type:`` number
| ``VirtualNetworkMemberName:`` The name of the VRF as configured on this device.
| ``attribute type:`` string
| ``ifTrunkStatus:`` Trunk Status
| ``attribute type:`` string
"""
properties = ("id",
"Network",
"DeviceID",
"DeviceType",
"DeviceName",
"DeviceIPDotted",
"DeviceIPNumeric",
"InterfaceID",
"Interface",
"ifIndex",
"ifDescr",
"ifAlias",
"ifType",
"ifMAC",
"ifOperStatus",
"ifAdminStatus",
"ifSpeed",
"ifDuplex",
"ifAdminDuplex",
"PoEPower",
"PoEStatus",
"VlanIndex",
"VlanName",
"VlanID",
"VTPDomain",
"EndHostCount",
"PortStatus",
"Packets",
"Errors",
"ErrorPercentage",
"FirstSeen",
"LastSeen",
"ifPortControlInd",
"ifSwitchPortMgmtInd",
"ifName",
"VirtualNetworkID",
"ifIPDotted",
"ifIPNumeric",
"VirtualNetworkMemberName",
"ifTrunkStatus",
)
@property
@check_api_availability
def meta(self):
"""
User custom fields
``attribute type:`` model
"""
return self.broker.meta(**{"id": self.id }) | 0.72487 | 0.529142 |
from ipaddress import ip_network
import logging
import unittest
from unittest.mock import patch
import openpeerpower.components.http as http
from openpeerpower.setup import async_setup_component
from openpeerpower.util.ssl import server_context_intermediate, server_context_modern
class TestView(http.OpenPeerPowerView):
"""Test the HTTP views."""
name = "test"
url = "/hello"
async def get(self, request):
"""Return a get request."""
return "hello"
async def test_registering_view_while_running(opp, aiohttp_client, aiohttp_unused_port):
"""Test that we can register a view while the server is running."""
await async_setup_component(
opp, http.DOMAIN, {http.DOMAIN: {http.CONF_SERVER_PORT: aiohttp_unused_port()}}
)
await opp.async_start()
# This raises a RuntimeError if app is frozen
opp.http.register_view(TestView)
class TestApiConfig(unittest.TestCase):
"""Test API configuration methods."""
def test_api_base_url_with_domain(opp):
"""Test setting API URL with domain."""
api_config = http.ApiConfig("example.com")
assert api_config.base_url == "http://example.com:8123"
def test_api_base_url_with_ip(opp):
"""Test setting API URL with IP."""
api_config = http.ApiConfig("1.1.1.1")
assert api_config.base_url == "http://1.1.1.1:8123"
def test_api_base_url_with_ip_and_port(opp):
"""Test setting API URL with IP and port."""
api_config = http.ApiConfig("1.1.1.1", 8124)
assert api_config.base_url == "http://1.1.1.1:8124"
def test_api_base_url_with_protocol(opp):
"""Test setting API URL with protocol."""
api_config = http.ApiConfig("https://example.com")
assert api_config.base_url == "https://example.com:8123"
def test_api_base_url_with_protocol_and_port(opp):
"""Test setting API URL with protocol and port."""
api_config = http.ApiConfig("https://example.com", 433)
assert api_config.base_url == "https://example.com:433"
def test_api_base_url_with_ssl_enable(opp):
"""Test setting API URL with use_ssl enabled."""
api_config = http.ApiConfig("example.com", use_ssl=True)
assert api_config.base_url == "https://example.com:8123"
def test_api_base_url_with_ssl_enable_and_port(opp):
"""Test setting API URL with use_ssl enabled and port."""
api_config = http.ApiConfig("1.1.1.1", use_ssl=True, port=8888)
assert api_config.base_url == "https://1.1.1.1:8888"
def test_api_base_url_with_protocol_and_ssl_enable(opp):
"""Test setting API URL with specific protocol and use_ssl enabled."""
api_config = http.ApiConfig("http://example.com", use_ssl=True)
assert api_config.base_url == "http://example.com:8123"
def test_api_base_url_removes_trailing_slash(opp):
"""Test a trialing slash is removed when setting the API URL."""
api_config = http.ApiConfig("http://example.com/")
assert api_config.base_url == "http://example.com:8123"
async def test_api_base_url_with_domain(opp):
"""Test setting API URL."""
result = await async_setup_component(
opp, "http", {"http": {"base_url": "example.com"}}
)
assert result
assert opp.config.api.base_url == "http://example.com"
async def test_api_base_url_with_ip(opp):
"""Test setting api url."""
result = await async_setup_component(
opp, "http", {"http": {"server_host": "1.1.1.1"}}
)
assert result
assert opp.config.api.base_url == "http://1.1.1.1:8123"
async def test_api_base_url_with_ip_port(opp):
"""Test setting api url."""
result = await async_setup_component(
opp, "http", {"http": {"base_url": "1.1.1.1:8124"}}
)
assert result
assert opp.config.api.base_url == "http://1.1.1.1:8124"
async def test_api_no_base_url(opp):
"""Test setting api url."""
result = await async_setup_component(opp, "http", {"http": {}})
assert result
assert opp.config.api.base_url == "http://127.0.0.1:8123"
async def test_api_base_url_removes_trailing_slash(opp):
"""Test setting api url."""
result = await async_setup_component(
opp, "http", {"http": {"base_url": "https://example.com/"}}
)
assert result
assert opp.config.api.base_url == "https://example.com"
async def test_not_log_password(opp, aiohttp_client, caplog, legacy_auth):
"""Test access with password doesn't get logged."""
assert await async_setup_component(opp, "api", {"http": {}})
client = await aiohttp_client(opp.http.app)
logging.getLogger("aiohttp.access").setLevel(logging.INFO)
resp = await client.get("/api/", params={"api_password": "<PASSWORD>"})
assert resp.status == 401
logs = caplog.text
# Ensure we don't log API passwords
assert "/api/" in logs
assert "some-pass" not in logs
async def test_proxy_config(opp):
"""Test use_x_forwarded_for must config together with trusted_proxies."""
assert (
await async_setup_component(
opp,
"http",
{
"http": {
http.CONF_USE_X_FORWARDED_FOR: True,
http.CONF_TRUSTED_PROXIES: ["127.0.0.1"],
}
},
)
is True
)
async def test_proxy_config_only_use_xff(opp):
"""Test use_x_forwarded_for must config together with trusted_proxies."""
assert (
await async_setup_component(
opp, "http", {"http": {http.CONF_USE_X_FORWARDED_FOR: True}}
)
is not True
)
async def test_proxy_config_only_trust_proxies(opp):
"""Test use_x_forwarded_for must config together with trusted_proxies."""
assert (
await async_setup_component(
opp, "http", {"http": {http.CONF_TRUSTED_PROXIES: ["127.0.0.1"]}}
)
is not True
)
async def test_ssl_profile_defaults_modern(opp):
"""Test default ssl profile."""
assert await async_setup_component(opp, "http", {}) is True
opp.http.ssl_certificate = "bla"
with patch("ssl.SSLContext.load_cert_chain"), patch(
"openpeerpower.util.ssl.server_context_modern",
side_effect=server_context_modern,
) as mock_context:
await opp.async_start()
await opp.async_block_till_done()
assert len(mock_context.mock_calls) == 1
async def test_ssl_profile_change_intermediate(opp):
"""Test setting ssl profile to intermediate."""
assert (
await async_setup_component(
opp, "http", {"http": {"ssl_profile": "intermediate"}}
)
is True
)
opp.http.ssl_certificate = "bla"
with patch("ssl.SSLContext.load_cert_chain"), patch(
"openpeerpower.util.ssl.server_context_intermediate",
side_effect=server_context_intermediate,
) as mock_context:
await opp.async_start()
await opp.async_block_till_done()
assert len(mock_context.mock_calls) == 1
async def test_ssl_profile_change_modern(opp):
"""Test setting ssl profile to modern."""
assert (
await async_setup_component(opp, "http", {"http": {"ssl_profile": "modern"}})
is True
)
opp.http.ssl_certificate = "bla"
with patch("ssl.SSLContext.load_cert_chain"), patch(
"openpeerpower.util.ssl.server_context_modern",
side_effect=server_context_modern,
) as mock_context:
await opp.async_start()
await opp.async_block_till_done()
assert len(mock_context.mock_calls) == 1
async def test_cors_defaults(opp):
"""Test the CORS default settings."""
with patch("openpeerpower.components.http.setup_cors") as mock_setup:
assert await async_setup_component(opp, "http", {})
assert len(mock_setup.mock_calls) == 1
assert mock_setup.mock_calls[0][1][1] == ["https://cast.open-peer-power.io"]
async def test_storing_config(opp, aiohttp_client, aiohttp_unused_port):
"""Test that we store last working config."""
config = {
http.CONF_SERVER_PORT: aiohttp_unused_port(),
"use_x_forwarded_for": True,
"trusted_proxies": ["192.168.1.100"],
}
assert await async_setup_component(opp, http.DOMAIN, {http.DOMAIN: config})
await opp.async_start()
restored = await opp.components.http.async_get_last_config()
restored["trusted_proxies"][0] = ip_network(restored["trusted_proxies"][0])
assert restored == http.HTTP_SCHEMA(config) | tests/components/http/test_init.py | from ipaddress import ip_network
import logging
import unittest
from unittest.mock import patch
import openpeerpower.components.http as http
from openpeerpower.setup import async_setup_component
from openpeerpower.util.ssl import server_context_intermediate, server_context_modern
class TestView(http.OpenPeerPowerView):
"""Test the HTTP views."""
name = "test"
url = "/hello"
async def get(self, request):
"""Return a get request."""
return "hello"
async def test_registering_view_while_running(opp, aiohttp_client, aiohttp_unused_port):
"""Test that we can register a view while the server is running."""
await async_setup_component(
opp, http.DOMAIN, {http.DOMAIN: {http.CONF_SERVER_PORT: aiohttp_unused_port()}}
)
await opp.async_start()
# This raises a RuntimeError if app is frozen
opp.http.register_view(TestView)
class TestApiConfig(unittest.TestCase):
"""Test API configuration methods."""
def test_api_base_url_with_domain(opp):
"""Test setting API URL with domain."""
api_config = http.ApiConfig("example.com")
assert api_config.base_url == "http://example.com:8123"
def test_api_base_url_with_ip(opp):
"""Test setting API URL with IP."""
api_config = http.ApiConfig("1.1.1.1")
assert api_config.base_url == "http://1.1.1.1:8123"
def test_api_base_url_with_ip_and_port(opp):
"""Test setting API URL with IP and port."""
api_config = http.ApiConfig("1.1.1.1", 8124)
assert api_config.base_url == "http://1.1.1.1:8124"
def test_api_base_url_with_protocol(opp):
"""Test setting API URL with protocol."""
api_config = http.ApiConfig("https://example.com")
assert api_config.base_url == "https://example.com:8123"
def test_api_base_url_with_protocol_and_port(opp):
"""Test setting API URL with protocol and port."""
api_config = http.ApiConfig("https://example.com", 433)
assert api_config.base_url == "https://example.com:433"
def test_api_base_url_with_ssl_enable(opp):
"""Test setting API URL with use_ssl enabled."""
api_config = http.ApiConfig("example.com", use_ssl=True)
assert api_config.base_url == "https://example.com:8123"
def test_api_base_url_with_ssl_enable_and_port(opp):
"""Test setting API URL with use_ssl enabled and port."""
api_config = http.ApiConfig("1.1.1.1", use_ssl=True, port=8888)
assert api_config.base_url == "https://1.1.1.1:8888"
def test_api_base_url_with_protocol_and_ssl_enable(opp):
"""Test setting API URL with specific protocol and use_ssl enabled."""
api_config = http.ApiConfig("http://example.com", use_ssl=True)
assert api_config.base_url == "http://example.com:8123"
def test_api_base_url_removes_trailing_slash(opp):
"""Test a trialing slash is removed when setting the API URL."""
api_config = http.ApiConfig("http://example.com/")
assert api_config.base_url == "http://example.com:8123"
async def test_api_base_url_with_domain(opp):
"""Test setting API URL."""
result = await async_setup_component(
opp, "http", {"http": {"base_url": "example.com"}}
)
assert result
assert opp.config.api.base_url == "http://example.com"
async def test_api_base_url_with_ip(opp):
"""Test setting api url."""
result = await async_setup_component(
opp, "http", {"http": {"server_host": "1.1.1.1"}}
)
assert result
assert opp.config.api.base_url == "http://1.1.1.1:8123"
async def test_api_base_url_with_ip_port(opp):
"""Test setting api url."""
result = await async_setup_component(
opp, "http", {"http": {"base_url": "1.1.1.1:8124"}}
)
assert result
assert opp.config.api.base_url == "http://1.1.1.1:8124"
async def test_api_no_base_url(opp):
"""Test setting api url."""
result = await async_setup_component(opp, "http", {"http": {}})
assert result
assert opp.config.api.base_url == "http://127.0.0.1:8123"
async def test_api_base_url_removes_trailing_slash(opp):
"""Test setting api url."""
result = await async_setup_component(
opp, "http", {"http": {"base_url": "https://example.com/"}}
)
assert result
assert opp.config.api.base_url == "https://example.com"
async def test_not_log_password(opp, aiohttp_client, caplog, legacy_auth):
"""Test access with password doesn't get logged."""
assert await async_setup_component(opp, "api", {"http": {}})
client = await aiohttp_client(opp.http.app)
logging.getLogger("aiohttp.access").setLevel(logging.INFO)
resp = await client.get("/api/", params={"api_password": "<PASSWORD>"})
assert resp.status == 401
logs = caplog.text
# Ensure we don't log API passwords
assert "/api/" in logs
assert "some-pass" not in logs
async def test_proxy_config(opp):
"""Test use_x_forwarded_for must config together with trusted_proxies."""
assert (
await async_setup_component(
opp,
"http",
{
"http": {
http.CONF_USE_X_FORWARDED_FOR: True,
http.CONF_TRUSTED_PROXIES: ["127.0.0.1"],
}
},
)
is True
)
async def test_proxy_config_only_use_xff(opp):
"""Test use_x_forwarded_for must config together with trusted_proxies."""
assert (
await async_setup_component(
opp, "http", {"http": {http.CONF_USE_X_FORWARDED_FOR: True}}
)
is not True
)
async def test_proxy_config_only_trust_proxies(opp):
"""Test use_x_forwarded_for must config together with trusted_proxies."""
assert (
await async_setup_component(
opp, "http", {"http": {http.CONF_TRUSTED_PROXIES: ["127.0.0.1"]}}
)
is not True
)
async def test_ssl_profile_defaults_modern(opp):
"""Test default ssl profile."""
assert await async_setup_component(opp, "http", {}) is True
opp.http.ssl_certificate = "bla"
with patch("ssl.SSLContext.load_cert_chain"), patch(
"openpeerpower.util.ssl.server_context_modern",
side_effect=server_context_modern,
) as mock_context:
await opp.async_start()
await opp.async_block_till_done()
assert len(mock_context.mock_calls) == 1
async def test_ssl_profile_change_intermediate(opp):
"""Test setting ssl profile to intermediate."""
assert (
await async_setup_component(
opp, "http", {"http": {"ssl_profile": "intermediate"}}
)
is True
)
opp.http.ssl_certificate = "bla"
with patch("ssl.SSLContext.load_cert_chain"), patch(
"openpeerpower.util.ssl.server_context_intermediate",
side_effect=server_context_intermediate,
) as mock_context:
await opp.async_start()
await opp.async_block_till_done()
assert len(mock_context.mock_calls) == 1
async def test_ssl_profile_change_modern(opp):
"""Test setting ssl profile to modern."""
assert (
await async_setup_component(opp, "http", {"http": {"ssl_profile": "modern"}})
is True
)
opp.http.ssl_certificate = "bla"
with patch("ssl.SSLContext.load_cert_chain"), patch(
"openpeerpower.util.ssl.server_context_modern",
side_effect=server_context_modern,
) as mock_context:
await opp.async_start()
await opp.async_block_till_done()
assert len(mock_context.mock_calls) == 1
async def test_cors_defaults(opp):
"""Test the CORS default settings."""
with patch("openpeerpower.components.http.setup_cors") as mock_setup:
assert await async_setup_component(opp, "http", {})
assert len(mock_setup.mock_calls) == 1
assert mock_setup.mock_calls[0][1][1] == ["https://cast.open-peer-power.io"]
async def test_storing_config(opp, aiohttp_client, aiohttp_unused_port):
"""Test that we store last working config."""
config = {
http.CONF_SERVER_PORT: aiohttp_unused_port(),
"use_x_forwarded_for": True,
"trusted_proxies": ["192.168.1.100"],
}
assert await async_setup_component(opp, http.DOMAIN, {http.DOMAIN: config})
await opp.async_start()
restored = await opp.components.http.async_get_last_config()
restored["trusted_proxies"][0] = ip_network(restored["trusted_proxies"][0])
assert restored == http.HTTP_SCHEMA(config) | 0.852122 | 0.370567 |
from bandwidth.api_helper import APIHelper
from bandwidth.voice.models.transcription import Transcription
class RecordingMetadataResponse(object):
"""Implementation of the 'RecordingMetadataResponse' model.
TODO: type model description here.
Attributes:
application_id (string): TODO: type description here.
account_id (string): TODO: type description here.
call_id (string): TODO: type description here.
parent_call_id (string): TODO: type description here.
recording_id (string): TODO: type description here.
to (string): TODO: type description here.
mfrom (string): TODO: type description here.
transfer_caller_id (string): TODO: type description here.
transfer_to (string): TODO: type description here.
duration (string): Format is ISO-8601
direction (DirectionEnum): TODO: type description here.
channels (int): TODO: type description here.
start_time (datetime): TODO: type description here.
end_time (datetime): TODO: type description here.
file_format (FileFormatEnum): TODO: type description here.
status (Status1Enum): TODO: type description here.
media_url (string): TODO: type description here.
transcription (Transcription): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"application_id": 'applicationId',
"account_id": 'accountId',
"call_id": 'callId',
"parent_call_id": 'parentCallId',
"recording_id": 'recordingId',
"to": 'to',
"mfrom": 'from',
"transfer_caller_id": 'transferCallerId',
"transfer_to": 'transferTo',
"duration": 'duration',
"direction": 'direction',
"channels": 'channels',
"start_time": 'startTime',
"end_time": 'endTime',
"file_format": 'fileFormat',
"status": 'status',
"media_url": 'mediaUrl',
"transcription": 'transcription'
}
def __init__(self,
application_id=None,
account_id=None,
call_id=None,
parent_call_id=None,
recording_id=None,
to=None,
mfrom=None,
transfer_caller_id=None,
transfer_to=None,
duration=None,
direction=None,
channels=None,
start_time=None,
end_time=None,
file_format=None,
status=None,
media_url=None,
transcription=None):
"""Constructor for the RecordingMetadataResponse class"""
# Initialize members of the class
self.application_id = application_id
self.account_id = account_id
self.call_id = call_id
self.parent_call_id = parent_call_id
self.recording_id = recording_id
self.to = to
self.mfrom = mfrom
self.transfer_caller_id = transfer_caller_id
self.transfer_to = transfer_to
self.duration = duration
self.direction = direction
self.channels = channels
self.start_time = APIHelper.RFC3339DateTime(start_time) if start_time else None
self.end_time = APIHelper.RFC3339DateTime(end_time) if end_time else None
self.file_format = file_format
self.status = status
self.media_url = media_url
self.transcription = transcription
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
application_id = dictionary.get('applicationId')
account_id = dictionary.get('accountId')
call_id = dictionary.get('callId')
parent_call_id = dictionary.get('parentCallId')
recording_id = dictionary.get('recordingId')
to = dictionary.get('to')
mfrom = dictionary.get('from')
transfer_caller_id = dictionary.get('transferCallerId')
transfer_to = dictionary.get('transferTo')
duration = dictionary.get('duration')
direction = dictionary.get('direction')
channels = dictionary.get('channels')
start_time = APIHelper.RFC3339DateTime.from_value(dictionary.get("startTime")).datetime if dictionary.get("startTime") else None
end_time = APIHelper.RFC3339DateTime.from_value(dictionary.get("endTime")).datetime if dictionary.get("endTime") else None
file_format = dictionary.get('fileFormat')
status = dictionary.get('status')
media_url = dictionary.get('mediaUrl')
transcription = Transcription.from_dictionary(dictionary.get('transcription')) if dictionary.get('transcription') else None
# Return an object of this model
return cls(application_id,
account_id,
call_id,
parent_call_id,
recording_id,
to,
mfrom,
transfer_caller_id,
transfer_to,
duration,
direction,
channels,
start_time,
end_time,
file_format,
status,
media_url,
transcription) | build/lib/bandwidth/voice/models/recording_metadata_response.py | from bandwidth.api_helper import APIHelper
from bandwidth.voice.models.transcription import Transcription
class RecordingMetadataResponse(object):
"""Implementation of the 'RecordingMetadataResponse' model.
TODO: type model description here.
Attributes:
application_id (string): TODO: type description here.
account_id (string): TODO: type description here.
call_id (string): TODO: type description here.
parent_call_id (string): TODO: type description here.
recording_id (string): TODO: type description here.
to (string): TODO: type description here.
mfrom (string): TODO: type description here.
transfer_caller_id (string): TODO: type description here.
transfer_to (string): TODO: type description here.
duration (string): Format is ISO-8601
direction (DirectionEnum): TODO: type description here.
channels (int): TODO: type description here.
start_time (datetime): TODO: type description here.
end_time (datetime): TODO: type description here.
file_format (FileFormatEnum): TODO: type description here.
status (Status1Enum): TODO: type description here.
media_url (string): TODO: type description here.
transcription (Transcription): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"application_id": 'applicationId',
"account_id": 'accountId',
"call_id": 'callId',
"parent_call_id": 'parentCallId',
"recording_id": 'recordingId',
"to": 'to',
"mfrom": 'from',
"transfer_caller_id": 'transferCallerId',
"transfer_to": 'transferTo',
"duration": 'duration',
"direction": 'direction',
"channels": 'channels',
"start_time": 'startTime',
"end_time": 'endTime',
"file_format": 'fileFormat',
"status": 'status',
"media_url": 'mediaUrl',
"transcription": 'transcription'
}
def __init__(self,
application_id=None,
account_id=None,
call_id=None,
parent_call_id=None,
recording_id=None,
to=None,
mfrom=None,
transfer_caller_id=None,
transfer_to=None,
duration=None,
direction=None,
channels=None,
start_time=None,
end_time=None,
file_format=None,
status=None,
media_url=None,
transcription=None):
"""Constructor for the RecordingMetadataResponse class"""
# Initialize members of the class
self.application_id = application_id
self.account_id = account_id
self.call_id = call_id
self.parent_call_id = parent_call_id
self.recording_id = recording_id
self.to = to
self.mfrom = mfrom
self.transfer_caller_id = transfer_caller_id
self.transfer_to = transfer_to
self.duration = duration
self.direction = direction
self.channels = channels
self.start_time = APIHelper.RFC3339DateTime(start_time) if start_time else None
self.end_time = APIHelper.RFC3339DateTime(end_time) if end_time else None
self.file_format = file_format
self.status = status
self.media_url = media_url
self.transcription = transcription
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
application_id = dictionary.get('applicationId')
account_id = dictionary.get('accountId')
call_id = dictionary.get('callId')
parent_call_id = dictionary.get('parentCallId')
recording_id = dictionary.get('recordingId')
to = dictionary.get('to')
mfrom = dictionary.get('from')
transfer_caller_id = dictionary.get('transferCallerId')
transfer_to = dictionary.get('transferTo')
duration = dictionary.get('duration')
direction = dictionary.get('direction')
channels = dictionary.get('channels')
start_time = APIHelper.RFC3339DateTime.from_value(dictionary.get("startTime")).datetime if dictionary.get("startTime") else None
end_time = APIHelper.RFC3339DateTime.from_value(dictionary.get("endTime")).datetime if dictionary.get("endTime") else None
file_format = dictionary.get('fileFormat')
status = dictionary.get('status')
media_url = dictionary.get('mediaUrl')
transcription = Transcription.from_dictionary(dictionary.get('transcription')) if dictionary.get('transcription') else None
# Return an object of this model
return cls(application_id,
account_id,
call_id,
parent_call_id,
recording_id,
to,
mfrom,
transfer_caller_id,
transfer_to,
duration,
direction,
channels,
start_time,
end_time,
file_format,
status,
media_url,
transcription) | 0.482917 | 0.133613 |
from flask import Blueprint, jsonify
from brainzutils import cache
from critiquebrainz.db.review import supported_languages, ENTITY_TYPES
import critiquebrainz.db.review as db_review
from critiquebrainz.db import (
vote as db_vote,
exceptions as db_exceptions,
spam_report as db_spam_report,
revision as db_revision,
users as db_users,
REVIEW_RATING_MIN,
REVIEW_RATING_MAX,
REVIEW_TEXT_MIN_LENGTH,
REVIEW_TEXT_MAX_LENGTH
)
from critiquebrainz.ws.exceptions import NotFound, AccessDenied, InvalidRequest, LimitExceeded, MissingDataError
from critiquebrainz.ws.oauth import oauth
from critiquebrainz.ws.parser import Parser
from critiquebrainz.decorators import crossdomain
# Blueprint collecting the /review endpoints of the CritiqueBrainz web service.
review_bp = Blueprint('ws_review', __name__)
# Cache namespace for review data (see the brainzutils `cache` import);
# NOTE(review): not referenced in this chunk — presumably used for cache
# invalidation elsewhere in the module, confirm before relying on it.
REVIEW_CACHE_NAMESPACE = "Review"
def get_review_or_404(review_id):
    """Fetch a review by its ID or abort with HTTP 404.

    Args:
        review_id: UUID of the review to fetch.

    Returns:
        The review as returned by the database layer.

    Raises:
        NotFound: if no review with the given ID exists.
    """
    try:
        return db_review.get_by_id(review_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound("Can't find a review with ID: {review_id}".format(review_id=review_id))
@review_bp.route('/<uuid:review_id>', methods=['GET'])
@crossdomain()
def review_entity_handler(review_id):
    """Fetch a single review identified by its UUID.

    **Request Example:**

    .. code-block:: bash

       $ curl https://critiquebrainz.org/ws/1/review/b7575c23-13d5-4adc-ac09-2f55a647d3de \\
              -X GET

    **Response Example:**

    .. code-block:: json

       {
           "review": {
               "created": "Tue, 10 Aug 2010 00:00:00 GMT",
               "edits": 0,
               "entity_id": "03e0a99c-3530-4e64-8f50-6592325c2082",
               "entity_type": "release_group",
               "id": "b7575c23-13d5-4adc-ac09-2f55a647d3de",
               "language": "en",
               "last_updated": "Tue, 10 Aug 2010 00:00:00 GMT",
               "license": {
                   "full_name": "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported",
                   "id": "CC BY-NC-SA 3.0",
                   "info_url": "https:\/\/creativecommons.org\/licenses\/by-nc-sa\/3.0\/"
               },
               "popularity": 0,
               "source": "BBC",
               "source_url": "http:\/\/www.bbc.co.uk\/music\/reviews\/3vfd",
               "text": "TEXT CONTENT OF REVIEW",
               "rating": 5,
               "user": {
                   "created": "Wed, 07 May 2014 14:55:23 GMT",
                   "display_name": "<NAME>",
                   "id": "f5857a65-1eb1-4574-8843-ae6195de16fa",
                   "karma": 0,
                   "user_type": "Noob"
               },
               "votes": {
                   "positive": 0,
                   "negative": 0
               }
           }
       }

    :statuscode 200: no error
    :statuscode 404: review not found

    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    # Hidden reviews must be indistinguishable from missing ones.
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    serialized = db_review.to_dict(review)
    return jsonify(review=serialized)
@review_bp.route('/<uuid:review_id>/revisions', methods=['GET'])
@crossdomain()
def review_revisions_handler(review_id):
    """List all revisions of the review with the specified UUID.

    **Request Example:**

    .. code-block:: bash

       $ curl https://critiquebrainz.org/ws/1/review/b7575c23-13d5-4adc-ac09-2f55a647d3de/revisions \\
              -X GET

    **Response Example:**

    .. code-block:: json

       {
           "revisions": [
               {
                   "id": 1,
                   "review_id": "b7575c23-13d5-4adc-ac09-2f55a647d3de",
                   "text": "TEXT CONTENT OF REVIEW",
                   "rating": 5,
                   "timestamp": "Tue, 10 Aug 2010 00:00:00 GMT",
                   "votes_negative": 0,
                   "votes_positive": 0
               }
           ]
       }

    :statuscode 200: no error
    :statuscode 404: review not found

    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    # Hidden reviews must be indistinguishable from missing ones.
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    revisions = db_revision.get(review_id, limit=None)
    total = len(revisions)
    # db_revision.get returns newest first; number them so the newest
    # revision has id == total and the oldest has id == 1.
    for position, revision in enumerate(revisions):
        revision.update(id=total - position)
    return jsonify(revisions=revisions)
@review_bp.route('/<uuid:review_id>/revisions/<int:rev>', methods=['GET'])
@crossdomain()
def review_revision_entity_handler(review_id, rev):
    """Get a single numbered revision of the review with a specified UUID.

    ``rev`` is the 1-based revision number (1 = oldest revision).

    :statuscode 200: no error
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    total = db_revision.get_count(review["id"])
    if rev > total:
        raise NotFound("Can't find the revision you are looking for.")
    # Revisions come back newest-first: skipping (total - rev) entries lands
    # on the requested 1-based revision number.
    requested = db_revision.get(review_id, offset=total - rev)[0]
    requested.update(id=rev)
    return jsonify(revision=requested)
@review_bp.route('/<uuid:review_id>', methods=['DELETE'])
@oauth.require_auth('review')
@crossdomain()
def review_delete_handler(review_id, user):
    """Delete review with a specified UUID.

    **OAuth scope:** review

    Only the review's author may delete it.

    :statuscode 200: success
    :statuscode 403: access denied
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    # Author check: the stored user_id is compared in its string form.
    is_author = str(review["user_id"]) == user.id
    if not is_author:
        raise AccessDenied
    db_review.delete(review_id)
    return jsonify(message='Request processed successfully')
@review_bp.route('/<uuid:review_id>', methods=['POST'])
@oauth.require_auth('review')
@crossdomain()
def review_modify_handler(review_id, user):
    """Update review with a specified UUID.

    **OAuth scope:** review

    Only the parameters present in the JSON body are updated; absent ones
    keep their stored values.

    :json string text: Text part of review, min length is 25, max is 5000 **(optional)**
    :json integer rating: Rating part of review, min is 1, max is 5 **(optional)**

    :statuscode 200: success
    :statuscode 400: invalid request
    :statuscode 403: access denied
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    def read_updated_fields(current):
        # Fields missing from the request fall back to the stored values.
        try:
            new_text = Parser.string('json', 'text', min=REVIEW_TEXT_MIN_LENGTH, max=REVIEW_TEXT_MAX_LENGTH)
        except MissingDataError:
            new_text = current['text']
        try:
            new_rating = Parser.int('json', 'rating', min=REVIEW_RATING_MIN, max=REVIEW_RATING_MAX)
        except MissingDataError:
            new_rating = current['rating']
        if new_text is None and new_rating is None:
            raise InvalidRequest(desc='Review must have either text or rating')
        return new_text, new_rating

    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    if str(review["user_id"]) != user.id:
        raise AccessDenied
    text, rating = read_updated_fields(review)
    if text == review['text'] and rating == review['rating']:
        # Nothing changed; answer without touching the database.
        return jsonify(message='Request processed successfully', review=dict(id=review["id"]))
    db_review.update(
        review_id=review_id,
        drafted=review["is_draft"],
        text=text,
        rating=rating,
    )
    return jsonify(message='Request processed successfully',
                   review=dict(id=review["id"]))
@review_bp.route('/', methods=['GET'])
@crossdomain()
def review_list_handler():
    """Get list of reviews.

    :query entity_id: UUID of the entity that is being reviewed **(optional)**
    :query entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc. **(optional)**
    :query user_id: user's UUID **(optional)**
    :query sort: ``popularity`` or ``created`` **(optional)**
    :query limit: results limit, min is 0, max is 50, default is 50 **(optional)**
    :query offset: result offset, default is 0 **(optional)**
    :query language: language code (ISO 639-1) **(optional)**

    :statuscode 200: no error
    :resheader Content-Type: *application/json*
    """
    # TODO: This checking is added to keep old clients working and needs to be removed.
    release_group = Parser.uuid('uri', 'release_group', optional=True)
    if release_group:
        entity_id = release_group
        entity_type = 'release_group'
    else:
        entity_id = Parser.uuid('uri', 'entity_id', optional=True)
        entity_type = Parser.string('uri', 'entity_type', valid_values=ENTITY_TYPES, optional=True)
    user_id = Parser.uuid('uri', 'user_id', optional=True)
    # TODO: "rating" sort value is deprecated and needs to be removed.
    sort = Parser.string('uri', 'sort', valid_values=['popularity', 'created', 'rating'], optional=True)
    if sort == 'rating':
        sort = 'popularity'
    limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50
    offset = Parser.int('uri', 'offset', optional=True) or 0
    language = Parser.string('uri', 'language', min=2, max=3, optional=True)
    if language and language not in supported_languages:
        raise InvalidRequest(desc='Unsupported language')
    # TODO(roman): Ideally caching logic should live inside the model. Otherwise it
    # becomes hard to track all this stuff.
    # BUG FIX: entity_type filters the query but was missing from the cache
    # key, so requests differing only in entity_type shared cached results.
    cache_key = cache.gen_key('list', entity_id, entity_type, user_id, sort, limit, offset, language)
    cached_result = cache.get(cache_key, REVIEW_CACHE_NAMESPACE)
    if cached_result:
        reviews = cached_result['reviews']
        count = cached_result['count']
    else:
        reviews, count = db_review.list_reviews(
            entity_id=entity_id,
            entity_type=entity_type,
            user_id=user_id,
            sort=sort,
            limit=limit,
            offset=offset,
            language=language,
        )
        reviews = [db_review.to_dict(p) for p in reviews]
        cache.set(cache_key, {
            'reviews': reviews,
            'count': count,
        }, namespace=REVIEW_CACHE_NAMESPACE)
    return jsonify(limit=limit, offset=offset, count=count, reviews=reviews)
@review_bp.route('/', methods=['POST'])
@oauth.require_auth('review')
@crossdomain()
def review_post_handler(user):
    """Publish a review.

    **OAuth scope:** review

    :reqheader Content-Type: *application/json*

    :json uuid entity_id: UUID of the entity that is being reviewed
    :json string entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc.
    :json string text: Text part of review, min length is 25, max is 5000 **(optional)**
    :json integer rating: Rating part of review, min is 1, max is 5 **(optional)**
    :json string license_choice: license ID
    :json string lang: language code (ISO 639-1), default is ``en`` **(optional)**
    :json boolean is_draft: whether the review should be saved as a draft or not, default is ``False`` **(optional)**

    **NOTE:** You must provide some text or rating for the review.

    :resheader Content-Type: *application/json*
    """
    def fetch_params():
        is_draft = Parser.bool('json', 'is_draft', optional=True) or False
        # BUG FIX: the old code assigned to REVIEW_TEXT_MIN_LENGTH inside the
        # draft branch, which made the name local to this function and raised
        # UnboundLocalError for every non-draft request. Use a separate local;
        # drafts are exempt from the minimum text length.
        min_text_length = None if is_draft else REVIEW_TEXT_MIN_LENGTH
        entity_id = Parser.uuid('json', 'entity_id')
        entity_type = Parser.string('json', 'entity_type', valid_values=ENTITY_TYPES)
        text = Parser.string('json', 'text', min=min_text_length, max=REVIEW_TEXT_MAX_LENGTH, optional=True)
        rating = Parser.int('json', 'rating', min=REVIEW_RATING_MIN, max=REVIEW_RATING_MAX, optional=True)
        license_choice = Parser.string('json', 'license_choice')
        language = Parser.string('json', 'language', min=2, max=3, optional=True) or 'en'
        if text is None and rating is None:
            raise InvalidRequest(desc='Review must have either text or rating')
        if language and language not in supported_languages:
            raise InvalidRequest(desc='Unsupported language')
        # One published review per user per entity.
        if db_review.list_reviews(user_id=user.id, entity_id=entity_id)[1]:
            raise InvalidRequest(desc='You have already published a review for this album')
        return entity_id, entity_type, text, rating, license_choice, language, is_draft

    if user.is_review_limit_exceeded:
        raise LimitExceeded('You have exceeded your limit of reviews per day.')
    entity_id, entity_type, text, rating, license_choice, language, is_draft = fetch_params()
    review = db_review.create(
        user_id=user.id,
        entity_id=entity_id,
        entity_type=entity_type,
        text=text,
        rating=rating,
        license_id=license_choice,
        language=language,
        is_draft=is_draft,
    )
    return jsonify(message='Request processed successfully', id=review["id"])
@review_bp.route('/languages', methods=['GET'])
@crossdomain()
def languages_list_handler():
    """Get the list of supported review languages (ISO 639-1 codes).

    The response is ``{"languages": ["aa", "ab", ...]}``.

    :resheader Content-Type: *application/json*
    """
    return jsonify(languages=supported_languages)
@review_bp.route('/<uuid:review_id>/vote', methods=['GET'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_entity_handler(review_id, user):
    """Get your vote for a specified review.

    **OAuth scope:** vote

    The response wraps the vote in a ``vote`` object::

        {"vote": {"vote": true, "voted_at": "..."}}

    :statuscode 200: no error
    :statuscode 404: review or vote not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    try:
        vote = db_vote.get(user_id=user.id, revision_id=review["last_revision"]["id"])
    except db_exceptions.NoDataFoundException:
        raise NotFound("Can't find your vote for this review.")
    # FIX: wrap the result in a "vote" envelope to match the documented
    # response shape; the bare jsonify(vote) returned the dict unwrapped.
    return jsonify(vote=vote)
@review_bp.route('/<uuid:review_id>/vote', methods=['PUT'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_put_handler(review_id, user):
    """Set your vote for a specified review.

    **OAuth scope:** vote

    :json boolean vote: ``true`` if upvote, ``false`` if downvote

    **NOTE:** Voting on reviews without text is not allowed.

    :statuscode 200: success
    :statuscode 400: invalid request (see source)
    :statuscode 403: daily vote limit exceeded
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    vote = Parser.bool('json', 'vote')
    if str(review["user_id"]) == user.id:
        raise InvalidRequest(desc='You cannot rate your own review.')
    if review["text"] is None:
        raise InvalidRequest(desc='Voting on reviews without text is not allowed.')
    # The daily limit only blocks brand-new votes; changing an existing vote
    # is always allowed.
    if user.is_vote_limit_exceeded and not db_users.has_voted(user.id, review_id):
        raise LimitExceeded('You have exceeded your limit of votes per day.')
    db_vote.submit(
        user_id=user.id,
        revision_id=review["last_revision"]["id"],
        vote=vote,  # overwrites an existing vote, if needed
    )
    return jsonify(message='Request processed successfully')
@review_bp.route('/<uuid:review_id>/vote', methods=['DELETE'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_delete_handler(review_id, user):
    """Delete your vote for a specified review.

    **OAuth scope:** vote

    :statuscode 200: success
    :statuscode 400: you have not voted on this review
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    try:
        existing_vote = db_vote.get(user_id=user.id, revision_id=review["last_revision"]["id"])
    except db_exceptions.NoDataFoundException:
        raise InvalidRequest("Review is not rated yet.")
    db_vote.delete(user_id=existing_vote["user_id"], revision_id=existing_vote["revision_id"])
    return jsonify(message="Request processed successfully")
@review_bp.route('/<uuid:review_id>/report', methods=['POST'])
@oauth.require_auth('vote')
@crossdomain()
def review_spam_report_handler(review_id, user):
    """Create spam report for a specified review.

    **OAuth scope:** vote

    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    # BUG FIX: review["user_id"] must be compared in string form (as every
    # other handler does); the unconverted comparison never matched, so users
    # could report their own reviews.
    if str(review["user_id"]) == user.id:
        raise InvalidRequest('own')
    db_spam_report.create(review["last_revision"]["id"], user.id, "Spam")
    return jsonify(message="Spam report created successfully")
from flask import Blueprint, jsonify
from brainzutils import cache
from critiquebrainz.db.review import supported_languages, ENTITY_TYPES
import critiquebrainz.db.review as db_review
from critiquebrainz.db import (
vote as db_vote,
exceptions as db_exceptions,
spam_report as db_spam_report,
revision as db_revision,
users as db_users,
REVIEW_RATING_MIN,
REVIEW_RATING_MAX,
REVIEW_TEXT_MIN_LENGTH,
REVIEW_TEXT_MAX_LENGTH
)
from critiquebrainz.ws.exceptions import NotFound, AccessDenied, InvalidRequest, LimitExceeded, MissingDataError
from critiquebrainz.ws.oauth import oauth
from critiquebrainz.ws.parser import Parser
from critiquebrainz.decorators import crossdomain
review_bp = Blueprint('ws_review', __name__)
REVIEW_CACHE_NAMESPACE = "Review"
def get_review_or_404(review_id):
    """Fetch a review by its ID, raising ``NotFound`` (HTTP 404) if missing or
    unknown to the database layer."""
    try:
        return db_review.get_by_id(review_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound("Can't find a review with ID: {review_id}".format(review_id=review_id))
@review_bp.route('/<uuid:review_id>', methods=['GET'])
@crossdomain()
def review_entity_handler(review_id):
    """Get review with a specified UUID.

    The response carries a single ``review`` object with the review's
    metadata, text/rating, license, author and vote counts.

    :statuscode 200: no error
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    # Hidden reviews are treated as nonexistent for API consumers.
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    payload = db_review.to_dict(review)
    return jsonify(review=payload)
@review_bp.route('/<uuid:review_id>/revisions', methods=['GET'])
@crossdomain()
def review_revisions_handler(review_id):
    """Get all revisions of the review with a specified UUID.

    Each revision in the response is annotated with a 1-based ``id``
    (1 = oldest revision, highest = latest).

    :statuscode 200: no error
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    revisions = db_revision.get(review_id, limit=None)
    total = len(revisions)
    # db_revision.get returns revisions newest-first, so number them descending.
    for position, revision in enumerate(revisions):
        revision.update(id=total - position)
    return jsonify(revisions=revisions)
@review_bp.route('/<uuid:review_id>/revisions/<int:rev>', methods=['GET'])
@crossdomain()
def review_revision_entity_handler(review_id, rev):
    """Get a single numbered revision of the review with a specified UUID.

    ``rev`` is the 1-based revision number (1 = oldest revision).

    :statuscode 200: no error
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    total = db_revision.get_count(review["id"])
    if rev > total:
        raise NotFound("Can't find the revision you are looking for.")
    # Revisions come back newest-first: skipping (total - rev) entries lands
    # on the requested 1-based revision number.
    requested = db_revision.get(review_id, offset=total - rev)[0]
    requested.update(id=rev)
    return jsonify(revision=requested)
@review_bp.route('/<uuid:review_id>', methods=['DELETE'])
@oauth.require_auth('review')
@crossdomain()
def review_delete_handler(review_id, user):
    """Delete review with a specified UUID.

    **OAuth scope:** review

    Only the review's author may delete it.

    :statuscode 200: success
    :statuscode 403: access denied
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    # Author check: the stored user_id is compared in its string form.
    is_author = str(review["user_id"]) == user.id
    if not is_author:
        raise AccessDenied
    db_review.delete(review_id)
    return jsonify(message='Request processed successfully')
@review_bp.route('/<uuid:review_id>', methods=['POST'])
@oauth.require_auth('review')
@crossdomain()
def review_modify_handler(review_id, user):
    """Update review with a specified UUID.

    **OAuth scope:** review

    Only the parameters present in the JSON body are updated; absent ones
    keep their stored values.

    :json string text: Text part of review, min length is 25, max is 5000 **(optional)**
    :json integer rating: Rating part of review, min is 1, max is 5 **(optional)**

    :statuscode 200: success
    :statuscode 400: invalid request
    :statuscode 403: access denied
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    def read_updated_fields(current):
        # Fields missing from the request fall back to the stored values.
        try:
            new_text = Parser.string('json', 'text', min=REVIEW_TEXT_MIN_LENGTH, max=REVIEW_TEXT_MAX_LENGTH)
        except MissingDataError:
            new_text = current['text']
        try:
            new_rating = Parser.int('json', 'rating', min=REVIEW_RATING_MIN, max=REVIEW_RATING_MAX)
        except MissingDataError:
            new_rating = current['rating']
        if new_text is None and new_rating is None:
            raise InvalidRequest(desc='Review must have either text or rating')
        return new_text, new_rating

    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    if str(review["user_id"]) != user.id:
        raise AccessDenied
    text, rating = read_updated_fields(review)
    if text == review['text'] and rating == review['rating']:
        # Nothing changed; answer without touching the database.
        return jsonify(message='Request processed successfully', review=dict(id=review["id"]))
    db_review.update(
        review_id=review_id,
        drafted=review["is_draft"],
        text=text,
        rating=rating,
    )
    return jsonify(message='Request processed successfully',
                   review=dict(id=review["id"]))
@review_bp.route('/', methods=['GET'])
@crossdomain()
def review_list_handler():
    """Get list of reviews.

    :query entity_id: UUID of the entity that is being reviewed **(optional)**
    :query entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc. **(optional)**
    :query user_id: user's UUID **(optional)**
    :query sort: ``popularity`` or ``created`` **(optional)**
    :query limit: results limit, min is 0, max is 50, default is 50 **(optional)**
    :query offset: result offset, default is 0 **(optional)**
    :query language: language code (ISO 639-1) **(optional)**

    :statuscode 200: no error
    :resheader Content-Type: *application/json*
    """
    # TODO: This checking is added to keep old clients working and needs to be removed.
    release_group = Parser.uuid('uri', 'release_group', optional=True)
    if release_group:
        entity_id = release_group
        entity_type = 'release_group'
    else:
        entity_id = Parser.uuid('uri', 'entity_id', optional=True)
        entity_type = Parser.string('uri', 'entity_type', valid_values=ENTITY_TYPES, optional=True)
    user_id = Parser.uuid('uri', 'user_id', optional=True)
    # TODO: "rating" sort value is deprecated and needs to be removed.
    sort = Parser.string('uri', 'sort', valid_values=['popularity', 'created', 'rating'], optional=True)
    if sort == 'rating':
        sort = 'popularity'
    limit = Parser.int('uri', 'limit', min=1, max=50, optional=True) or 50
    offset = Parser.int('uri', 'offset', optional=True) or 0
    language = Parser.string('uri', 'language', min=2, max=3, optional=True)
    if language and language not in supported_languages:
        raise InvalidRequest(desc='Unsupported language')
    # TODO(roman): Ideally caching logic should live inside the model. Otherwise it
    # becomes hard to track all this stuff.
    # BUG FIX: entity_type filters the query but was missing from the cache
    # key, so requests differing only in entity_type shared cached results.
    cache_key = cache.gen_key('list', entity_id, entity_type, user_id, sort, limit, offset, language)
    cached_result = cache.get(cache_key, REVIEW_CACHE_NAMESPACE)
    if cached_result:
        reviews = cached_result['reviews']
        count = cached_result['count']
    else:
        reviews, count = db_review.list_reviews(
            entity_id=entity_id,
            entity_type=entity_type,
            user_id=user_id,
            sort=sort,
            limit=limit,
            offset=offset,
            language=language,
        )
        reviews = [db_review.to_dict(p) for p in reviews]
        cache.set(cache_key, {
            'reviews': reviews,
            'count': count,
        }, namespace=REVIEW_CACHE_NAMESPACE)
    return jsonify(limit=limit, offset=offset, count=count, reviews=reviews)
@review_bp.route('/', methods=['POST'])
@oauth.require_auth('review')
@crossdomain()
def review_post_handler(user):
    """Publish a review.

    **OAuth scope:** review

    :reqheader Content-Type: *application/json*

    :json uuid entity_id: UUID of the entity that is being reviewed
    :json string entity_type: One of the supported reviewable entities. 'release_group' or 'event' etc.
    :json string text: Text part of review, min length is 25, max is 5000 **(optional)**
    :json integer rating: Rating part of review, min is 1, max is 5 **(optional)**
    :json string license_choice: license ID
    :json string lang: language code (ISO 639-1), default is ``en`` **(optional)**
    :json boolean is_draft: whether the review should be saved as a draft or not, default is ``False`` **(optional)**

    **NOTE:** You must provide some text or rating for the review.

    :resheader Content-Type: *application/json*
    """
    def fetch_params():
        is_draft = Parser.bool('json', 'is_draft', optional=True) or False
        # BUG FIX: the old code assigned to REVIEW_TEXT_MIN_LENGTH inside the
        # draft branch, which made the name local to this function and raised
        # UnboundLocalError for every non-draft request. Use a separate local;
        # drafts are exempt from the minimum text length.
        min_text_length = None if is_draft else REVIEW_TEXT_MIN_LENGTH
        entity_id = Parser.uuid('json', 'entity_id')
        entity_type = Parser.string('json', 'entity_type', valid_values=ENTITY_TYPES)
        text = Parser.string('json', 'text', min=min_text_length, max=REVIEW_TEXT_MAX_LENGTH, optional=True)
        rating = Parser.int('json', 'rating', min=REVIEW_RATING_MIN, max=REVIEW_RATING_MAX, optional=True)
        license_choice = Parser.string('json', 'license_choice')
        language = Parser.string('json', 'language', min=2, max=3, optional=True) or 'en'
        if text is None and rating is None:
            raise InvalidRequest(desc='Review must have either text or rating')
        if language and language not in supported_languages:
            raise InvalidRequest(desc='Unsupported language')
        # One published review per user per entity.
        if db_review.list_reviews(user_id=user.id, entity_id=entity_id)[1]:
            raise InvalidRequest(desc='You have already published a review for this album')
        return entity_id, entity_type, text, rating, license_choice, language, is_draft

    if user.is_review_limit_exceeded:
        raise LimitExceeded('You have exceeded your limit of reviews per day.')
    entity_id, entity_type, text, rating, license_choice, language, is_draft = fetch_params()
    review = db_review.create(
        user_id=user.id,
        entity_id=entity_id,
        entity_type=entity_type,
        text=text,
        rating=rating,
        license_id=license_choice,
        language=language,
        is_draft=is_draft,
    )
    return jsonify(message='Request processed successfully', id=review["id"])
@review_bp.route('/languages', methods=['GET'])
@crossdomain()
def languages_list_handler():
    """Get the list of supported review languages (ISO 639-1 codes).

    The response is ``{"languages": ["aa", "ab", ...]}``.

    :resheader Content-Type: *application/json*
    """
    return jsonify(languages=supported_languages)
@review_bp.route('/<uuid:review_id>/vote', methods=['GET'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_entity_handler(review_id, user):
    """Get your vote for a specified review.

    **OAuth scope:** vote

    The response wraps the vote in a ``vote`` object::

        {"vote": {"vote": true, "voted_at": "..."}}

    :statuscode 200: no error
    :statuscode 404: review or vote not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    try:
        vote = db_vote.get(user_id=user.id, revision_id=review["last_revision"]["id"])
    except db_exceptions.NoDataFoundException:
        raise NotFound("Can't find your vote for this review.")
    # FIX: wrap the result in a "vote" envelope to match the documented
    # response shape; the bare jsonify(vote) returned the dict unwrapped.
    return jsonify(vote=vote)
@review_bp.route('/<uuid:review_id>/vote', methods=['PUT'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_put_handler(review_id, user):
    """Set your vote for a specified review.

    **OAuth scope:** vote

    :json boolean vote: ``true`` if upvote, ``false`` if downvote

    **NOTE:** Voting on reviews without text is not allowed.

    :statuscode 200: success
    :statuscode 400: invalid request (see source)
    :statuscode 403: daily vote limit exceeded
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    vote = Parser.bool('json', 'vote')
    if str(review["user_id"]) == user.id:
        raise InvalidRequest(desc='You cannot rate your own review.')
    if review["text"] is None:
        raise InvalidRequest(desc='Voting on reviews without text is not allowed.')
    # The daily limit only blocks brand-new votes; changing an existing vote
    # is always allowed.
    if user.is_vote_limit_exceeded and not db_users.has_voted(user.id, review_id):
        raise LimitExceeded('You have exceeded your limit of votes per day.')
    db_vote.submit(
        user_id=user.id,
        revision_id=review["last_revision"]["id"],
        vote=vote,  # overwrites an existing vote, if needed
    )
    return jsonify(message='Request processed successfully')
@review_bp.route('/<uuid:review_id>/vote', methods=['DELETE'])
@oauth.require_auth('vote')
@crossdomain()
def review_vote_delete_handler(review_id, user):
    """Delete your vote for a specified review.

    **OAuth scope:** vote

    :statuscode 200: success
    :statuscode 400: you have not voted on this review
    :statuscode 404: review not found
    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    try:
        existing_vote = db_vote.get(user_id=user.id, revision_id=review["last_revision"]["id"])
    except db_exceptions.NoDataFoundException:
        raise InvalidRequest("Review is not rated yet.")
    db_vote.delete(user_id=existing_vote["user_id"], revision_id=existing_vote["revision_id"])
    return jsonify(message="Request processed successfully")
@review_bp.route('/<uuid:review_id>/report', methods=['POST'])
@oauth.require_auth('vote')
@crossdomain()
def review_spam_report_handler(review_id, user):
    """Create spam report for a specified review.

    **OAuth scope:** vote

    :resheader Content-Type: *application/json*
    """
    review = get_review_or_404(review_id)
    if review["is_hidden"]:
        raise NotFound("Review has been hidden.")
    # BUG FIX: review["user_id"] must be compared in string form (as every
    # other handler does); the unconverted comparison never matched, so users
    # could report their own reviews.
    if str(review["user_id"]) == user.id:
        raise InvalidRequest('own')
    db_spam_report.create(review["last_revision"]["id"], user.id, "Spam")
    return jsonify(message="Spam report created successfully")
from __future__ import unicode_literals
import pytest
from django.core.urlresolvers import reverse
from machina.apps.forum.signals import forum_viewed
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.context_managers import mock_signal_receiver
from machina.test.factories import create_forum
from machina.test.factories import create_link_forum
from machina.test.testcases import BaseClientTestCase
# Models and permission helpers are resolved through machina's dynamic
# loading helpers rather than imported directly.
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
remove_perm = get_class('forum_permission.shortcuts', 'remove_perm')
class TestForumView(BaseClientTestCase):
    """Functional tests for the forum detail view."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Permission handler
        self.perm_handler = PermissionHandler()
        # One plain top-level forum plus a link forum that counts redirects.
        self.top_level_forum = create_forum()
        self.top_level_link = create_link_forum(link_redirects=True)
        # Grant the test client's user read access to both forums.
        assign_perm('can_read_forum', self.user, self.top_level_forum)
        assign_perm('can_read_forum', self.user, self.top_level_link)

    def test_browsing_works(self):
        url = reverse('forum:forum', kwargs={
            'slug': self.top_level_forum.slug, 'pk': self.top_level_forum.id})
        response = self.client.get(url, follow=True)
        assert response.status_code == 200

    def test_cannot_be_browsed_by_users_who_cannot_read_the_forum(self):
        remove_perm('can_read_forum', self.user, self.top_level_forum)
        url = reverse('forum:forum', kwargs={
            'slug': self.top_level_forum.slug, 'pk': self.top_level_forum.id})
        response = self.client.get(url, follow=True)
        assert response.status_code == 403

    def test_triggers_a_viewed_signal(self):
        urls = [
            reverse('forum:forum', kwargs={
                'slug': self.top_level_forum.slug, 'pk': self.top_level_forum.id}),
            reverse('forum:forum', kwargs={
                'slug': self.top_level_link.slug, 'pk': self.top_level_link.id}),
        ]
        # Both regular and link forums must emit forum_viewed exactly once.
        for url in urls:
            with mock_signal_receiver(forum_viewed) as receiver:
                self.client.get(url)
                assert receiver.call_count == 1

    def test_redirects_to_the_link_of_a_link_forum(self):
        url = reverse('forum:forum', kwargs={
            'slug': self.top_level_link.slug, 'pk': self.top_level_link.id})
        response = self.client.get(url)
        # A link forum answers with a redirect to its configured link.
        assert response.status_code == 302
        assert response['Location'] == self.top_level_link.link

    def test_increases_the_redirects_counter_of_a_link_forum(self):
        url = reverse('forum:forum', kwargs={
            'slug': self.top_level_link.slug, 'pk': self.top_level_link.id})
        redirects_before = self.top_level_link.link_redirects_count
        self.client.get(url)
        # Re-fetch from the database to observe the persisted counter.
        refreshed = self.top_level_link.__class__._default_manager.get(
            pk=self.top_level_link.pk)
        assert refreshed.link_redirects_count == redirects_before + 1
from __future__ import unicode_literals
import pytest
from django.core.urlresolvers import reverse
from machina.apps.forum.signals import forum_viewed
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.context_managers import mock_signal_receiver
from machina.test.factories import create_forum
from machina.test.factories import create_link_forum
from machina.test.testcases import BaseClientTestCase
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
remove_perm = get_class('forum_permission.shortcuts', 'remove_perm')
class TestForumView(BaseClientTestCase):
@pytest.fixture(autouse=True)
def setup(self):
# Permission handler
self.perm_handler = PermissionHandler()
# Set up a top-level forum and a link forum
self.top_level_forum = create_forum()
self.top_level_link = create_link_forum(link_redirects=True)
# Assign some permissions
assign_perm('can_read_forum', self.user, self.top_level_forum)
assign_perm('can_read_forum', self.user, self.top_level_link)
def test_browsing_works(self):
# Setup
correct_url = reverse('forum:forum', kwargs={
'slug': self.top_level_forum.slug, 'pk': self.top_level_forum.id})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_cannot_be_browsed_by_users_who_cannot_read_the_forum(self):
# Setup
remove_perm('can_read_forum', self.user, self.top_level_forum)
correct_url = reverse('forum:forum', kwargs={
'slug': self.top_level_forum.slug, 'pk': self.top_level_forum.id})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 403
def test_triggers_a_viewed_signal(self):
# Setup
forum_url = reverse('forum:forum', kwargs={
'slug': self.top_level_forum.slug, 'pk': self.top_level_forum.id})
link_url = reverse('forum:forum', kwargs={
'slug': self.top_level_link.slug, 'pk': self.top_level_link.id})
# Run & check
for url in [forum_url, link_url]:
with mock_signal_receiver(forum_viewed) as receiver:
self.client.get(url)
assert receiver.call_count == 1
def test_redirects_to_the_link_of_a_link_forum(self):
# Setup
correct_url = reverse('forum:forum', kwargs={
'slug': self.top_level_link.slug, 'pk': self.top_level_link.id})
# Run
response = self.client.get(correct_url)
# Check
assert response.status_code == 302
assert response['Location'] == self.top_level_link.link
def test_increases_the_redirects_counter_of_a_link_forum(self):
# Setup
correct_url = reverse('forum:forum', kwargs={
'slug': self.top_level_link.slug, 'pk': self.top_level_link.id})
initial_redirects_count = self.top_level_link.link_redirects_count
# Run
self.client.get(correct_url)
# Check
top_level_link = self.top_level_link.__class__._default_manager.get(
pk=self.top_level_link.pk)
assert top_level_link.link_redirects_count == initial_redirects_count + 1 | 0.500488 | 0.166777 |
def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Unknown'},
{'abbr': 'fsob',
'code': 1022,
'title': 'Forecast sensitivity to observations'},
{'abbr': 'fsow',
'code': 1023,
'title': 'Forecast sensitivity to observations wave'},
{'abbr': 'dahc', 'code': 1024, 'title': 'Daily archive hindcast'},
{'abbr': 'oper', 'code': 1025, 'title': 'Atmospheric model'},
{'abbr': 'scda',
'code': 1026,
'title': 'Atmospheric model',
'units': 'short cutoff'},
{'abbr': 'scwv', 'code': 1027, 'title': 'Wave model', 'units': 'short cutoff'},
{'abbr': 'dcda',
'code': 1028,
'title': 'Atmospheric model',
'units': 'delayed cutoff'},
{'abbr': 'dcwv',
'code': 1029,
'title': 'Wave model',
'units': 'delayed cutoff'},
{'abbr': 'enda', 'code': 1030, 'title': 'Ensemble data assimilation'},
{'abbr': 'efho', 'code': 1032, 'title': 'Ensemble forecast hindcast overlap'},
{'abbr': 'enfh', 'code': 1033, 'title': 'Ensemble forecast hindcasts'},
{'abbr': 'efov', 'code': 1034, 'title': 'Ensemble forecast overlap'},
{'abbr': 'enfo', 'code': 1035, 'title': 'Ensemble prediction system'},
{'abbr': 'sens', 'code': 1036, 'title': 'Sensitivity forecast'},
{'abbr': 'maed', 'code': 1037, 'title': 'Multianalysis ensemble data'},
{'abbr': 'amap', 'code': 1038, 'title': 'Analysis for multianalysis project'},
{'abbr': 'efhc',
'code': 1039,
'title': 'Ensemble forecast hindcasts',
'units': 'obsolete'},
{'abbr': 'efhs',
'code': 1040,
'title': 'Ensemble forecast hindcast statistics'},
{'abbr': 'toga', 'code': 1041, 'title': 'TOGA'},
{'abbr': 'cher', 'code': 1042, 'title': 'Chernobyl'},
{'abbr': 'mnth', 'code': 1043, 'title': 'Monthly means'},
{'abbr': 'supd', 'code': 1044, 'title': 'Deterministic supplementary data'},
{'abbr': 'wave', 'code': 1045, 'title': 'Wave model'},
{'abbr': 'ocea', 'code': 1046, 'title': 'Ocean'},
{'abbr': 'fgge', 'code': 1047, 'title': 'FGGE'},
{'abbr': 'egrr', 'code': 1050, 'title': 'Bracknell'},
{'abbr': 'kwbc', 'code': 1051, 'title': 'Washington'},
{'abbr': 'edzw', 'code': 1052, 'title': 'Offenbach'},
{'abbr': 'lfpw', 'code': 1053, 'title': 'Toulouse'},
{'abbr': 'rjtd', 'code': 1054, 'title': 'Tokyo'},
{'abbr': 'cwao', 'code': 1055, 'title': 'Montreal'},
{'abbr': 'ammc', 'code': 1056, 'title': 'Melbourne'},
{'abbr': 'efas',
'code': 1057,
'title': 'European Flood Awareness System',
'units': 'EFAS'},
{'abbr': 'efse',
'code': 1058,
'title': 'European Flood Awareness System (EFAS) seasonal forecasts'},
{'abbr': 'efcl',
'code': 1059,
'title': 'European Flood Awareness System (EFAS) climatology'},
{'abbr': 'wfas',
'code': 1060,
'title': 'Global flood awareness system',
'units': 'GLOFAS'},
{'abbr': 'wfcl',
'code': 1061,
'title': 'Global flood awareness system (GLOFAS) climatology'},
{'abbr': 'msdc',
'code': 1070,
'title': 'Monthly standard deviation and covariance'},
{'abbr': 'moda', 'code': 1071, 'title': 'Monthly means of daily means'},
{'abbr': 'monr',
'code': 1072,
'title': "Monthly means using G. Boer's step function"},
{'abbr': 'mnvr',
'code': 1073,
'title': "Monthly variance and covariance data using G. Boer's step "
'function'},
{'abbr': 'msda',
'code': 1074,
'title': 'Monthly standard deviation and covariance of daily means'},
{'abbr': 'mdfa',
'code': 1075,
'title': 'Monthly means of daily forecast accumulations'},
{'abbr': 'dacl', 'code': 1076, 'title': 'Daily climatology'},
{'abbr': 'wehs',
'code': 1077,
'title': 'Wave ensemble forecast hindcast statistics'},
{'abbr': 'ewho',
'code': 1078,
'title': 'Ensemble forecast wave hindcast overlap'},
{'abbr': 'enwh', 'code': 1079, 'title': 'Ensemble forecast wave hindcasts'},
{'abbr': 'wamo', 'code': 1080, 'title': 'Wave monthly means'},
{'abbr': 'waef', 'code': 1081, 'title': 'Wave ensemble forecast'},
{'abbr': 'wasf', 'code': 1082, 'title': 'Wave seasonal forecast'},
{'abbr': 'mawv', 'code': 1083, 'title': 'Multianalysis wave data'},
{'abbr': 'ewhc',
'code': 1084,
'title': 'Wave ensemble forecast hindcast',
'units': 'obsolete'},
{'abbr': 'wvhc', 'code': 1085, 'title': 'Wave hindcast'},
{'abbr': 'weov', 'code': 1086, 'title': 'Wave ensemble forecast overlap'},
{'abbr': 'wavm', 'code': 1087, 'title': 'Wave model', 'units': 'standalone'},
{'abbr': 'ewda', 'code': 1088, 'title': 'Ensemble wave data assimilation'},
{'abbr': 'dacw', 'code': 1089, 'title': 'Daily climatology wave'},
{'abbr': 'seas', 'code': 1090, 'title': 'Seasonal forecast'},
{'abbr': 'sfmm',
'code': 1091,
'title': 'Seasonal forecast atmospheric monthly means'},
{'abbr': 'swmm',
'code': 1092,
'title': 'Seasonal forecast wave monthly means'},
{'abbr': 'mofc', 'code': 1093, 'title': 'Monthly forecast'},
{'abbr': 'mofm', 'code': 1094, 'title': 'Monthly forecast means'},
{'abbr': 'wamf', 'code': 1095, 'title': 'Wave monthly forecast'},
{'abbr': 'wmfm', 'code': 1096, 'title': 'Wave monthly forecast means'},
{'abbr': 'smma', 'code': 1097, 'title': 'Seasonal monthly means anomalies'},
{'abbr': 'seap', 'code': 1110, 'title': 'Sensitive area prediction'},
{'abbr': 'mnfc', 'code': 1200, 'title': 'Real-time'},
{'abbr': 'mnfh', 'code': 1201, 'title': 'Hindcasts'},
{'abbr': 'mnfa', 'code': 1202, 'title': 'Anomalies'},
{'abbr': 'mnfw', 'code': 1203, 'title': 'Wave real-time'},
{'abbr': 'mfhw', 'code': 1204, 'title': 'Monthly forecast hindcasts wave'},
{'abbr': 'mfaw', 'code': 1205, 'title': 'Wave anomalies'},
{'abbr': 'mnfm', 'code': 1206, 'title': 'Real-time means'},
{'abbr': 'mfhm', 'code': 1207, 'title': 'Hindcast means'},
{'abbr': 'mfam', 'code': 1208, 'title': 'Anomaly means'},
{'abbr': 'mfwm', 'code': 1209, 'title': 'Wave real-time means'},
{'abbr': 'mhwm', 'code': 1210, 'title': 'Wave hindcast means'},
{'abbr': 'mawm', 'code': 1211, 'title': 'Wave anomaly means'},
{'abbr': 'mmsf', 'code': 1220, 'title': 'Multi-model seasonal forecast'},
{'abbr': 'msmm',
'code': 1221,
'title': 'Multi-model seasonal forecast atmospheric monthly means'},
{'abbr': 'wams', 'code': 1222, 'title': 'Multi-model seasonal forecast wave'},
{'abbr': 'mswm',
'code': 1223,
'title': 'Multi-model seasonal forecast wave monthly means'},
{'abbr': 'mmsa',
'code': 1224,
'title': 'Multi-model seasonal forecast monthly anomalies'},
{'abbr': 'mmaf', 'code': 1230, 'title': 'Multi-model multi-annual forecast'},
{'abbr': 'mmam',
'code': 1231,
'title': 'Multi-model multi-annual forecast means'},
{'abbr': 'mmaw',
'code': 1232,
'title': 'Multi-model multi-annual forecast wave'},
{'abbr': 'mmwm',
'code': 1233,
'title': 'Multi-model multi-annual forecast wave means'},
{'abbr': 'esmm', 'code': 1240, 'title': 'Combined multi-model monthly means'},
{'abbr': 'ehmm',
'code': 1241,
'title': 'Combined multi-model hindcast monthly means'},
{'abbr': 'edmm',
'code': 1242,
'title': 'Ensemble data assimilation monthly means'},
{'abbr': 'edmo',
'code': 1243,
'title': 'Ensemble data assimilation monthly means of daily means'},
{'abbr': 'ewmo',
'code': 1244,
'title': 'Ensemble wave data assimilation monthly means of daily means'},
{'abbr': 'ewmm',
'code': 1245,
'title': 'Ensemble wave data assimilation monthly means'},
{'abbr': 'espd', 'code': 1246, 'title': 'Ensemble supplementary data'},
{'abbr': 'lwda', 'code': 1247, 'title': 'Long window daily archive'},
{'abbr': 'lwwv', 'code': 1248, 'title': 'Long window wave'},
{'abbr': 'elda',
'code': 1249,
'title': 'Ensemble Long window Data Assimilation'},
{'abbr': 'ewla',
'code': 1250,
'title': 'Ensemble Wave Long window data Assimilation'},
{'abbr': 'wamd', 'code': 1251, 'title': 'Wave monthly means of daily means'},
{'abbr': 'gfas', 'code': 1252, 'title': 'Global fire assimilation system'},
{'abbr': 'cnrm', 'code': 2231, 'title': 'Meteo France climate centre'},
{'abbr': 'mpic', 'code': 2232, 'title': 'Max Plank Institute'},
{'abbr': 'ukmo', 'code': 2233, 'title': 'UKMO climate centre'}) | pyeccodes/defs/mars/stream_table.py | def load(h):
return ({'abbr': 0, 'code': 0, 'title': 'Unknown'},
{'abbr': 'fsob',
'code': 1022,
'title': 'Forecast sensitivity to observations'},
{'abbr': 'fsow',
'code': 1023,
'title': 'Forecast sensitivity to observations wave'},
{'abbr': 'dahc', 'code': 1024, 'title': 'Daily archive hindcast'},
{'abbr': 'oper', 'code': 1025, 'title': 'Atmospheric model'},
{'abbr': 'scda',
'code': 1026,
'title': 'Atmospheric model',
'units': 'short cutoff'},
{'abbr': 'scwv', 'code': 1027, 'title': 'Wave model', 'units': 'short cutoff'},
{'abbr': 'dcda',
'code': 1028,
'title': 'Atmospheric model',
'units': 'delayed cutoff'},
{'abbr': 'dcwv',
'code': 1029,
'title': 'Wave model',
'units': 'delayed cutoff'},
{'abbr': 'enda', 'code': 1030, 'title': 'Ensemble data assimilation'},
{'abbr': 'efho', 'code': 1032, 'title': 'Ensemble forecast hindcast overlap'},
{'abbr': 'enfh', 'code': 1033, 'title': 'Ensemble forecast hindcasts'},
{'abbr': 'efov', 'code': 1034, 'title': 'Ensemble forecast overlap'},
{'abbr': 'enfo', 'code': 1035, 'title': 'Ensemble prediction system'},
{'abbr': 'sens', 'code': 1036, 'title': 'Sensitivity forecast'},
{'abbr': 'maed', 'code': 1037, 'title': 'Multianalysis ensemble data'},
{'abbr': 'amap', 'code': 1038, 'title': 'Analysis for multianalysis project'},
{'abbr': 'efhc',
'code': 1039,
'title': 'Ensemble forecast hindcasts',
'units': 'obsolete'},
{'abbr': 'efhs',
'code': 1040,
'title': 'Ensemble forecast hindcast statistics'},
{'abbr': 'toga', 'code': 1041, 'title': 'TOGA'},
{'abbr': 'cher', 'code': 1042, 'title': 'Chernobyl'},
{'abbr': 'mnth', 'code': 1043, 'title': 'Monthly means'},
{'abbr': 'supd', 'code': 1044, 'title': 'Deterministic supplementary data'},
{'abbr': 'wave', 'code': 1045, 'title': 'Wave model'},
{'abbr': 'ocea', 'code': 1046, 'title': 'Ocean'},
{'abbr': 'fgge', 'code': 1047, 'title': 'FGGE'},
{'abbr': 'egrr', 'code': 1050, 'title': 'Bracknell'},
{'abbr': 'kwbc', 'code': 1051, 'title': 'Washington'},
{'abbr': 'edzw', 'code': 1052, 'title': 'Offenbach'},
{'abbr': 'lfpw', 'code': 1053, 'title': 'Toulouse'},
{'abbr': 'rjtd', 'code': 1054, 'title': 'Tokyo'},
{'abbr': 'cwao', 'code': 1055, 'title': 'Montreal'},
{'abbr': 'ammc', 'code': 1056, 'title': 'Melbourne'},
{'abbr': 'efas',
'code': 1057,
'title': 'European Flood Awareness System',
'units': 'EFAS'},
{'abbr': 'efse',
'code': 1058,
'title': 'European Flood Awareness System (EFAS) seasonal forecasts'},
{'abbr': 'efcl',
'code': 1059,
'title': 'European Flood Awareness System (EFAS) climatology'},
{'abbr': 'wfas',
'code': 1060,
'title': 'Global flood awareness system',
'units': 'GLOFAS'},
{'abbr': 'wfcl',
'code': 1061,
'title': 'Global flood awareness system (GLOFAS) climatology'},
{'abbr': 'msdc',
'code': 1070,
'title': 'Monthly standard deviation and covariance'},
{'abbr': 'moda', 'code': 1071, 'title': 'Monthly means of daily means'},
{'abbr': 'monr',
'code': 1072,
'title': "Monthly means using G. Boer's step function"},
{'abbr': 'mnvr',
'code': 1073,
'title': "Monthly variance and covariance data using G. Boer's step "
'function'},
{'abbr': 'msda',
'code': 1074,
'title': 'Monthly standard deviation and covariance of daily means'},
{'abbr': 'mdfa',
'code': 1075,
'title': 'Monthly means of daily forecast accumulations'},
{'abbr': 'dacl', 'code': 1076, 'title': 'Daily climatology'},
{'abbr': 'wehs',
'code': 1077,
'title': 'Wave ensemble forecast hindcast statistics'},
{'abbr': 'ewho',
'code': 1078,
'title': 'Ensemble forecast wave hindcast overlap'},
{'abbr': 'enwh', 'code': 1079, 'title': 'Ensemble forecast wave hindcasts'},
{'abbr': 'wamo', 'code': 1080, 'title': 'Wave monthly means'},
{'abbr': 'waef', 'code': 1081, 'title': 'Wave ensemble forecast'},
{'abbr': 'wasf', 'code': 1082, 'title': 'Wave seasonal forecast'},
{'abbr': 'mawv', 'code': 1083, 'title': 'Multianalysis wave data'},
{'abbr': 'ewhc',
'code': 1084,
'title': 'Wave ensemble forecast hindcast',
'units': 'obsolete'},
{'abbr': 'wvhc', 'code': 1085, 'title': 'Wave hindcast'},
{'abbr': 'weov', 'code': 1086, 'title': 'Wave ensemble forecast overlap'},
{'abbr': 'wavm', 'code': 1087, 'title': 'Wave model', 'units': 'standalone'},
{'abbr': 'ewda', 'code': 1088, 'title': 'Ensemble wave data assimilation'},
{'abbr': 'dacw', 'code': 1089, 'title': 'Daily climatology wave'},
{'abbr': 'seas', 'code': 1090, 'title': 'Seasonal forecast'},
{'abbr': 'sfmm',
'code': 1091,
'title': 'Seasonal forecast atmospheric monthly means'},
{'abbr': 'swmm',
'code': 1092,
'title': 'Seasonal forecast wave monthly means'},
{'abbr': 'mofc', 'code': 1093, 'title': 'Monthly forecast'},
{'abbr': 'mofm', 'code': 1094, 'title': 'Monthly forecast means'},
{'abbr': 'wamf', 'code': 1095, 'title': 'Wave monthly forecast'},
{'abbr': 'wmfm', 'code': 1096, 'title': 'Wave monthly forecast means'},
{'abbr': 'smma', 'code': 1097, 'title': 'Seasonal monthly means anomalies'},
{'abbr': 'seap', 'code': 1110, 'title': 'Sensitive area prediction'},
{'abbr': 'mnfc', 'code': 1200, 'title': 'Real-time'},
{'abbr': 'mnfh', 'code': 1201, 'title': 'Hindcasts'},
{'abbr': 'mnfa', 'code': 1202, 'title': 'Anomalies'},
{'abbr': 'mnfw', 'code': 1203, 'title': 'Wave real-time'},
{'abbr': 'mfhw', 'code': 1204, 'title': 'Monthly forecast hindcasts wave'},
{'abbr': 'mfaw', 'code': 1205, 'title': 'Wave anomalies'},
{'abbr': 'mnfm', 'code': 1206, 'title': 'Real-time means'},
{'abbr': 'mfhm', 'code': 1207, 'title': 'Hindcast means'},
{'abbr': 'mfam', 'code': 1208, 'title': 'Anomaly means'},
{'abbr': 'mfwm', 'code': 1209, 'title': 'Wave real-time means'},
{'abbr': 'mhwm', 'code': 1210, 'title': 'Wave hindcast means'},
{'abbr': 'mawm', 'code': 1211, 'title': 'Wave anomaly means'},
{'abbr': 'mmsf', 'code': 1220, 'title': 'Multi-model seasonal forecast'},
{'abbr': 'msmm',
'code': 1221,
'title': 'Multi-model seasonal forecast atmospheric monthly means'},
{'abbr': 'wams', 'code': 1222, 'title': 'Multi-model seasonal forecast wave'},
{'abbr': 'mswm',
'code': 1223,
'title': 'Multi-model seasonal forecast wave monthly means'},
{'abbr': 'mmsa',
'code': 1224,
'title': 'Multi-model seasonal forecast monthly anomalies'},
{'abbr': 'mmaf', 'code': 1230, 'title': 'Multi-model multi-annual forecast'},
{'abbr': 'mmam',
'code': 1231,
'title': 'Multi-model multi-annual forecast means'},
{'abbr': 'mmaw',
'code': 1232,
'title': 'Multi-model multi-annual forecast wave'},
{'abbr': 'mmwm',
'code': 1233,
'title': 'Multi-model multi-annual forecast wave means'},
{'abbr': 'esmm', 'code': 1240, 'title': 'Combined multi-model monthly means'},
{'abbr': 'ehmm',
'code': 1241,
'title': 'Combined multi-model hindcast monthly means'},
{'abbr': 'edmm',
'code': 1242,
'title': 'Ensemble data assimilation monthly means'},
{'abbr': 'edmo',
'code': 1243,
'title': 'Ensemble data assimilation monthly means of daily means'},
{'abbr': 'ewmo',
'code': 1244,
'title': 'Ensemble wave data assimilation monthly means of daily means'},
{'abbr': 'ewmm',
'code': 1245,
'title': 'Ensemble wave data assimilation monthly means'},
{'abbr': 'espd', 'code': 1246, 'title': 'Ensemble supplementary data'},
{'abbr': 'lwda', 'code': 1247, 'title': 'Long window daily archive'},
{'abbr': 'lwwv', 'code': 1248, 'title': 'Long window wave'},
{'abbr': 'elda',
'code': 1249,
'title': 'Ensemble Long window Data Assimilation'},
{'abbr': 'ewla',
'code': 1250,
'title': 'Ensemble Wave Long window data Assimilation'},
{'abbr': 'wamd', 'code': 1251, 'title': 'Wave monthly means of daily means'},
{'abbr': 'gfas', 'code': 1252, 'title': 'Global fire assimilation system'},
{'abbr': 'cnrm', 'code': 2231, 'title': 'Meteo France climate centre'},
{'abbr': 'mpic', 'code': 2232, 'title': 'Max Plank Institute'},
{'abbr': 'ukmo', 'code': 2233, 'title': 'UKMO climate centre'}) | 0.798933 | 0.547222 |
import os
if __name__ == '__main__':
amplxe_cl_path = '/opt/intel/vtune_amplifier/bin64/amplxe-cl'
# dataset and parameters
dataset_dir_path = '/home/yche/GitRepos/ScanOptimizing/dataset/'
dataset_path_lst = map(lambda file_name: dataset_dir_path + file_name,
['snap_orkut', 'webgraph_webbase', 'webgraph_twitter', 'snap_friendster'])
eps_lst = map(str, [float(i + 1) / 10 for i in range(9)])
# exec path
ppscan0_path = '/home/yche/GitRepos/ScanOptimizing/pSCAN-refactor/build/pSCANParallelExp0'
ppscan1_path = '/home/yche/GitRepos/ScanOptimizing/pSCAN-refactor/build/pSCANParallelExp1'
exec_path_lst = [ppscan0_path, ppscan1_path]
# profiler tag, and filter list
profiler_tag_lst = ['advanced-hotspots', 'memory-access', 'general-exploration']
advanced_hotspots_tag_lst = ['-column="CPU Time:Self"', '-column="Instructions Retired:Self"']
memory_access_tag_lst = ['-column="CPU Time:Self"', '-column="Loads:Self"']
general_exploration_tag_lst = ['-column="Front-End Bound:Self"', '-column="Bad Speculation:Self"',
'-column="Back-End Bound:Core Bound:Self"',
'-column="Back-End Bound:Memory Bound:Self"',
'-column="Retiring:Self"']
profiler_filter_lst = [' '.join(advanced_hotspots_tag_lst), ' '.join(memory_access_tag_lst),
' '.join(general_exploration_tag_lst)]
profiler_tag_abbr_lst = ['ah', 'macc', 'ge']
# result file root folder
result_dir = '/home/yche/workspace/vtune_data/'
csv_report_root_dir = '/home/yche/workspace/vtune_report/'
for dataset_path in dataset_path_lst:
dataset_name = dataset_path.split(os.sep)[-1].split('_')[-1]
my_folder_prefix = csv_report_root_dir + dataset_name
os.system('mkdir -p ' + my_folder_prefix)
for eps in eps_lst:
for exec_path in exec_path_lst:
exec_name = exec_path.split(os.sep)[-1]
for idx, profiler_tag in enumerate(profiler_tag_lst):
profiler_filter_tag = profiler_filter_lst[idx]
result_path = result_dir + '-'.join([dataset_name, exec_name, eps, profiler_tag])
csv_file_path = my_folder_prefix + os.sep + '-'.join(
[eps, exec_name, profiler_tag_abbr_lst[idx]])
my_cmd = ' '.join([
amplxe_cl_path, '-R top-down', '-result-dir', result_path,
'-group-by function', '-filter function=GraphParallelExp::IntersectNeighborSets',
profiler_filter_tag, '-report-output', csv_file_path, '-format csv -csv-delimiter comma'])
print my_cmd
os.system(my_cmd) | paper/deprecated/set-intersection/report_set_intersection_statistics.py | import os
if __name__ == '__main__':
amplxe_cl_path = '/opt/intel/vtune_amplifier/bin64/amplxe-cl'
# dataset and parameters
dataset_dir_path = '/home/yche/GitRepos/ScanOptimizing/dataset/'
dataset_path_lst = map(lambda file_name: dataset_dir_path + file_name,
['snap_orkut', 'webgraph_webbase', 'webgraph_twitter', 'snap_friendster'])
eps_lst = map(str, [float(i + 1) / 10 for i in range(9)])
# exec path
ppscan0_path = '/home/yche/GitRepos/ScanOptimizing/pSCAN-refactor/build/pSCANParallelExp0'
ppscan1_path = '/home/yche/GitRepos/ScanOptimizing/pSCAN-refactor/build/pSCANParallelExp1'
exec_path_lst = [ppscan0_path, ppscan1_path]
# profiler tag, and filter list
profiler_tag_lst = ['advanced-hotspots', 'memory-access', 'general-exploration']
advanced_hotspots_tag_lst = ['-column="CPU Time:Self"', '-column="Instructions Retired:Self"']
memory_access_tag_lst = ['-column="CPU Time:Self"', '-column="Loads:Self"']
general_exploration_tag_lst = ['-column="Front-End Bound:Self"', '-column="Bad Speculation:Self"',
'-column="Back-End Bound:Core Bound:Self"',
'-column="Back-End Bound:Memory Bound:Self"',
'-column="Retiring:Self"']
profiler_filter_lst = [' '.join(advanced_hotspots_tag_lst), ' '.join(memory_access_tag_lst),
' '.join(general_exploration_tag_lst)]
profiler_tag_abbr_lst = ['ah', 'macc', 'ge']
# result file root folder
result_dir = '/home/yche/workspace/vtune_data/'
csv_report_root_dir = '/home/yche/workspace/vtune_report/'
for dataset_path in dataset_path_lst:
dataset_name = dataset_path.split(os.sep)[-1].split('_')[-1]
my_folder_prefix = csv_report_root_dir + dataset_name
os.system('mkdir -p ' + my_folder_prefix)
for eps in eps_lst:
for exec_path in exec_path_lst:
exec_name = exec_path.split(os.sep)[-1]
for idx, profiler_tag in enumerate(profiler_tag_lst):
profiler_filter_tag = profiler_filter_lst[idx]
result_path = result_dir + '-'.join([dataset_name, exec_name, eps, profiler_tag])
csv_file_path = my_folder_prefix + os.sep + '-'.join(
[eps, exec_name, profiler_tag_abbr_lst[idx]])
my_cmd = ' '.join([
amplxe_cl_path, '-R top-down', '-result-dir', result_path,
'-group-by function', '-filter function=GraphParallelExp::IntersectNeighborSets',
profiler_filter_tag, '-report-output', csv_file_path, '-format csv -csv-delimiter comma'])
print my_cmd
os.system(my_cmd) | 0.176069 | 0.074433 |
# -*- coding: utf-8 -*-
import os, sys, traceback
# Config
from Components.config import *
from Components.Sources.StaticText import StaticText
# Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
from enigma import eSize, ePoint, getDesktop
from Screens.Screen import Screen
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS
class ShowLogScreen(Screen):
def __init__(self, session, logFile, titleText,firstLineText, lastLineText):
Screen.__init__(self, session)
self.skinName = ["TestBox", "Console"]
title = ""
text = ""
self.logFile = logFile
self.titleText = titleText
self.firstLineText = firstLineText
self.lastLineText = lastLineText
self["text"] = ScrollLabel("")
self["actions"] = ActionMap(["WizardActions", "DirectionActions", "ChannelSelectBaseActions"],
{
"ok": self.cancel,
"back": self.cancel,
"up": self["text"].pageUp,
"down": self["text"].pageDown,
"left": self["text"].pageUp,
"right": self["text"].pageDown,
"nextBouquet": self["text"].lastPage,
"prevBouquet": self.firstPage,
}, -1)
self.onLayoutFinish.append(self.readLog)
def cancel(self):
self.close()
def setText(self, text):
self["text"].setText(text)
def close(self):
Screen.close(self)
def firstPage(self):
self["text"].long_text.move(ePoint(0,0))
self["text"].updateScrollbar()
def readLog(self):
# Set title and text
title = _("Show Log file") + " - AutoTimer " + self.titleText
text = _("Reading log file...\n") + self.logFile + _("\nCancel?")
self.setTitle(title)
self.setText(text)
if not fileExists(self.logFile):
self.setText(_("No file found"))
elif not os.path.getsize(self.logFile) == 0:
file = open(self.logFile, "r")
text = file.read()
file.close()
try:
if self.firstLineText != "":
text = self.firstLineText + text
if self.lastLineText != "":
text = text + self.lastLineText
self.setText(text)
self["text"].lastPage()
except:
pass | autotimer/src/ShowLogScreen.py | # -*- coding: utf-8 -*-
import os, sys, traceback
# Config
from Components.config import *
from Components.Sources.StaticText import StaticText
# Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
from enigma import eSize, ePoint, getDesktop
from Screens.Screen import Screen
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS
class ShowLogScreen(Screen):
def __init__(self, session, logFile, titleText,firstLineText, lastLineText):
Screen.__init__(self, session)
self.skinName = ["TestBox", "Console"]
title = ""
text = ""
self.logFile = logFile
self.titleText = titleText
self.firstLineText = firstLineText
self.lastLineText = lastLineText
self["text"] = ScrollLabel("")
self["actions"] = ActionMap(["WizardActions", "DirectionActions", "ChannelSelectBaseActions"],
{
"ok": self.cancel,
"back": self.cancel,
"up": self["text"].pageUp,
"down": self["text"].pageDown,
"left": self["text"].pageUp,
"right": self["text"].pageDown,
"nextBouquet": self["text"].lastPage,
"prevBouquet": self.firstPage,
}, -1)
self.onLayoutFinish.append(self.readLog)
def cancel(self):
self.close()
def setText(self, text):
self["text"].setText(text)
def close(self):
Screen.close(self)
def firstPage(self):
self["text"].long_text.move(ePoint(0,0))
self["text"].updateScrollbar()
def readLog(self):
# Set title and text
title = _("Show Log file") + " - AutoTimer " + self.titleText
text = _("Reading log file...\n") + self.logFile + _("\nCancel?")
self.setTitle(title)
self.setText(text)
if not fileExists(self.logFile):
self.setText(_("No file found"))
elif not os.path.getsize(self.logFile) == 0:
file = open(self.logFile, "r")
text = file.read()
file.close()
try:
if self.firstLineText != "":
text = self.firstLineText + text
if self.lastLineText != "":
text = text + self.lastLineText
self.setText(text)
self["text"].lastPage()
except:
pass | 0.071701 | 0.057098 |
__author__ = '<NAME>'
__date__ = '19/03/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '<PASSWORD>'
import qgis # NOQA
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsProjectMetadata,
QgsAbstractMetadataBase,
QgsProject,
QgsNativeProjectMetadataValidator)
from qgis.PyQt.QtCore import (QDate,
QTime,
QDateTime)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtTest import QSignalSpy
start_app()
class TestQgsProjectMetadata(unittest.TestCase):
def testGettersSetters(self):
m = QgsProjectMetadata()
m.setIdentifier('identifier')
self.assertEqual(m.identifier(), 'identifier')
m.setParentIdentifier('parent identifier')
self.assertEqual(m.parentIdentifier(), 'parent identifier')
m.setLanguage('en-us')
self.assertEqual(m.language(), 'en-us')
m.setType('type')
self.assertEqual(m.type(), 'type')
m.setTitle('title')
self.assertEqual(m.title(), 'title')
m.setCategories(['category'])
self.assertEqual(m.categories(), ['category'])
m.setAbstract('abstract')
self.assertEqual(m.abstract(), 'abstract')
m.setHistory(['loaded into QGIS'])
self.assertEqual(m.history(), ['loaded into QGIS'])
m.setHistory(['accidentally deleted some features'])
self.assertEqual(m.history(), ['accidentally deleted some features'])
m.addHistoryItem('panicked and deleted more')
self.assertEqual(m.history(), ['accidentally deleted some features', 'panicked and deleted more'])
m.setAuthor('my author')
self.assertEqual(m.author(), 'my author')
m.setCreationDateTime(QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
self.assertEqual(m.creationDateTime(), QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
def createTestMetadata(self):
"""
Returns a standard metadata which can be tested with checkExpectedMetadata
"""
m = QgsProjectMetadata()
m.setIdentifier('1234')
m.setParentIdentifier('xyz')
m.setLanguage('en-CA')
m.setType('project')
m.setTitle('roads')
m.setAbstract('my roads')
m.setHistory(['history a', 'history b'])
m.setKeywords({
'GEMET': ['kw1', 'kw2'],
'gmd:topicCategory': ['natural'],
})
c = QgsAbstractMetadataBase.Contact()
c.name = '<NAME>'
c.organization = 'ACME'
c.position = 'staff'
c.voice = '1500 515 555'
c.fax = 'xx.xxx.xxx.xxxx'
c.email = '<EMAIL>'
c.role = 'pointOfContact'
address = QgsAbstractMetadataBase.Address()
address.type = 'postal'
address.address = '123 Main Street'
address.city = 'anycity'
address.administrativeArea = 'anyprovince'
address.postalCode = '90210'
address.country = 'Canada'
c.addresses = [address]
m.setContacts([c])
l = QgsAbstractMetadataBase.Link()
l.name = 'geonode:roads'
l.type = 'OGC:WMS'
l.description = 'my GeoNode road layer'
l.url = 'http://example.org/wms'
l2 = QgsAbstractMetadataBase.Link()
l2.name = 'geonode:roads'
l2.type = 'OGC:WFS'
l2.description = 'my GeoNode road layer'
l2.url = 'http://example.org/wfs'
l3 = QgsAbstractMetadataBase.Link()
l3.name = 'roads'
l3.type = 'WWW:LINK'
l3.description = 'full dataset download'
l3.url = 'http://example.org/roads.tgz'
l3.format = 'ESRI Shapefile'
l3.mimeType = 'application/gzip'
l3.size = '283676'
m.setLinks([l, l2, l3])
m.setAuthor('my author')
m.setCreationDateTime(QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
return m
def testEquality(self):
md = self.createTestMetadata()
md2 = self.createTestMetadata()
self.assertEqual(md, md2)
md2.setAuthor('xx')
self.assertNotEqual(md, md2)
md2 = self.createTestMetadata()
md2.setCreationDateTime(QDateTime(QDate(2003, 12, 17), QTime(9, 30, 47)))
self.assertNotEqual(md, md2)
def checkExpectedMetadata(self, m):
"""
Checks that a metadata object matches that returned by createTestMetadata
"""
self.assertEqual(m.identifier(), '1234')
self.assertEqual(m.parentIdentifier(), 'xyz')
self.assertEqual(m.language(), 'en-CA')
self.assertEqual(m.type(), 'project')
self.assertEqual(m.title(), 'roads')
self.assertEqual(m.abstract(), 'my roads')
self.assertEqual(m.history(), ['history a', 'history b'])
self.assertEqual(
m.keywords(),
{'GEMET': ['kw1', 'kw2'], 'gmd:topicCategory': ['natural']})
self.assertEqual(m.contacts()[0].name, '<NAME>')
self.assertEqual(m.contacts()[0].organization, 'ACME')
self.assertEqual(m.contacts()[0].position, 'staff')
self.assertEqual(m.contacts()[0].voice, '1500 515 555')
self.assertEqual(m.contacts()[0].fax, 'xx.xxx.xxx.xxxx')
self.assertEqual(m.contacts()[0].email, '<EMAIL>')
self.assertEqual(m.contacts()[0].role, 'pointOfContact')
self.assertEqual(m.contacts()[0].addresses[0].type, 'postal')
self.assertEqual(m.contacts()[0].addresses[0].address, '123 Main Street')
self.assertEqual(m.contacts()[0].addresses[0].city, 'anycity')
self.assertEqual(m.contacts()[0].addresses[0].administrativeArea, 'anyprovince')
self.assertEqual(m.contacts()[0].addresses[0].postalCode, '90210')
self.assertEqual(m.contacts()[0].addresses[0].country, 'Canada')
self.assertEqual(m.links()[0].name, 'geonode:roads')
self.assertEqual(m.links()[0].type, 'OGC:WMS')
self.assertEqual(m.links()[0].description, 'my GeoNode road layer')
self.assertEqual(m.links()[0].url, 'http://example.org/wms')
self.assertEqual(m.links()[1].name, 'geonode:roads')
self.assertEqual(m.links()[1].type, 'OGC:WFS')
self.assertEqual(m.links()[1].description, 'my GeoNode road layer')
self.assertEqual(m.links()[1].url, 'http://example.org/wfs')
self.assertEqual(m.links()[2].name, 'roads')
self.assertEqual(m.links()[2].type, 'WWW:LINK')
self.assertEqual(m.links()[2].description, 'full dataset download')
self.assertEqual(m.links()[2].url, 'http://example.org/roads.tgz')
self.assertEqual(m.links()[2].format, 'ESRI Shapefile')
self.assertEqual(m.links()[2].mimeType, 'application/gzip')
self.assertEqual(m.links()[2].size, '283676')
self.assertEqual(m.author(), 'my author')
self.assertEqual(m.creationDateTime(), QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
def testStandard(self):
m = self.createTestMetadata()
self.checkExpectedMetadata(m)
def testSaveReadFromXml(self):
"""
Test saving and reading metadata from a XML.
"""
# save metadata to XML
m = self.createTestMetadata()
doc = QDomDocument("testdoc")
elem = doc.createElement("metadata")
self.assertTrue(m.writeMetadataXml(elem, doc))
# read back from XML and check result
m2 = QgsProjectMetadata()
m2.readMetadataXml(elem)
self.checkExpectedMetadata(m2)
def testValidateNative(self): # spellok
"""
Test validating metadata against QGIS native schema
"""
m = self.createTestMetadata()
v = QgsNativeProjectMetadataValidator()
res, list = v.validate(m)
self.assertTrue(res)
self.assertFalse(list)
# corrupt metadata piece by piece...
m = self.createTestMetadata()
m.setIdentifier('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'identifier')
m = self.createTestMetadata()
m.setLanguage('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'language')
m = self.createTestMetadata()
m.setType('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'type')
m = self.createTestMetadata()
m.setTitle('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'title')
m = self.createTestMetadata()
m.setAbstract('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'abstract')
m = self.createTestMetadata()
m.setContacts([])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'contacts')
m = self.createTestMetadata()
m.setLinks([])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
m = self.createTestMetadata()
m.setKeywords({'': ['kw1', 'kw2']})
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'keywords')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
m.setKeywords({'AA': []})
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'keywords')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
c = m.contacts()[0]
c.name = ''
m.setContacts([c])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'contacts')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
l = m.links()[0]
l.name = ''
m.setLinks([l])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
l = m.links()[0]
l.type = ''
m.setLinks([l])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
l = m.links()[0]
l.url = ''
m.setLinks([l])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
m.setAuthor('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'author')
m = self.createTestMetadata()
m.setCreationDateTime(QDateTime())
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'creation')
def testProject(self):
p = QgsProject()
m = self.createTestMetadata()
metadata_changed_spy = QSignalSpy(p.metadataChanged)
p.setMetadata(m)
self.assertEqual(len(metadata_changed_spy), 1)
self.checkExpectedMetadata(p.metadata())
p.clear()
self.assertEqual(len(metadata_changed_spy), 2)
self.assertEqual(p.metadata().title(), '')
# test that the project title is just a shortcut to the metadata title field
p.setTitle('my title')
self.assertEqual(p.metadata().title(), 'my title')
m.setTitle('my title 2')
p.setMetadata(m)
self.assertEqual(p.title(), 'my title 2')
if __name__ == '__main__':
unittest.main() | tests/src/python/test_qgsprojectmetadata.py | __author__ = '<NAME>'
__date__ = '19/03/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '<PASSWORD>'
import qgis # NOQA
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsProjectMetadata,
QgsAbstractMetadataBase,
QgsProject,
QgsNativeProjectMetadataValidator)
from qgis.PyQt.QtCore import (QDate,
QTime,
QDateTime)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtTest import QSignalSpy
start_app()
class TestQgsProjectMetadata(unittest.TestCase):
def testGettersSetters(self):
m = QgsProjectMetadata()
m.setIdentifier('identifier')
self.assertEqual(m.identifier(), 'identifier')
m.setParentIdentifier('parent identifier')
self.assertEqual(m.parentIdentifier(), 'parent identifier')
m.setLanguage('en-us')
self.assertEqual(m.language(), 'en-us')
m.setType('type')
self.assertEqual(m.type(), 'type')
m.setTitle('title')
self.assertEqual(m.title(), 'title')
m.setCategories(['category'])
self.assertEqual(m.categories(), ['category'])
m.setAbstract('abstract')
self.assertEqual(m.abstract(), 'abstract')
m.setHistory(['loaded into QGIS'])
self.assertEqual(m.history(), ['loaded into QGIS'])
m.setHistory(['accidentally deleted some features'])
self.assertEqual(m.history(), ['accidentally deleted some features'])
m.addHistoryItem('panicked and deleted more')
self.assertEqual(m.history(), ['accidentally deleted some features', 'panicked and deleted more'])
m.setAuthor('my author')
self.assertEqual(m.author(), 'my author')
m.setCreationDateTime(QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
self.assertEqual(m.creationDateTime(), QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
def createTestMetadata(self):
"""
Returns a standard metadata which can be tested with checkExpectedMetadata
"""
m = QgsProjectMetadata()
m.setIdentifier('1234')
m.setParentIdentifier('xyz')
m.setLanguage('en-CA')
m.setType('project')
m.setTitle('roads')
m.setAbstract('my roads')
m.setHistory(['history a', 'history b'])
m.setKeywords({
'GEMET': ['kw1', 'kw2'],
'gmd:topicCategory': ['natural'],
})
c = QgsAbstractMetadataBase.Contact()
c.name = '<NAME>'
c.organization = 'ACME'
c.position = 'staff'
c.voice = '1500 515 555'
c.fax = 'xx.xxx.xxx.xxxx'
c.email = '<EMAIL>'
c.role = 'pointOfContact'
address = QgsAbstractMetadataBase.Address()
address.type = 'postal'
address.address = '123 Main Street'
address.city = 'anycity'
address.administrativeArea = 'anyprovince'
address.postalCode = '90210'
address.country = 'Canada'
c.addresses = [address]
m.setContacts([c])
l = QgsAbstractMetadataBase.Link()
l.name = 'geonode:roads'
l.type = 'OGC:WMS'
l.description = 'my GeoNode road layer'
l.url = 'http://example.org/wms'
l2 = QgsAbstractMetadataBase.Link()
l2.name = 'geonode:roads'
l2.type = 'OGC:WFS'
l2.description = 'my GeoNode road layer'
l2.url = 'http://example.org/wfs'
l3 = QgsAbstractMetadataBase.Link()
l3.name = 'roads'
l3.type = 'WWW:LINK'
l3.description = 'full dataset download'
l3.url = 'http://example.org/roads.tgz'
l3.format = 'ESRI Shapefile'
l3.mimeType = 'application/gzip'
l3.size = '283676'
m.setLinks([l, l2, l3])
m.setAuthor('my author')
m.setCreationDateTime(QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
return m
def testEquality(self):
md = self.createTestMetadata()
md2 = self.createTestMetadata()
self.assertEqual(md, md2)
md2.setAuthor('xx')
self.assertNotEqual(md, md2)
md2 = self.createTestMetadata()
md2.setCreationDateTime(QDateTime(QDate(2003, 12, 17), QTime(9, 30, 47)))
self.assertNotEqual(md, md2)
def checkExpectedMetadata(self, m):
"""
Checks that a metadata object matches that returned by createTestMetadata
"""
self.assertEqual(m.identifier(), '1234')
self.assertEqual(m.parentIdentifier(), 'xyz')
self.assertEqual(m.language(), 'en-CA')
self.assertEqual(m.type(), 'project')
self.assertEqual(m.title(), 'roads')
self.assertEqual(m.abstract(), 'my roads')
self.assertEqual(m.history(), ['history a', 'history b'])
self.assertEqual(
m.keywords(),
{'GEMET': ['kw1', 'kw2'], 'gmd:topicCategory': ['natural']})
self.assertEqual(m.contacts()[0].name, '<NAME>')
self.assertEqual(m.contacts()[0].organization, 'ACME')
self.assertEqual(m.contacts()[0].position, 'staff')
self.assertEqual(m.contacts()[0].voice, '1500 515 555')
self.assertEqual(m.contacts()[0].fax, 'xx.xxx.xxx.xxxx')
self.assertEqual(m.contacts()[0].email, '<EMAIL>')
self.assertEqual(m.contacts()[0].role, 'pointOfContact')
self.assertEqual(m.contacts()[0].addresses[0].type, 'postal')
self.assertEqual(m.contacts()[0].addresses[0].address, '123 Main Street')
self.assertEqual(m.contacts()[0].addresses[0].city, 'anycity')
self.assertEqual(m.contacts()[0].addresses[0].administrativeArea, 'anyprovince')
self.assertEqual(m.contacts()[0].addresses[0].postalCode, '90210')
self.assertEqual(m.contacts()[0].addresses[0].country, 'Canada')
self.assertEqual(m.links()[0].name, 'geonode:roads')
self.assertEqual(m.links()[0].type, 'OGC:WMS')
self.assertEqual(m.links()[0].description, 'my GeoNode road layer')
self.assertEqual(m.links()[0].url, 'http://example.org/wms')
self.assertEqual(m.links()[1].name, 'geonode:roads')
self.assertEqual(m.links()[1].type, 'OGC:WFS')
self.assertEqual(m.links()[1].description, 'my GeoNode road layer')
self.assertEqual(m.links()[1].url, 'http://example.org/wfs')
self.assertEqual(m.links()[2].name, 'roads')
self.assertEqual(m.links()[2].type, 'WWW:LINK')
self.assertEqual(m.links()[2].description, 'full dataset download')
self.assertEqual(m.links()[2].url, 'http://example.org/roads.tgz')
self.assertEqual(m.links()[2].format, 'ESRI Shapefile')
self.assertEqual(m.links()[2].mimeType, 'application/gzip')
self.assertEqual(m.links()[2].size, '283676')
self.assertEqual(m.author(), 'my author')
self.assertEqual(m.creationDateTime(), QDateTime(QDate(2001, 12, 17), QTime(9, 30, 47)))
def testStandard(self):
m = self.createTestMetadata()
self.checkExpectedMetadata(m)
def testSaveReadFromXml(self):
"""
Test saving and reading metadata from a XML.
"""
# save metadata to XML
m = self.createTestMetadata()
doc = QDomDocument("testdoc")
elem = doc.createElement("metadata")
self.assertTrue(m.writeMetadataXml(elem, doc))
# read back from XML and check result
m2 = QgsProjectMetadata()
m2.readMetadataXml(elem)
self.checkExpectedMetadata(m2)
def testValidateNative(self): # spellok
"""
Test validating metadata against QGIS native schema
"""
m = self.createTestMetadata()
v = QgsNativeProjectMetadataValidator()
res, list = v.validate(m)
self.assertTrue(res)
self.assertFalse(list)
# corrupt metadata piece by piece...
m = self.createTestMetadata()
m.setIdentifier('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'identifier')
m = self.createTestMetadata()
m.setLanguage('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'language')
m = self.createTestMetadata()
m.setType('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'type')
m = self.createTestMetadata()
m.setTitle('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'title')
m = self.createTestMetadata()
m.setAbstract('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'abstract')
m = self.createTestMetadata()
m.setContacts([])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'contacts')
m = self.createTestMetadata()
m.setLinks([])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
m = self.createTestMetadata()
m.setKeywords({'': ['kw1', 'kw2']})
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'keywords')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
m.setKeywords({'AA': []})
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'keywords')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
c = m.contacts()[0]
c.name = ''
m.setContacts([c])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'contacts')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
l = m.links()[0]
l.name = ''
m.setLinks([l])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
l = m.links()[0]
l.type = ''
m.setLinks([l])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
l = m.links()[0]
l.url = ''
m.setLinks([l])
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'links')
self.assertEqual(list[0].identifier, 0)
m = self.createTestMetadata()
m.setAuthor('')
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'author')
m = self.createTestMetadata()
m.setCreationDateTime(QDateTime())
res, list = v.validate(m)
self.assertFalse(res)
self.assertEqual(list[0].section, 'creation')
def testProject(self):
p = QgsProject()
m = self.createTestMetadata()
metadata_changed_spy = QSignalSpy(p.metadataChanged)
p.setMetadata(m)
self.assertEqual(len(metadata_changed_spy), 1)
self.checkExpectedMetadata(p.metadata())
p.clear()
self.assertEqual(len(metadata_changed_spy), 2)
self.assertEqual(p.metadata().title(), '')
# test that the project title is just a shortcut to the metadata title field
p.setTitle('my title')
self.assertEqual(p.metadata().title(), 'my title')
m.setTitle('my title 2')
p.setMetadata(m)
self.assertEqual(p.title(), 'my title 2')
if __name__ == '__main__':
unittest.main() | 0.642881 | 0.233051 |
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String
from sqlalchemy.orm import backref, relationship
from sqlalchemy.schema import UniqueConstraint
from c3po.db.base import Base
class UserLikes(Base):
__tablename__ = "user_likes"
# Columns
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("user.id"), primary_key=True)
link_id = Column(Integer, ForeignKey("link.id"), primary_key=True)
reaction_type = Column(Integer)
# Relationships
link = relationship(
"Link", backref=backref("liked_by", cascade="all, delete-orphan")
)
user = relationship("User", backref=backref("likes", cascade="all, delete-orphan"))
# Helper methods
def __init__(self, link=None, user=None, reaction_type=None):
self.link = link
self.user = user
self.reaction_type = reaction_type
class UserPosts(Base):
__tablename__ = "user_posts"
# Columns
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer, ForeignKey("user.id"), primary_key=True)
link_id = Column(Integer, ForeignKey("link.id"), primary_key=True)
share_date = Column(DateTime)
caption = Column(String(160))
facebook_id = Column(String(160))
likes_count = Column(Integer)
permalink_url = Column(String)
# Relationships
link = relationship(
"Link", backref=backref("posted_by", cascade="all, delete-orphan")
)
user = relationship(
"User", backref=backref("user_links", cascade="all, delete-orphan")
)
# Helper methods
def __init__(
self,
user=None,
link=None,
share_date=None,
caption=None,
facebook_id=None,
permalink_url=None,
):
self.user = user
self.link = link
self.share_date = share_date
self.caption = caption
self.facebook_id = facebook_id
self.permalink_url = permalink_url
class User(Base):
__tablename__ = "user"
# Columns
id = Column(Integer, primary_key=True)
name = Column("name", String(32), nullable=False)
facebook_id = Column("facebook_id", String, unique=True)
image = Column("image", String(160))
post_count = Column("post_count", Integer)
liked_count = Column("liked_count", Integer)
likes_count = Column("likes_count", Integer)
__table_args__ = (UniqueConstraint("id", "facebook_id", name="user_id"),)
def __init__(self, name, facebook_id, image):
self.name = name
self.facebook_id = facebook_id
self.image = image | c3po/db/models/user.py | from sqlalchemy import Column, DateTime, ForeignKey, Integer, String
from sqlalchemy.orm import backref, relationship
from sqlalchemy.schema import UniqueConstraint
from c3po.db.base import Base
class UserLikes(Base):
__tablename__ = "user_likes"
# Columns
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("user.id"), primary_key=True)
link_id = Column(Integer, ForeignKey("link.id"), primary_key=True)
reaction_type = Column(Integer)
# Relationships
link = relationship(
"Link", backref=backref("liked_by", cascade="all, delete-orphan")
)
user = relationship("User", backref=backref("likes", cascade="all, delete-orphan"))
# Helper methods
def __init__(self, link=None, user=None, reaction_type=None):
self.link = link
self.user = user
self.reaction_type = reaction_type
class UserPosts(Base):
__tablename__ = "user_posts"
# Columns
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(Integer, ForeignKey("user.id"), primary_key=True)
link_id = Column(Integer, ForeignKey("link.id"), primary_key=True)
share_date = Column(DateTime)
caption = Column(String(160))
facebook_id = Column(String(160))
likes_count = Column(Integer)
permalink_url = Column(String)
# Relationships
link = relationship(
"Link", backref=backref("posted_by", cascade="all, delete-orphan")
)
user = relationship(
"User", backref=backref("user_links", cascade="all, delete-orphan")
)
# Helper methods
def __init__(
self,
user=None,
link=None,
share_date=None,
caption=None,
facebook_id=None,
permalink_url=None,
):
self.user = user
self.link = link
self.share_date = share_date
self.caption = caption
self.facebook_id = facebook_id
self.permalink_url = permalink_url
class User(Base):
__tablename__ = "user"
# Columns
id = Column(Integer, primary_key=True)
name = Column("name", String(32), nullable=False)
facebook_id = Column("facebook_id", String, unique=True)
image = Column("image", String(160))
post_count = Column("post_count", Integer)
liked_count = Column("liked_count", Integer)
likes_count = Column("likes_count", Integer)
__table_args__ = (UniqueConstraint("id", "facebook_id", name="user_id"),)
def __init__(self, name, facebook_id, image):
self.name = name
self.facebook_id = facebook_id
self.image = image | 0.628179 | 0.196498 |
import argparse
import pdb
import traceback
from dataclasses import dataclass
from typing import List, Tuple
Layer = List[List[int]]
@dataclass
class Bitmap:
width: int
height: int
layer: Layer
@classmethod
def from_image(cls, image):
bitmap = Bitmap(image.width, image.height, [])
bitmap.layer = [[None] * image.width for _ in range(image.height)]
for layer in reversed(image.layers):
for row in range(bitmap.height):
for col in range(bitmap.width):
if layer[row][col] != 2:
bitmap.layer[row][col] = layer[row][col]
return bitmap
def dump(self) -> None:
for row in range(self.height):
for col in range(self.width):
print(("\u2588", "\u2591")[self.layer[row][col]], end="")
print("")
@dataclass
class Image:
width: int
height: int
layers: List[Layer]
num_layers: int = 0
@classmethod
def from_pixels(cls, pixels: List[int], width: int, height: int):
image = Image(width, height, [])
image.num_layers = len(pixels) // (width * height)
for l in range(image.num_layers):
layer: Layer = []
for row in range(height):
start: int = l * width * height + width * row
layer.append(pixels[start : start + width])
image.layers.append(layer[:])
return image
def count_pixels_per_layer(self, target: int) -> List[int]:
totals: List[int] = []
for idx, layer in enumerate(self.layers):
total: int = 0
for row in layer:
total += row.count(target)
totals.append(total)
return totals
def dump_layer(self, idx: int) -> None:
layer = self.layers[idx]
for row in range(self.height):
for col in range(self.width):
print(layer[row][col], end="")
print("")
def solve(pixels: List[int], width: int, height: int) -> Tuple[int, int]:
image: Image = Image.from_pixels(pixels, width, height)
zeros_per_layer = image.count_pixels_per_layer(0)
min_zero_layer = zeros_per_layer.index(min(zeros_per_layer))
ones_per_layer = image.count_pixels_per_layer(1)
twos_per_layer = image.count_pixels_per_layer(2)
one: int = ones_per_layer[min_zero_layer] * twos_per_layer[min_zero_layer]
two: int = -1
bitmap: Bitmap = Bitmap.from_image(image)
bitmap.dump()
return (one, two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Advent of Code - 2019 - Day 8 - Space Image Format."
)
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"width",
type=int,
default=25,
nargs="?",
help="The width of the image. (Default %(default)s)",
)
parser.add_argument(
"height",
type=int,
default=6,
nargs="?",
help="The height of the image. (Default %(default)s)",
)
args = parser.parse_args()
pixels: List[int] = []
with open(args.input) as inf:
for line in inf:
pixels += list(map(int, list(line.strip())))
try:
print(solve(pixels, args.width, args.height))
except Exception:
traceback.print_exc()
pdb.post_mortem() | 2019/08-space_image/solve.py |
import argparse
import pdb
import traceback
from dataclasses import dataclass
from typing import List, Tuple
Layer = List[List[int]]
@dataclass
class Bitmap:
width: int
height: int
layer: Layer
@classmethod
def from_image(cls, image):
bitmap = Bitmap(image.width, image.height, [])
bitmap.layer = [[None] * image.width for _ in range(image.height)]
for layer in reversed(image.layers):
for row in range(bitmap.height):
for col in range(bitmap.width):
if layer[row][col] != 2:
bitmap.layer[row][col] = layer[row][col]
return bitmap
def dump(self) -> None:
for row in range(self.height):
for col in range(self.width):
print(("\u2588", "\u2591")[self.layer[row][col]], end="")
print("")
@dataclass
class Image:
width: int
height: int
layers: List[Layer]
num_layers: int = 0
@classmethod
def from_pixels(cls, pixels: List[int], width: int, height: int):
image = Image(width, height, [])
image.num_layers = len(pixels) // (width * height)
for l in range(image.num_layers):
layer: Layer = []
for row in range(height):
start: int = l * width * height + width * row
layer.append(pixels[start : start + width])
image.layers.append(layer[:])
return image
def count_pixels_per_layer(self, target: int) -> List[int]:
totals: List[int] = []
for idx, layer in enumerate(self.layers):
total: int = 0
for row in layer:
total += row.count(target)
totals.append(total)
return totals
def dump_layer(self, idx: int) -> None:
layer = self.layers[idx]
for row in range(self.height):
for col in range(self.width):
print(layer[row][col], end="")
print("")
def solve(pixels: List[int], width: int, height: int) -> Tuple[int, int]:
image: Image = Image.from_pixels(pixels, width, height)
zeros_per_layer = image.count_pixels_per_layer(0)
min_zero_layer = zeros_per_layer.index(min(zeros_per_layer))
ones_per_layer = image.count_pixels_per_layer(1)
twos_per_layer = image.count_pixels_per_layer(2)
one: int = ones_per_layer[min_zero_layer] * twos_per_layer[min_zero_layer]
two: int = -1
bitmap: Bitmap = Bitmap.from_image(image)
bitmap.dump()
return (one, two)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Advent of Code - 2019 - Day 8 - Space Image Format."
)
parser.add_argument(
"input",
type=str,
default="input.txt",
nargs="?",
help="The puzzle input. (Default %(default)s)",
)
parser.add_argument(
"width",
type=int,
default=25,
nargs="?",
help="The width of the image. (Default %(default)s)",
)
parser.add_argument(
"height",
type=int,
default=6,
nargs="?",
help="The height of the image. (Default %(default)s)",
)
args = parser.parse_args()
pixels: List[int] = []
with open(args.input) as inf:
for line in inf:
pixels += list(map(int, list(line.strip())))
try:
print(solve(pixels, args.width, args.height))
except Exception:
traceback.print_exc()
pdb.post_mortem() | 0.697609 | 0.389024 |
import struct
import time
from rethinkdb import ql2_pb2
from rethinkdb.errors import ReqlAuthError, ReqlDriverError, ReqlTimeoutError, RqlCursorEmpty
from rethinkdb.net import Connection as ConnectionBase, Cursor, Query, Response, maybe_profile
from twisted.internet import defer, reactor
from twisted.internet.defer import CancelledError, Deferred, DeferredQueue, inlineCallbacks, returnValue
from twisted.internet.endpoints import clientFromString
from twisted.internet.error import TimeoutError
from twisted.internet.protocol import ClientFactory, Protocol
__all__ = ['Connection']
pResponse = ql2_pb2.Response.ResponseType
pQuery = ql2_pb2.Query.QueryType
class DatabaseProtocol(Protocol):
WAITING_FOR_HANDSHAKE = 0
READY = 1
def __init__(self, factory):
self.factory = factory
self.state = DatabaseProtocol.WAITING_FOR_HANDSHAKE
self._handlers = {
DatabaseProtocol.WAITING_FOR_HANDSHAKE: self._handleHandshake,
DatabaseProtocol.READY: self._handleResponse
}
self.buf = bytes()
self.buf_expected_length = 0
self.buf_token = None
self.wait_for_handshake = Deferred()
self._open = True
def connectionMade(self):
# Send immediately the handshake.
self.factory.handshake.reset()
self.transport.write(self.factory.handshake.next_message(None))
# Defer a timer which will callback when timed out and errback the
# wait_for_handshake. Otherwise, it will be cancelled in
# handleHandshake.
self._timeout_defer = reactor.callLater(self.factory.timeout,
self._handleHandshakeTimeout)
def connectionLost(self, reason):
self._open = False
def _handleHandshakeTimeout(self):
# If we are here, we failed to do the handshake before the timeout.
# We close the connection and raise an ReqlTimeoutError in the
# wait_for_handshake deferred.
self._open = False
self.transport.loseConnection()
self.wait_for_handshake.errback(ReqlTimeoutError())
def _handleHandshake(self, data):
try:
self.buf += data
while True:
end_index = self.buf.find(b'\0')
if end_index != -1:
response = self.buf[:end_index]
self.buf = self.buf[end_index + 1:]
request = self.factory.handshake.next_message(response)
if request is None:
# We're now ready to work with real data.
self.state = DatabaseProtocol.READY
# We cancel the scheduled timeout.
self._timeout_defer.cancel()
# We callback our wait_for_handshake.
self.wait_for_handshake.callback(None)
elif request != "":
self.transport.write(request)
else:
break
except Exception as e:
self.wait_for_handshake.errback(e)
def _handleResponse(self, data):
# If we have more than one response, we should handle all of them.
self.buf += data
while True:
# 1. Read the header, until we read the length of the awaited payload.
if self.buf_expected_length == 0:
if len(self.buf) >= 12:
token, length = struct.unpack('<qL', self.buf[:12])
self.buf_token = token
self.buf_expected_length = length
self.buf = self.buf[12:]
else:
# We quit the function, it is impossible to have read the
# entire payload at this point.
return
# 2. Buffer the data, until the size of the data match the expected
# length provided by the header.
if len(self.buf) < self.buf_expected_length:
return
self.factory.response_handler(self.buf_token, self.buf[:self.buf_expected_length])
self.buf = self.buf[self.buf_expected_length:]
self.buf_token = None
self.buf_expected_length = 0
def dataReceived(self, data):
try:
if self._open:
self._handlers[self.state](data)
except Exception as e:
self.transport.loseConnection()
raise ReqlDriverError('Driver failed to handle received data.'
'Error: {exc}. Dropping the connection.'.format(exc=str(e)))
class DatabaseProtoFactory(ClientFactory):
protocol = DatabaseProtocol
def __init__(self, timeout, response_handler, handshake):
self.timeout = timeout
self.handshake = handshake
self.response_handler = response_handler
def startedConnecting(self, connector):
pass
def buildProtocol(self, addr):
p = DatabaseProtocol(self)
return p
def clientConnectionLost(self, connector, reason):
pass
def clientConnectionFailed(self, connector, reason):
pass
class CursorItems(DeferredQueue):
def __init__(self):
super(CursorItems, self).__init__()
def cancel_getters(self, err):
"""
Cancel all waiters.
"""
for waiter in self.waiting[:]:
if not waiter.called:
waiter.errback(err)
self.waiting.remove(waiter)
def extend(self, data):
for k in data:
self.put(k)
def __len__(self):
return len(self.pending)
def __getitem__(self, index):
return self.pending[index]
def __iter__(self):
return iter(self.pending)
class TwistedCursor(Cursor):
    # Cursor variant whose item buffer is a CursorItems queue and whose
    # fetch/wait operations return Twisted Deferreds.
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('items_type', CursorItems)
        super(TwistedCursor, self).__init__(*args, **kwargs)
        # Deferreds created by fetch_next() that fire when new data arrives.
        self.waiting = list()
    def _extend(self, res_buf):
        Cursor._extend(self, res_buf)
        if self.error is not None:
            # Propagate the error to every blocked items.get().
            self.items.cancel_getters(self.error)
        # Wake every fetch_next() waiter; remove each so it fires only once.
        for d in self.waiting[:]:
            d.callback(None)
            self.waiting.remove(d)
    def _empty_error(self):
        return RqlCursorEmpty()
    @inlineCallbacks
    def fetch_next(self, wait=True):
        # Resolve *wait* into an optional timeout and an absolute deadline.
        timeout = Cursor._wait_to_timeout(wait)
        deadline = None if timeout is None else time.time() + timeout
        def wait_canceller(d):
            # Cancellation (from the callLater below) means we timed out.
            d.errback(ReqlTimeoutError())
        # Block until data is buffered or the cursor reports an error.
        while len(self.items) == 0 and self.error is None:
            self._maybe_fetch_batch()
            wait = Deferred(canceller=wait_canceller)
            self.waiting.append(wait)
            if deadline is not None:
                # Shrink the timeout by the time already spent waiting.
                timeout = max(0, deadline - time.time())
                reactor.callLater(timeout, lambda: wait.cancel())
            yield wait
        returnValue(not self._is_empty() or self._has_error())
    def _has_error(self):
        # True for real errors; the "cursor exhausted" sentinel is not one.
        return self.error and (not isinstance(self.error, RqlCursorEmpty))
    def _is_empty(self):
        return isinstance(self.error, RqlCursorEmpty) and len(self.items) == 0
    def _get_next(self, timeout):
        if len(self.items) == 0 and self.error is not None:
            return defer.fail(self.error)
        def raise_timeout(errback):
            # A cancelled get() means the callLater below fired first.
            if isinstance(errback.value, CancelledError):
                raise ReqlTimeoutError()
            else:
                raise errback.value
        item_defer = self.items.get()
        if timeout is not None:
            item_defer.addErrback(raise_timeout)
            reactor.callLater(timeout, lambda: item_defer.cancel())
        self._maybe_fetch_batch()
        return item_defer
class ConnectionInstance(object):
    # Twisted-specific transport layer driven by the shared ConnectionBase:
    # owns the protocol instance and routes responses to queries/cursors.
    def __init__(self, parent, start_reactor=False):
        self._parent = parent
        self._closing = False
        self._connection = None
        # token -> (Query, Deferred) for user queries awaiting a reply.
        self._user_queries = {}
        # token -> TwistedCursor receiving partial result batches.
        self._cursor_cache = {}
        if start_reactor:
            reactor.run()
    def client_port(self):
        # Local TCP port, or None when the connection is closed.
        if self.is_open():
            return self._connection.transport.getHost().port
    def client_address(self):
        # Local host address, or None when the connection is closed.
        if self.is_open():
            return self._connection.transport.getHost().host
    def _handleResponse(self, token, data):
        # Dispatch one raw response frame to its cursor or waiting query.
        try:
            cursor = self._cursor_cache.get(token)
            if cursor is not None:
                # Continuation data for an existing cursor.
                cursor._extend(data)
            elif token in self._user_queries:
                query, deferred = self._user_queries[token]
                res = Response(token, data,
                               self._parent._get_json_decoder(query))
                if res.type == pResponse.SUCCESS_ATOM:
                    deferred.callback(maybe_profile(res.data[0], res))
                elif res.type in (pResponse.SUCCESS_SEQUENCE,
                                  pResponse.SUCCESS_PARTIAL):
                    # Sequence results are surfaced to the caller as a cursor.
                    cursor = TwistedCursor(self, query, res)
                    deferred.callback(maybe_profile(cursor, res))
                elif res.type == pResponse.WAIT_COMPLETE:
                    deferred.callback(None)
                elif res.type == pResponse.SERVER_INFO:
                    deferred.callback(res.data[0])
                else:
                    deferred.errback(res.make_error(query))
                del self._user_queries[token]
            elif not self._closing:
                raise ReqlDriverError("Unexpected response received.")
        except Exception as e:
            # A malformed/unexpected frame poisons the connection: close it.
            if not self._closing:
                self.close(exception=e)
    @inlineCallbacks
    def _connectTimeout(self, factory, timeout):
        # Connect to the server, bounding the attempt by *timeout* seconds
        # when given; endpoint timeouts become ReqlTimeoutError.
        try:
            # TODO: use ssl options
            # TODO: this doesn't work for literal IPv6 addresses like '::1'
            args = "tcp:%s:%d" % (self._parent.host, self._parent.port)
            if timeout is not None:
                args = args + (":timeout=%d" % timeout)
            endpoint = clientFromString(reactor, args)
            p = yield endpoint.connect(factory)
            returnValue(p)
        except TimeoutError:
            raise ReqlTimeoutError()
    @inlineCallbacks
    def connect(self, timeout):
        factory = DatabaseProtoFactory(timeout, self._handleResponse,
                                       self._parent.handshake)
        # We connect to the server, and send the handshake payload.
        pConnection = None
        try:
            pConnection = yield self._connectTimeout(factory, timeout)
        except Exception as e:
            raise ReqlDriverError('Could not connect to {p.host}:{p.port}. Error: {exc}'
                                  .format(p=self._parent, exc=str(e)))
        # Now, we need to wait for the handshake.
        try:
            yield pConnection.wait_for_handshake
        except ReqlAuthError as e:
            raise
        except ReqlTimeoutError as e:
            # Re-raise with host/port context for a clearer message.
            raise ReqlTimeoutError(self._parent.host, self._parent.port)
        except Exception as e:
            raise ReqlDriverError('Connection interrupted during handshake with {p.host}:{p.port}. Error: {exc}'
                                  .format(p=self._parent, exc=str(e)))
        self._connection = pConnection
        returnValue(self._parent)
    def is_open(self):
        return self._connection._open
    def close(self, noreply_wait=False, token=None, exception=None):
        # Fail all outstanding cursors and queries, optionally waiting for
        # noreply queries to complete before dropping the transport.
        d = defer.succeed(None)
        self._closing = True
        error_message = "Connection is closed"
        if exception is not None:
            error_message = "Connection is closed (reason: {exc})".format(exc=str(exception))
        for cursor in list(self._cursor_cache.values()):
            cursor._error(error_message)
        for query, deferred in iter(self._user_queries.values()):
            if not deferred.called:
                deferred.errback(fail=ReqlDriverError(error_message))
        self._user_queries = {}
        self._cursor_cache = {}
        if noreply_wait:
            noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
            d = self.run_query(noreply, False)
        def closeConnection(res):
            # Drop the transport regardless of the NOREPLY_WAIT outcome.
            self._connection.transport.loseConnection()
            return res
        return d.addBoth(closeConnection)
    @inlineCallbacks
    def run_query(self, query, noreply):
        response_defer = Deferred()
        if not noreply:
            # Register before writing so the response cannot race us.
            self._user_queries[query.token] = (query, response_defer)
        # Send the query
        self._connection.transport.write(query.serialize(self._parent._get_json_encoder(query)))
        if noreply:
            returnValue(None)
        else:
            res = yield response_defer
            returnValue(res)
class Connection(ConnectionBase):
    # Public connection class: thin @inlineCallbacks wrappers that expose
    # the ConnectionBase helpers as Twisted Deferreds.
    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(ConnectionInstance, *args, **kwargs)
    @inlineCallbacks
    def reconnect(self, noreply_wait=True, timeout=None):
        # Close first so outstanding queries resolve before reconnecting.
        yield self.close(noreply_wait)
        res = yield super(Connection, self).reconnect(noreply_wait, timeout)
        returnValue(res)
    @inlineCallbacks
    def close(self, *args, **kwargs):
        res = yield super(Connection, self).close(*args, **kwargs) or None
        returnValue(res)
    @inlineCallbacks
    def noreply_wait(self):
        res = yield super(Connection, self).noreply_wait()
        returnValue(res)
    @inlineCallbacks
    def server(self):
        res = yield super(Connection, self).server()
        returnValue(res)
    @inlineCallbacks
    def _start(self, term, **global_optargs):
        res = yield super(Connection, self)._start(term, **global_optargs)
        returnValue(res)
    @inlineCallbacks
    def _continue(self, cursor):
        res = yield super(Connection, self)._continue(cursor)
        returnValue(res)
    @inlineCallbacks
    def _stop(self, cursor):
        res = yield super(Connection, self)._stop(cursor)
        returnValue(res) | rethinkdb/twisted_net/net_twisted.py |
import struct
import time
from rethinkdb import ql2_pb2
from rethinkdb.errors import ReqlAuthError, ReqlDriverError, ReqlTimeoutError, RqlCursorEmpty
from rethinkdb.net import Connection as ConnectionBase, Cursor, Query, Response, maybe_profile
from twisted.internet import defer, reactor
from twisted.internet.defer import CancelledError, Deferred, DeferredQueue, inlineCallbacks, returnValue
from twisted.internet.endpoints import clientFromString
from twisted.internet.error import TimeoutError
from twisted.internet.protocol import ClientFactory, Protocol
__all__ = ['Connection']
pResponse = ql2_pb2.Response.ResponseType
pQuery = ql2_pb2.Query.QueryType
class DatabaseProtocol(Protocol):
    # Wire protocol: first runs the NUL-delimited handshake, then
    # reassembles length-prefixed response frames and hands each to the
    # factory's response handler.
    WAITING_FOR_HANDSHAKE = 0
    READY = 1
    def __init__(self, factory):
        self.factory = factory
        self.state = DatabaseProtocol.WAITING_FOR_HANDSHAKE
        # State -> data-handler dispatch table used by dataReceived.
        self._handlers = {
            DatabaseProtocol.WAITING_FOR_HANDSHAKE: self._handleHandshake,
            DatabaseProtocol.READY: self._handleResponse
        }
        # Receive buffer plus the header fields of the frame in progress.
        self.buf = bytes()
        self.buf_expected_length = 0
        self.buf_token = None
        self.wait_for_handshake = Deferred()
        self._open = True
    def connectionMade(self):
        # Send immediately the handshake.
        self.factory.handshake.reset()
        self.transport.write(self.factory.handshake.next_message(None))
        # Defer a timer which will callback when timed out and errback the
        # wait_for_handshake. Otherwise, it will be cancelled in
        # handleHandshake.
        self._timeout_defer = reactor.callLater(self.factory.timeout,
                                                self._handleHandshakeTimeout)
    def connectionLost(self, reason):
        self._open = False
    def _handleHandshakeTimeout(self):
        # If we are here, we failed to do the handshake before the timeout.
        # We close the connection and raise an ReqlTimeoutError in the
        # wait_for_handshake deferred.
        self._open = False
        self.transport.loseConnection()
        self.wait_for_handshake.errback(ReqlTimeoutError())
    def _handleHandshake(self, data):
        # Handshake messages are NUL-terminated; consume every complete
        # message buffered so far and feed each to the handshake state
        # machine until it reports completion (next_message returns None).
        try:
            self.buf += data
            while True:
                end_index = self.buf.find(b'\0')
                if end_index != -1:
                    response = self.buf[:end_index]
                    self.buf = self.buf[end_index + 1:]
                    request = self.factory.handshake.next_message(response)
                    if request is None:
                        # We're now ready to work with real data.
                        self.state = DatabaseProtocol.READY
                        # We cancel the scheduled timeout.
                        self._timeout_defer.cancel()
                        # We callback our wait_for_handshake.
                        self.wait_for_handshake.callback(None)
                    elif request != "":
                        self.transport.write(request)
                else:
                    break
        except Exception as e:
            self.wait_for_handshake.errback(e)
    def _handleResponse(self, data):
        # If we have more than one response, we should handle all of them.
        self.buf += data
        while True:
            # 1. Read the header, until we read the length of the awaited payload.
            if self.buf_expected_length == 0:
                if len(self.buf) >= 12:
                    # Header: 8-byte little-endian token + 4-byte payload length.
                    token, length = struct.unpack('<qL', self.buf[:12])
                    self.buf_token = token
                    self.buf_expected_length = length
                    self.buf = self.buf[12:]
                else:
                    # We quit the function, it is impossible to have read the
                    # entire payload at this point.
                    return
            # 2. Buffer the data, until the size of the data match the expected
            # length provided by the header.
            if len(self.buf) < self.buf_expected_length:
                return
            self.factory.response_handler(self.buf_token, self.buf[:self.buf_expected_length])
            self.buf = self.buf[self.buf_expected_length:]
            self.buf_token = None
            self.buf_expected_length = 0
    def dataReceived(self, data):
        # Route incoming bytes to the handler for the current state.
        try:
            if self._open:
                self._handlers[self.state](data)
        except Exception as e:
            self.transport.loseConnection()
            raise ReqlDriverError('Driver failed to handle received data.'
                                  'Error: {exc}. Dropping the connection.'.format(exc=str(e)))
class DatabaseProtoFactory(ClientFactory):
    """Factory producing DatabaseProtocol instances that share the
    connection's timeout, handshake state and response callback."""
    protocol = DatabaseProtocol
    def __init__(self, timeout, response_handler, handshake):
        self.response_handler = response_handler
        self.handshake = handshake
        self.timeout = timeout
    def startedConnecting(self, connector):
        # Nothing to track when a connection attempt begins.
        pass
    def buildProtocol(self, addr):
        # Give the protocol a back-reference to this factory.
        return DatabaseProtocol(self)
    def clientConnectionLost(self, connector, reason):
        # Reconnection is handled at a higher level.
        pass
    def clientConnectionFailed(self, connector, reason):
        # Failures surface through the endpoint's connect Deferred.
        pass
class CursorItems(DeferredQueue):
    """DeferredQueue of buffered cursor items with list-style access."""
    def __init__(self):
        super(CursorItems, self).__init__()
    def cancel_getters(self, err):
        """
        Cancel all waiters.
        """
        # Snapshot the waiter list since we mutate it while iterating.
        for pending_get in self.waiting[:]:
            if not pending_get.called:
                pending_get.errback(err)
            self.waiting.remove(pending_get)
    def extend(self, data):
        """Enqueue every element of *data*, waking blocked getters."""
        for item in data:
            self.put(item)
    def __len__(self):
        return len(self.pending)
    def __getitem__(self, index):
        return self.pending[index]
    def __iter__(self):
        return iter(self.pending)
class TwistedCursor(Cursor):
    # Cursor variant whose item buffer is a CursorItems queue and whose
    # fetch/wait operations return Twisted Deferreds.
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('items_type', CursorItems)
        super(TwistedCursor, self).__init__(*args, **kwargs)
        # Deferreds created by fetch_next() that fire when new data arrives.
        self.waiting = list()
    def _extend(self, res_buf):
        Cursor._extend(self, res_buf)
        if self.error is not None:
            # Propagate the error to every blocked items.get().
            self.items.cancel_getters(self.error)
        # Wake every fetch_next() waiter; remove each so it fires only once.
        for d in self.waiting[:]:
            d.callback(None)
            self.waiting.remove(d)
    def _empty_error(self):
        return RqlCursorEmpty()
    @inlineCallbacks
    def fetch_next(self, wait=True):
        # Resolve *wait* into an optional timeout and an absolute deadline.
        timeout = Cursor._wait_to_timeout(wait)
        deadline = None if timeout is None else time.time() + timeout
        def wait_canceller(d):
            # Cancellation (from the callLater below) means we timed out.
            d.errback(ReqlTimeoutError())
        # Block until data is buffered or the cursor reports an error.
        while len(self.items) == 0 and self.error is None:
            self._maybe_fetch_batch()
            wait = Deferred(canceller=wait_canceller)
            self.waiting.append(wait)
            if deadline is not None:
                # Shrink the timeout by the time already spent waiting.
                timeout = max(0, deadline - time.time())
                reactor.callLater(timeout, lambda: wait.cancel())
            yield wait
        returnValue(not self._is_empty() or self._has_error())
    def _has_error(self):
        # True for real errors; the "cursor exhausted" sentinel is not one.
        return self.error and (not isinstance(self.error, RqlCursorEmpty))
    def _is_empty(self):
        return isinstance(self.error, RqlCursorEmpty) and len(self.items) == 0
    def _get_next(self, timeout):
        if len(self.items) == 0 and self.error is not None:
            return defer.fail(self.error)
        def raise_timeout(errback):
            # A cancelled get() means the callLater below fired first.
            if isinstance(errback.value, CancelledError):
                raise ReqlTimeoutError()
            else:
                raise errback.value
        item_defer = self.items.get()
        if timeout is not None:
            item_defer.addErrback(raise_timeout)
            reactor.callLater(timeout, lambda: item_defer.cancel())
        self._maybe_fetch_batch()
        return item_defer
class ConnectionInstance(object):
    # Twisted-specific transport layer driven by the shared ConnectionBase:
    # owns the protocol instance and routes responses to queries/cursors.
    def __init__(self, parent, start_reactor=False):
        self._parent = parent
        self._closing = False
        self._connection = None
        # token -> (Query, Deferred) for user queries awaiting a reply.
        self._user_queries = {}
        # token -> TwistedCursor receiving partial result batches.
        self._cursor_cache = {}
        if start_reactor:
            reactor.run()
    def client_port(self):
        # Local TCP port, or None when the connection is closed.
        if self.is_open():
            return self._connection.transport.getHost().port
    def client_address(self):
        # Local host address, or None when the connection is closed.
        if self.is_open():
            return self._connection.transport.getHost().host
    def _handleResponse(self, token, data):
        # Dispatch one raw response frame to its cursor or waiting query.
        try:
            cursor = self._cursor_cache.get(token)
            if cursor is not None:
                # Continuation data for an existing cursor.
                cursor._extend(data)
            elif token in self._user_queries:
                query, deferred = self._user_queries[token]
                res = Response(token, data,
                               self._parent._get_json_decoder(query))
                if res.type == pResponse.SUCCESS_ATOM:
                    deferred.callback(maybe_profile(res.data[0], res))
                elif res.type in (pResponse.SUCCESS_SEQUENCE,
                                  pResponse.SUCCESS_PARTIAL):
                    # Sequence results are surfaced to the caller as a cursor.
                    cursor = TwistedCursor(self, query, res)
                    deferred.callback(maybe_profile(cursor, res))
                elif res.type == pResponse.WAIT_COMPLETE:
                    deferred.callback(None)
                elif res.type == pResponse.SERVER_INFO:
                    deferred.callback(res.data[0])
                else:
                    deferred.errback(res.make_error(query))
                del self._user_queries[token]
            elif not self._closing:
                raise ReqlDriverError("Unexpected response received.")
        except Exception as e:
            # A malformed/unexpected frame poisons the connection: close it.
            if not self._closing:
                self.close(exception=e)
    @inlineCallbacks
    def _connectTimeout(self, factory, timeout):
        # Connect to the server, bounding the attempt by *timeout* seconds
        # when given; endpoint timeouts become ReqlTimeoutError.
        try:
            # TODO: use ssl options
            # TODO: this doesn't work for literal IPv6 addresses like '::1'
            args = "tcp:%s:%d" % (self._parent.host, self._parent.port)
            if timeout is not None:
                args = args + (":timeout=%d" % timeout)
            endpoint = clientFromString(reactor, args)
            p = yield endpoint.connect(factory)
            returnValue(p)
        except TimeoutError:
            raise ReqlTimeoutError()
    @inlineCallbacks
    def connect(self, timeout):
        factory = DatabaseProtoFactory(timeout, self._handleResponse,
                                       self._parent.handshake)
        # We connect to the server, and send the handshake payload.
        pConnection = None
        try:
            pConnection = yield self._connectTimeout(factory, timeout)
        except Exception as e:
            raise ReqlDriverError('Could not connect to {p.host}:{p.port}. Error: {exc}'
                                  .format(p=self._parent, exc=str(e)))
        # Now, we need to wait for the handshake.
        try:
            yield pConnection.wait_for_handshake
        except ReqlAuthError as e:
            raise
        except ReqlTimeoutError as e:
            # Re-raise with host/port context for a clearer message.
            raise ReqlTimeoutError(self._parent.host, self._parent.port)
        except Exception as e:
            raise ReqlDriverError('Connection interrupted during handshake with {p.host}:{p.port}. Error: {exc}'
                                  .format(p=self._parent, exc=str(e)))
        self._connection = pConnection
        returnValue(self._parent)
    def is_open(self):
        return self._connection._open
    def close(self, noreply_wait=False, token=None, exception=None):
        # Fail all outstanding cursors and queries, optionally waiting for
        # noreply queries to complete before dropping the transport.
        d = defer.succeed(None)
        self._closing = True
        error_message = "Connection is closed"
        if exception is not None:
            error_message = "Connection is closed (reason: {exc})".format(exc=str(exception))
        for cursor in list(self._cursor_cache.values()):
            cursor._error(error_message)
        for query, deferred in iter(self._user_queries.values()):
            if not deferred.called:
                deferred.errback(fail=ReqlDriverError(error_message))
        self._user_queries = {}
        self._cursor_cache = {}
        if noreply_wait:
            noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
            d = self.run_query(noreply, False)
        def closeConnection(res):
            # Drop the transport regardless of the NOREPLY_WAIT outcome.
            self._connection.transport.loseConnection()
            return res
        return d.addBoth(closeConnection)
    @inlineCallbacks
    def run_query(self, query, noreply):
        response_defer = Deferred()
        if not noreply:
            # Register before writing so the response cannot race us.
            self._user_queries[query.token] = (query, response_defer)
        # Send the query
        self._connection.transport.write(query.serialize(self._parent._get_json_encoder(query)))
        if noreply:
            returnValue(None)
        else:
            res = yield response_defer
            returnValue(res)
class Connection(ConnectionBase):
    # Public connection class: thin @inlineCallbacks wrappers that expose
    # the ConnectionBase helpers as Twisted Deferreds.
    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(ConnectionInstance, *args, **kwargs)
    @inlineCallbacks
    def reconnect(self, noreply_wait=True, timeout=None):
        # Close first so outstanding queries resolve before reconnecting.
        yield self.close(noreply_wait)
        res = yield super(Connection, self).reconnect(noreply_wait, timeout)
        returnValue(res)
    @inlineCallbacks
    def close(self, *args, **kwargs):
        res = yield super(Connection, self).close(*args, **kwargs) or None
        returnValue(res)
    @inlineCallbacks
    def noreply_wait(self):
        res = yield super(Connection, self).noreply_wait()
        returnValue(res)
    @inlineCallbacks
    def server(self):
        res = yield super(Connection, self).server()
        returnValue(res)
    @inlineCallbacks
    def _start(self, term, **global_optargs):
        res = yield super(Connection, self)._start(term, **global_optargs)
        returnValue(res)
    @inlineCallbacks
    def _continue(self, cursor):
        res = yield super(Connection, self)._continue(cursor)
        returnValue(res)
    @inlineCallbacks
    def _stop(self, cursor):
        res = yield super(Connection, self)._stop(cursor)
        returnValue(res) | 0.443359 | 0.111024 |
import arcade
from pyglet.math import Vec2
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprite with Moving Platforms Example"
SPRITE_PIXEL_SIZE = 128
GRID_PIXEL_SIZE = (SPRITE_PIXEL_SIZE * SPRITE_SCALING)
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
VIEWPORT_MARGIN = SPRITE_PIXEL_SIZE * SPRITE_SCALING
RIGHT_MARGIN = 4 * SPRITE_PIXEL_SIZE * SPRITE_SCALING
# Physics
MOVEMENT_SPEED = 10 * SPRITE_SCALING
JUMP_SPEED = 28 * SPRITE_SCALING
GRAVITY = .9 * SPRITE_SCALING
# How fast the camera pans to the player. 1.0 is instant.
CAMERA_SPEED = 0.1
class MyGame(arcade.Window):
    """ Main application class. """
    def __init__(self, width, height, title):
        """ Initializer """
        # Call the parent init
        super().__init__(width, height, title)
        # Sprite lists
        # Drawing non-moving walls separate from moving walls improves performance.
        self.static_wall_list = None
        self.moving_wall_list = None
        self.player_list = None
        # Set up the player
        self.player_sprite = None
        self.physics_engine = None
        self.game_over = False
        # Create the cameras. One for the GUI, one for the sprites.
        # We scroll the 'sprite world' but not the GUI.
        self.camera_sprites = arcade.Camera(SCREEN_WIDTH, SCREEN_HEIGHT)
        self.camera_gui = arcade.Camera(SCREEN_WIDTH, SCREEN_HEIGHT)
        # Track the current pressed state of the left/right movement keys.
        self.left_down = False
        self.right_down = False
    def setup(self):
        """ Set up the game and initialize the variables. """
        # Sprite lists
        self.static_wall_list = arcade.SpriteList()
        self.moving_wall_list = arcade.SpriteList()
        self.player_list = arcade.SpriteList()
        # Set up the player
        self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/"
                                           "femalePerson_idle.png",
                                           SPRITE_SCALING)
        self.player_sprite.center_x = 2 * GRID_PIXEL_SIZE
        self.player_sprite.center_y = 3 * GRID_PIXEL_SIZE
        self.player_list.append(self.player_sprite)
        # Create floor
        for i in range(30):
            wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
            wall.bottom = 0
            wall.center_x = i * GRID_PIXEL_SIZE
            self.static_wall_list.append(wall)
        # Create platform side to side
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 3 * GRID_PIXEL_SIZE
        wall.center_x = 3 * GRID_PIXEL_SIZE
        wall.boundary_left = 2 * GRID_PIXEL_SIZE
        wall.boundary_right = 5 * GRID_PIXEL_SIZE
        wall.change_x = 2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create platform side to side
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 3 * GRID_PIXEL_SIZE
        wall.center_x = 7 * GRID_PIXEL_SIZE
        wall.boundary_left = 5 * GRID_PIXEL_SIZE
        wall.boundary_right = 9 * GRID_PIXEL_SIZE
        wall.change_x = -2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create platform moving up and down
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 5 * GRID_PIXEL_SIZE
        wall.center_x = 5 * GRID_PIXEL_SIZE
        wall.boundary_top = 8 * GRID_PIXEL_SIZE
        wall.boundary_bottom = 4 * GRID_PIXEL_SIZE
        wall.change_y = 2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create platform moving diagonally
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 5 * GRID_PIXEL_SIZE
        wall.center_x = 8 * GRID_PIXEL_SIZE
        wall.boundary_left = 7 * GRID_PIXEL_SIZE
        wall.boundary_right = 9 * GRID_PIXEL_SIZE
        wall.boundary_top = 8 * GRID_PIXEL_SIZE
        wall.boundary_bottom = 4 * GRID_PIXEL_SIZE
        wall.change_x = 2 * SPRITE_SCALING
        wall.change_y = 2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create our physics engine
        self.physics_engine = \
            arcade.PhysicsEnginePlatformer(self.player_sprite,
                                           [self.static_wall_list, self.moving_wall_list],
                                           gravity_constant=GRAVITY)
        # Set the background color
        arcade.set_background_color(arcade.color.AMAZON)
        self.game_over = False
    def on_draw(self):
        """
        Render the screen.
        """
        # This command has to happen before we start drawing
        self.clear()
        # Select the camera we'll use to draw all our sprites
        self.camera_sprites.use()
        # Draw the sprites.
        self.static_wall_list.draw()
        self.moving_wall_list.draw()
        self.player_list.draw()
        # Switch to the GUI camera so the HUD text does not scroll.
        self.camera_gui.use()
        # Put the text on the screen.
        distance = self.player_sprite.right
        output = f"Distance: {distance}"
        arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
    def set_x_speed(self):
        # Derive horizontal speed from the current left/right key state.
        if self.left_down and not self.right_down:
            self.player_sprite.change_x = -MOVEMENT_SPEED
        elif self.right_down and not self.left_down:
            self.player_sprite.change_x = MOVEMENT_SPEED
        else:
            self.player_sprite.change_x = 0
    def on_key_press(self, key, modifiers):
        """ Called whenever a key is pressed. """
        if key == arcade.key.UP:
            if self.physics_engine.can_jump():
                self.player_sprite.change_y = JUMP_SPEED
        elif key == arcade.key.LEFT:
            self.left_down = True
            self.set_x_speed()
        elif key == arcade.key.RIGHT:
            self.right_down = True
            self.set_x_speed()
    def on_key_release(self, key, modifiers):
        """ Called whenever a key is released. """
        if key == arcade.key.LEFT:
            self.left_down = False
            self.set_x_speed()
        elif key == arcade.key.RIGHT:
            self.right_down = False
            self.set_x_speed()
    def on_update(self, delta_time):
        """ Movement and game logic """
        # Call update on all sprites
        self.physics_engine.update()
        # Scroll the screen to the player
        self.scroll_to_player()
    def scroll_to_player(self):
        """
        Scroll the window to the player.
        if CAMERA_SPEED is 1, the camera will immediately move to the desired position.
        Anything between 0 and 1 will have the camera move to the location with a smoother
        pan.
        """
        position = Vec2(self.player_sprite.center_x - self.width / 2,
                        self.player_sprite.center_y - self.height / 2)
        self.camera_sprites.move_to(position, CAMERA_SPEED)
def main():
    """Create the game window, build the level, and start the arcade loop."""
    game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    game.setup()
    arcade.run()
if __name__ == "__main__":
main() | arcade/examples/sprite_moving_platforms.py | import arcade
from pyglet.math import Vec2
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprite with Moving Platforms Example"
SPRITE_PIXEL_SIZE = 128
GRID_PIXEL_SIZE = (SPRITE_PIXEL_SIZE * SPRITE_SCALING)
# How many pixels to keep as a minimum margin between the character
# and the edge of the screen.
VIEWPORT_MARGIN = SPRITE_PIXEL_SIZE * SPRITE_SCALING
RIGHT_MARGIN = 4 * SPRITE_PIXEL_SIZE * SPRITE_SCALING
# Physics
MOVEMENT_SPEED = 10 * SPRITE_SCALING
JUMP_SPEED = 28 * SPRITE_SCALING
GRAVITY = .9 * SPRITE_SCALING
# How fast the camera pans to the player. 1.0 is instant.
CAMERA_SPEED = 0.1
class MyGame(arcade.Window):
    """ Main application class. """
    def __init__(self, width, height, title):
        """ Initializer """
        # Call the parent init
        super().__init__(width, height, title)
        # Sprite lists
        # Drawing non-moving walls separate from moving walls improves performance.
        self.static_wall_list = None
        self.moving_wall_list = None
        self.player_list = None
        # Set up the player
        self.player_sprite = None
        self.physics_engine = None
        self.game_over = False
        # Create the cameras. One for the GUI, one for the sprites.
        # We scroll the 'sprite world' but not the GUI.
        self.camera_sprites = arcade.Camera(SCREEN_WIDTH, SCREEN_HEIGHT)
        self.camera_gui = arcade.Camera(SCREEN_WIDTH, SCREEN_HEIGHT)
        # Track the current pressed state of the left/right movement keys.
        self.left_down = False
        self.right_down = False
    def setup(self):
        """ Set up the game and initialize the variables. """
        # Sprite lists
        self.static_wall_list = arcade.SpriteList()
        self.moving_wall_list = arcade.SpriteList()
        self.player_list = arcade.SpriteList()
        # Set up the player
        self.player_sprite = arcade.Sprite(":resources:images/animated_characters/female_person/"
                                           "femalePerson_idle.png",
                                           SPRITE_SCALING)
        self.player_sprite.center_x = 2 * GRID_PIXEL_SIZE
        self.player_sprite.center_y = 3 * GRID_PIXEL_SIZE
        self.player_list.append(self.player_sprite)
        # Create floor
        for i in range(30):
            wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
            wall.bottom = 0
            wall.center_x = i * GRID_PIXEL_SIZE
            self.static_wall_list.append(wall)
        # Create platform side to side
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 3 * GRID_PIXEL_SIZE
        wall.center_x = 3 * GRID_PIXEL_SIZE
        wall.boundary_left = 2 * GRID_PIXEL_SIZE
        wall.boundary_right = 5 * GRID_PIXEL_SIZE
        wall.change_x = 2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create platform side to side
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 3 * GRID_PIXEL_SIZE
        wall.center_x = 7 * GRID_PIXEL_SIZE
        wall.boundary_left = 5 * GRID_PIXEL_SIZE
        wall.boundary_right = 9 * GRID_PIXEL_SIZE
        wall.change_x = -2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create platform moving up and down
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 5 * GRID_PIXEL_SIZE
        wall.center_x = 5 * GRID_PIXEL_SIZE
        wall.boundary_top = 8 * GRID_PIXEL_SIZE
        wall.boundary_bottom = 4 * GRID_PIXEL_SIZE
        wall.change_y = 2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create platform moving diagonally
        wall = arcade.Sprite(":resources:images/tiles/grassMid.png", SPRITE_SCALING)
        wall.center_y = 5 * GRID_PIXEL_SIZE
        wall.center_x = 8 * GRID_PIXEL_SIZE
        wall.boundary_left = 7 * GRID_PIXEL_SIZE
        wall.boundary_right = 9 * GRID_PIXEL_SIZE
        wall.boundary_top = 8 * GRID_PIXEL_SIZE
        wall.boundary_bottom = 4 * GRID_PIXEL_SIZE
        wall.change_x = 2 * SPRITE_SCALING
        wall.change_y = 2 * SPRITE_SCALING
        self.moving_wall_list.append(wall)
        # Create our physics engine
        self.physics_engine = \
            arcade.PhysicsEnginePlatformer(self.player_sprite,
                                           [self.static_wall_list, self.moving_wall_list],
                                           gravity_constant=GRAVITY)
        # Set the background color
        arcade.set_background_color(arcade.color.AMAZON)
        self.game_over = False
    def on_draw(self):
        """
        Render the screen.
        """
        # This command has to happen before we start drawing
        self.clear()
        # Select the camera we'll use to draw all our sprites
        self.camera_sprites.use()
        # Draw the sprites.
        self.static_wall_list.draw()
        self.moving_wall_list.draw()
        self.player_list.draw()
        # Switch to the GUI camera so the HUD text does not scroll.
        self.camera_gui.use()
        # Put the text on the screen.
        distance = self.player_sprite.right
        output = f"Distance: {distance}"
        arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
    def set_x_speed(self):
        # Derive horizontal speed from the current left/right key state.
        if self.left_down and not self.right_down:
            self.player_sprite.change_x = -MOVEMENT_SPEED
        elif self.right_down and not self.left_down:
            self.player_sprite.change_x = MOVEMENT_SPEED
        else:
            self.player_sprite.change_x = 0
    def on_key_press(self, key, modifiers):
        """ Called whenever a key is pressed. """
        if key == arcade.key.UP:
            if self.physics_engine.can_jump():
                self.player_sprite.change_y = JUMP_SPEED
        elif key == arcade.key.LEFT:
            self.left_down = True
            self.set_x_speed()
        elif key == arcade.key.RIGHT:
            self.right_down = True
            self.set_x_speed()
    def on_key_release(self, key, modifiers):
        """ Called whenever a key is released. """
        if key == arcade.key.LEFT:
            self.left_down = False
            self.set_x_speed()
        elif key == arcade.key.RIGHT:
            self.right_down = False
            self.set_x_speed()
    def on_update(self, delta_time):
        """ Movement and game logic """
        # Call update on all sprites
        self.physics_engine.update()
        # Scroll the screen to the player
        self.scroll_to_player()
    def scroll_to_player(self):
        """
        Scroll the window to the player.
        if CAMERA_SPEED is 1, the camera will immediately move to the desired position.
        Anything between 0 and 1 will have the camera move to the location with a smoother
        pan.
        """
        position = Vec2(self.player_sprite.center_x - self.width / 2,
                        self.player_sprite.center_y - self.height / 2)
        self.camera_sprites.move_to(position, CAMERA_SPEED)
def main():
    """Create the game window, build the level, and start the arcade loop."""
    game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    game.setup()
    arcade.run()
if __name__ == "__main__":
main() | 0.734215 | 0.307904 |
from __future__ import annotations
from datetime import datetime
from os import getenv
from typing import Generic, Type, TypeVar
from .environment import running_in_pytest
T = TypeVar("T", bound="Singleton")
class Singleton(Generic[T], type):
    """Metaclass that caches and reuses a single instance per class."""
    _instances: dict[Type[T], T] = {}
    def __call__(cls, *args, **kwargs):
        # EAFP: the cached instance is the common case after first use.
        try:
            return cls._instances[cls]  # type: ignore
        except KeyError:
            instance = super().__call__(*args, **kwargs)  # type: ignore
            cls._instances[cls] = instance
            return instance
class Settings(metaclass=Singleton): # pylint: disable=R0902
    """Process-wide configuration, read once from the environment."""
    def __init__(self):
        # content
        self.CONTENT_ROOT = "https://raw.githubusercontent.com/lexicalunit"
        self.THUMB_URL = (
            f"{self.CONTENT_ROOT}/spellbot/main/spellbot.png"
            f"?{datetime.today().strftime('%Y-%m-%d')}" # workaround over-eager caching
        )
        self.ICO_URL = (
            f"{self.CONTENT_ROOT}/spellbot/main/spellbot-sm.png"
            f"?{datetime.today().strftime('%Y-%m-%d')}" # workaround over-eager caching
        )
        # application
        self.BOT_TOKEN = getenv("BOT_TOKEN")
        self.PORT = int(getenv("PORT", "3008"))
        self.HOST = getenv("HOST") or "localhost"
        self.DEBUG_GUILD = getenv("DEBUG_GUILD")
        self.API_BASE_URL = getenv("API_BASE_URL", "https://bot.spellbot.io")
        # datadog
        self.DD_API_KEY = getenv("DD_API_KEY")
        self.DD_APP_KEY = getenv("DD_APP_KEY")
        # database
        default_database_url = f"postgresql://postgres@{self.HOST}:5432/postgres"
        # Use a separate "-test" database when running under pytest.
        if running_in_pytest(): # pragma: no cover
            default_database_url += "-test"
        database_url = getenv("DATABASE_URL") or default_database_url
        if database_url.startswith("postgres://"): # pragma: no cover
            # SQLAlchemy 1.4.x removed support for the postgres:// URI scheme
            database_url = database_url.replace("postgres://", "postgresql://", 1)
        self.DATABASE_URL = database_url
        # spelltable
        self.SPELLTABLE_ROOT = "https://us-central1-magic-night-30324.cloudfunctions.net"
        self.SPELLTABLE_CREATE = f"{self.SPELLTABLE_ROOT}/createGame"
        self.SPELLTABLE_AUTH_KEY = getenv("SPELLTABLE_AUTH_KEY")
        # configuration
        self.BOT_INVITE_LINK = (
            r"https://discordapp.com/api/oauth2/authorize"
            r"?client_id=725510263251402832"
            r"&permissions=2416045137"
            r"&scope=applications.commands%20bot"
        )
        self.VOICE_INVITE_EXPIRE_TIME_S = 14400 # 4 hours
        self.EMBED_COLOR = 0x5A3EFD
        self.DATABASE_ECHO = False
        self.ADMIN_ROLE = "SpellBot Admin"
        self.MOD_PREFIX = "Moderator"
        self.EXPIRE_TIME_M = 45 # 45 minutes
        # tasks
        self.VOICE_GRACE_PERIOD_M = 10 # 10 minutes
        self.VOICE_AGE_LIMIT_H = 5 # 5 hours
        self.VOICE_CLEANUP_LOOP_M = 30 # 30 minutes
        self.VOICE_CLEANUP_BATCH = 40 # batch size
        self.EXPIRE_GAMES_LOOP_M = 10 # 10 minutes | src/spellbot/settings.py | from __future__ import annotations
from datetime import datetime
from os import getenv
from typing import Generic, Type, TypeVar
from .environment import running_in_pytest
T = TypeVar("T", bound="Singleton")
class Singleton(Generic[T], type):
    """Metaclass that caches and reuses a single instance per class."""
    _instances: dict[Type[T], T] = {}
    def __call__(cls, *args, **kwargs):
        # EAFP: the cached instance is the common case after first use.
        try:
            return cls._instances[cls]  # type: ignore
        except KeyError:
            instance = super().__call__(*args, **kwargs)  # type: ignore
            cls._instances[cls] = instance
            return instance
class Settings(metaclass=Singleton): # pylint: disable=R0902
    """Process-wide configuration, read once from the environment."""
    def __init__(self):
        # content
        self.CONTENT_ROOT = "https://raw.githubusercontent.com/lexicalunit"
        self.THUMB_URL = (
            f"{self.CONTENT_ROOT}/spellbot/main/spellbot.png"
            f"?{datetime.today().strftime('%Y-%m-%d')}" # workaround over-eager caching
        )
        self.ICO_URL = (
            f"{self.CONTENT_ROOT}/spellbot/main/spellbot-sm.png"
            f"?{datetime.today().strftime('%Y-%m-%d')}" # workaround over-eager caching
        )
        # application
        self.BOT_TOKEN = getenv("BOT_TOKEN")
        self.PORT = int(getenv("PORT", "3008"))
        self.HOST = getenv("HOST") or "localhost"
        self.DEBUG_GUILD = getenv("DEBUG_GUILD")
        self.API_BASE_URL = getenv("API_BASE_URL", "https://bot.spellbot.io")
        # datadog
        self.DD_API_KEY = getenv("DD_API_KEY")
        self.DD_APP_KEY = getenv("DD_APP_KEY")
        # database
        default_database_url = f"postgresql://postgres@{self.HOST}:5432/postgres"
        # Use a separate "-test" database when running under pytest.
        if running_in_pytest(): # pragma: no cover
            default_database_url += "-test"
        database_url = getenv("DATABASE_URL") or default_database_url
        if database_url.startswith("postgres://"): # pragma: no cover
            # SQLAlchemy 1.4.x removed support for the postgres:// URI scheme
            database_url = database_url.replace("postgres://", "postgresql://", 1)
        self.DATABASE_URL = database_url
        # spelltable
        self.SPELLTABLE_ROOT = "https://us-central1-magic-night-30324.cloudfunctions.net"
        self.SPELLTABLE_CREATE = f"{self.SPELLTABLE_ROOT}/createGame"
        self.SPELLTABLE_AUTH_KEY = getenv("SPELLTABLE_AUTH_KEY")
        # configuration
        self.BOT_INVITE_LINK = (
            r"https://discordapp.com/api/oauth2/authorize"
            r"?client_id=725510263251402832"
            r"&permissions=2416045137"
            r"&scope=applications.commands%20bot"
        )
        self.VOICE_INVITE_EXPIRE_TIME_S = 14400 # 4 hours
        self.EMBED_COLOR = 0x5A3EFD
        self.DATABASE_ECHO = False
        self.ADMIN_ROLE = "SpellBot Admin"
        self.MOD_PREFIX = "Moderator"
        self.EXPIRE_TIME_M = 45 # 45 minutes
        # tasks
        self.VOICE_GRACE_PERIOD_M = 10 # 10 minutes
        self.VOICE_AGE_LIMIT_H = 5 # 5 hours
        self.VOICE_CLEANUP_LOOP_M = 30 # 30 minutes
        self.VOICE_CLEANUP_BATCH = 40 # batch size
        self.EXPIRE_GAMES_LOOP_M = 10 # 10 minutes
import json
from twisted.python import usage, log
from twisted.application import internet
from twisted.internet import reactor, endpoints
from twisted.web import resource, static, server
import subprocess
# curl --data-binary '{"password_hex": "<PASSWORD>", "msg1_hex": "<PASSWORD>"}' http://localhost:8705/A
# curl --data-binary '{"password_hex": "<PASSWORD>", "msg1_hex": "410a30f44d<PASSWORD>e<PASSWORD>19<PASSWORD>cf917c3f051b03325a66e54170"}' http://localhost:8705/B
# curl --data-binary '{"password_hex": "<PASSWORD>", "msg1_hex": "53fd036b02cb66af2c4283708b455f45282ef9482640f30923de2584040a929c52"}' http://localhost:8705/Symmetric
class Dispatcher(resource.Resource, object):
    """Resource that forwards a JSON POST body to an external command.

    The request body is parsed as JSON, merged with the keyword arguments
    supplied at construction time (e.g. ``which="A"``), re-serialized, and
    piped to the stdin of ``cmd_path``.  The command's stdout is returned
    verbatim as the HTTP response body.

    Raises ValueError when the command exits non-zero.
    """

    def __init__(self, cmd_path, **kwargs):
        resource.Resource.__init__(self)
        # Twisted hands paths around as bytes, so insist on a bytes cmd_path.
        assert isinstance(cmd_path, type(b"")), (cmd_path, type(cmd_path))
        self._cmd_path = cmd_path
        self._extra_args = kwargs  # merged into every forwarded request

    def render_POST(self, request):
        req = json.load(request.content)
        for k, v in self._extra_args.items():
            req[k] = v
        # communicate() requires bytes on Python 3 (and bytes==str on
        # Python 2); JSON output is ASCII-safe so UTF-8 encoding is lossless.
        req_data = json.dumps(req).encode("utf-8")
        p = subprocess.Popen([self._cmd_path],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             )
        (output, err) = p.communicate(req_data)
        rc = p.returncode
        if rc != 0:
            # Include the details in the exception, not only in the log,
            # so the failure is diagnosable from the traceback alone.
            msg = "error running command '{}': rc={}, err='{}'".format(
                self._cmd_path, rc, err)
            log.msg(msg)
            raise ValueError(msg)
        return output
class ABS(resource.Resource, object):
    """Parent resource exposing one Dispatcher child per SPAKE2 side.

    Children ``A``, ``B`` and ``S`` forward requests to ``cmd_path`` with
    ``which`` set to "A", "B" and "Symmetric" respectively.
    """

    def __init__(self, cmd_path):
        resource.Resource.__init__(self)
        for child_name, side in ((b"A", "A"),
                                 (b"B", "B"),
                                 (b"S", "Symmetric")):
            self.putChild(child_name, Dispatcher(cmd_path, which=side))
# 'twist' will load this file and look for 'Options' and 'makeService'
class Options(usage.Options):
    """Command-line options understood by ``twist`` for this plugin."""
    synopsis = "[options]"
    longdesc = "SPAKE2 interop server"
    optParameters = [
        # [long name, short flag, default value, description]
        ["port", "p", "tcp:8705", "listening endpoint"],
    ]
def makeService(config):
    """Build the Twisted service: an HTTP site with per-version SPAKE2 trees.

    Called by ``twist`` with the parsed ``Options``; returns the service
    that serves ``/0.3/...`` and ``/0.7/...`` endpoints on ``config["port"]``.
    """
    root = resource.Resource()
    site = server.Site(root)
    root.putChild(b"", static.Data(b"SPAKE2 interop server", "text/plain"))
    # One ABS subtree per supported spake2 release, each backed by a
    # virtualenv-installed interop executable.
    # NOTE(review): these str paths satisfy Dispatcher's bytes assert only on
    # Python 2 (str == bytes); under Python 3 they would need to be b"..." --
    # confirm the target interpreter.
    root.putChild(b"0.3", ABS("ve-p03/bin/spake2_interop_python_0_3"))
    root.putChild(b"0.7", ABS("ve-p07/bin/spake2_interop_python_0_7"))
    ep = endpoints.serverFromString(reactor, config["port"])
    s = internet.StreamServerEndpointService(ep, site)
    # NOTE(review): the trailing "| server/... | import json" below is
    # dataset-row residue fused onto this line; the function ends at "return s".
    return s | server/src/spake2_interop_server/server.py | import json
from twisted.python import usage, log
from twisted.application import internet
from twisted.internet import reactor, endpoints
from twisted.web import resource, static, server
import subprocess
# curl --data-binary '{"password_hex": "<PASSWORD>", "msg1_hex": "<PASSWORD>"}' http://localhost:8705/A
# curl --data-binary '{"password_hex": "<PASSWORD>", "msg1_hex": "410a30f44d<PASSWORD>e<PASSWORD>19<PASSWORD>cf917c3f051b03325a66e54170"}' http://localhost:8705/B
# curl --data-binary '{"password_hex": "<PASSWORD>", "msg1_hex": "53fd036b02cb66af2c4283708b455f45282ef9482640f30923de2584040a929c52"}' http://localhost:8705/Symmetric
class Dispatcher(resource.Resource, object):
def __init__(self, cmd_path, **kwargs):
resource.Resource.__init__(self)
assert isinstance(cmd_path, type(b"")), (cmd_path, type(cmd_path))
self._cmd_path = cmd_path
self._extra_args = kwargs
def render_POST(self, request):
req = json.load(request.content)
for k,v in self._extra_args.items():
req[k] = v
req_data = json.dumps(req)
p = subprocess.Popen([self._cmd_path],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
)
(output, err) = p.communicate(req_data)
rc = p.returncode
if rc != 0:
log.msg("error running command '{}': rc={}, err='{}'".format(
self._cmd_path, rc, err))
raise ValueError()
return output
class ABS(resource.Resource, object):
def __init__(self, cmd_path):
resource.Resource.__init__(self)
self.putChild(b"A", Dispatcher(cmd_path, which="A"))
self.putChild(b"B", Dispatcher(cmd_path, which="B"))
self.putChild(b"S", Dispatcher(cmd_path, which="Symmetric"))
# 'twist' will load this file and look for 'Options' and 'makeService'
class Options(usage.Options):
synopsis = "[options]"
longdesc = "SPAKE2 interop server"
optParameters = [
["port", "p", "tcp:8705", "listening endpoint"],
]
def makeService(config):
root = resource.Resource()
site = server.Site(root)
root.putChild(b"", static.Data(b"SPAKE2 interop server", "text/plain"))
root.putChild(b"0.3", ABS("ve-p03/bin/spake2_interop_python_0_3"))
root.putChild(b"0.7", ABS("ve-p07/bin/spake2_interop_python_0_7"))
ep = endpoints.serverFromString(reactor, config["port"])
s = internet.StreamServerEndpointService(ep, site)
return s | 0.420005 | 0.081082 |
import os
import shutil
import tarfile
import tempfile
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
BACKUP_TEMP_DIR = "falcon-upgrade-backup"
BACKUP_DATA_ARCHIVE = "falcon-local-backup.tar"
BACKUP_CONF_ARCHIVE = "falcon-conf-backup.tar"
def post_stop_backup():
  """
  Archives the Falcon data and configuration directories into tarballs
  under a temp-dir staging area so they survive the upgrade.
  Raises Fail if a directory to back up does not exist.
  :return:
  """
  Logger.info('Backing up Falcon data and configuration directories before upgrade...')
  mappings = _get_directory_mappings()
  backup_root = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
  if not os.path.isdir(backup_root):
    os.makedirs(backup_root)
  for source_dir, archive_name in mappings.items():
    if not os.path.isdir(source_dir):
      raise Fail("Unable to backup missing directory {0}".format(source_dir))
    archive = os.path.join(backup_root, archive_name)
    Logger.info('Compressing {0} to {1}'.format(source_dir, archive))
    # Start from a clean slate; a stale archive from a prior attempt would
    # otherwise be opened for overwrite anyway, but remove it explicitly.
    if os.path.exists(archive):
      os.remove(archive)
    tarball = None
    try:
      tarball = tarfile.open(archive, "w")
      tarball.add(source_dir, arcname=os.path.basename(source_dir))
    finally:
      if tarball:
        tarball.close()
def pre_start_restore():
  """
  Unpacks the tarballs created by post_stop_backup() back into their
  original locations, then deletes the staging area.
  Raises Fail if an expected backup archive is missing.
  :return:
  """
  Logger.info('Restoring Falcon data and configuration directories after upgrade...')
  backup_root = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
  for target_dir, archive_name in _get_directory_mappings().items():
    archive = os.path.join(backup_root, archive_name)
    if not os.path.isfile(archive):
      raise Fail("Unable to restore missing backup archive {0}".format(archive))
    Logger.info('Extracting {0} to {1}'.format(archive, target_dir))
    tarball = None
    try:
      tarball = tarfile.open(archive, "r")
      tarball.extractall(target_dir)
    finally:
      if tarball:
        tarball.close()
  # All archives restored successfully -- remove the staging area.
  shutil.rmtree(backup_root)
def _get_directory_mappings():
  """
  Gets a dictionary of directory to archive name that represents the
  directories that need to be backed up and their output tarball archive targets
  :return: the dictionary of directory to tarball mappings
  """
  # Imported lazily: the Ambari 'params' module is only resolvable when this
  # script runs inside an Ambari command execution context.
  import params
  # NOTE(review): the trailing "| ambari-server/... | import os" below is
  # dataset-row residue fused onto this line; the dict literal ends at "}".
  return { params.falcon_local_dir : BACKUP_DATA_ARCHIVE,
           params.falcon_conf_dir : BACKUP_CONF_ARCHIVE } | ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server_upgrade.py | import os
import shutil
import tarfile
import tempfile
from resource_management.core.logger import Logger
from resource_management.core.exceptions import Fail
BACKUP_TEMP_DIR = "falcon-upgrade-backup"
BACKUP_DATA_ARCHIVE = "falcon-local-backup.tar"
BACKUP_CONF_ARCHIVE = "falcon-conf-backup.tar"
def post_stop_backup():
"""
Backs up the falcon configuration and data directories as part of the
upgrade process.
:return:
"""
Logger.info('Backing up Falcon data and configuration directories before upgrade...')
directoryMappings = _get_directory_mappings()
absolute_backup_dir = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR)
if not os.path.isdir(absolute_backup_dir):
os.makedirs(absolute_backup_dir)
for directory in directoryMappings:
if not os.path.isdir(directory):
raise Fail("Unable to backup missing directory {0}".format(directory))
archive = os.path.join(absolute_backup_dir, directoryMappings[directory])
Logger.info('Compressing {0} to {1}'.format(directory, archive))
if os.path.exists(archive):
os.remove(archive)
tarball = None
try:
tarball = tarfile.open(archive, "w")
tarball.add(directory, arcname=os.path.basename(directory))
finally:
if tarball:
tarball.close()
def pre_start_restore():
"""
Restores the data and configuration backups to their proper locations
after an upgrade has completed.
:return:
"""
Logger.info('Restoring Falcon data and configuration directories after upgrade...')
directoryMappings = _get_directory_mappings()
for directory in directoryMappings:
archive = os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR,
directoryMappings[directory])
if not os.path.isfile(archive):
raise Fail("Unable to restore missing backup archive {0}".format(archive))
Logger.info('Extracting {0} to {1}'.format(archive, directory))
tarball = None
try:
tarball = tarfile.open(archive, "r")
tarball.extractall(directory)
finally:
if tarball:
tarball.close()
# cleanup
shutil.rmtree(os.path.join(tempfile.gettempdir(), BACKUP_TEMP_DIR))
def _get_directory_mappings():
"""
Gets a dictionary of directory to archive name that represents the
directories that need to be backed up and their output tarball archive targets
:return: the dictionary of directory to tarball mappings
"""
import params
return { params.falcon_local_dir : BACKUP_DATA_ARCHIVE,
params.falcon_conf_dir : BACKUP_CONF_ARCHIVE } | 0.254046 | 0.067547 |
from . import entropy
from .shifterator import Shift
class WeightedAvgShift(Shift):
    """
    Shift object for calculating weighted scores of two systems of types,
    and the shift between them

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    type2score_1, type2score_2: dict or str, optional
        If dict, types are keys and values are scores associated with each
        type. If str, the name of a score lexicon included in Shifterator.
        If None and other type2score is None, defaults to uniform scores
        across types. Otherwise defaults to the other type2score dict
    reference_value: str or float, optional
        The reference score to use to partition scores into two different
        regimes. If 'average', uses the average score according to type2freq_1
        and type2score_1. If None and a lexicon is selected for type2score,
        uses the respective middle point in that lexicon's scale. Otherwise
        if None, uses zero as the reference point
    handle_missing_scores: str, optional
        If 'error', throws an error whenever a word has a score in one score
        dictionary but not the other. If 'exclude', excludes any word that is
        missing a score in one score dictionary from all word shift
        calculations, regardless if it may have a score in the other dictionary.
        If 'adopt' and the score is missing in one dictionary, then uses the
        score from the other dictionary if it is available
    stop_lens: iterable of 2-tuples, optional
        Denotes intervals of scores that should be excluded from word shifts
        calculations. Types with scores in this range will be excluded from word
        shift calculations
    stop_words: set, optional
        Denotes words that should be excluded from word shifts calculations.
        Defaults to an empty set
    normalization: str, optional
        If 'variation', normalizes shift scores so that the sum of
        their absolute values sums to 1. If 'trajectory', normalizes
        them so that the sum of shift scores is 1 or -1. The trajectory
        normalization cannot be applied if the total shift score is 0, so
        scores are left unnormalized if the total is 0 and 'trajectory' is
        specified
    """

    def __init__(
        self,
        type2freq_1,
        type2freq_2,
        type2score_1=None,
        type2score_2=None,
        reference_value=None,
        handle_missing_scores="error",
        stop_lens=None,
        stop_words=None,
        normalization="variation",
    ):
        # Use None as the default instead of a mutable `set()` literal so the
        # default object cannot be shared (and mutated) across instances;
        # callers that passed nothing still get an empty set, as before.
        if stop_words is None:
            stop_words = set()
        super().__init__(
            type2freq_1=type2freq_1,
            type2freq_2=type2freq_2,
            type2score_1=type2score_1,
            type2score_2=type2score_2,
            reference_value=reference_value,
            handle_missing_scores=handle_missing_scores,
            stop_lens=stop_lens,
            stop_words=stop_words,
            normalization=normalization,
        )
class ProportionShift(Shift):
    """
    Shift object for calculating differences in proportions of types across
    two systems

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    """

    def __init__(self, type2freq_1, type2freq_2):
        # Work on copies and make the two vocabularies identical: any type
        # seen in only one system gets a frequency of zero in the other.
        freqs_1 = type2freq_1.copy()
        freqs_2 = type2freq_2.copy()
        for t in set(freqs_1) | set(freqs_2):
            freqs_1.setdefault(t, 0)
            freqs_2.setdefault(t, 0)
        # Scores are left as None: proportion shifts depend on frequency only.
        super().__init__(
            type2freq_1=freqs_1,
            type2freq_2=freqs_2,
            type2score_1=None,
            type2score_2=None,
            reference_value=0,
            handle_missing_scores="error",
            stop_lens=None,
            stop_words=None,
            normalization="variation",
        )

    def get_shift_graph(
        self,
        top_n=50,
        show_plot=True,
        detailed=False,
        text_size_inset=True,
        cumulative_inset=True,
        title=None,
        filename=None,
        **kwargs
    ):
        """Draw the proportion shift graph; see `Shift.get_shift_graph`."""
        super().get_shift_graph(
            top_n=top_n,
            detailed=detailed,
            show_plot=show_plot,
            text_size_inset=text_size_inset,
            cumulative_inset=cumulative_inset,
            filename=filename,
            # Proportion differences always sum to zero, so the total bar
            # is suppressed.
            show_total=False,
            title="" if title is None else title,
            **kwargs
        )
class EntropyShift(Shift):
    """
    Shift object for calculating the shift in (Shannon/Tsallis) entropy
    between two systems

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    base: float, optional
        Base of the logarithm for calculating entropy
    alpha: float, optional
        Parameter of the generalized Tsallis entropy; `alpha=1` recovers the
        Shannon entropy. Higher `alpha` emphasizes more common types, lower
        `alpha` emphasizes less common types.
        For details: https://en.wikipedia.org/wiki/Tsallis_entropy
    reference_value: str or float, optional
        The reference score used to partition scores into two regimes. If
        'average', uses the average score according to system 1; otherwise
        zero is used as the reference point
    normalization: str, optional
        If 'variation', normalizes shift scores so the sum of their absolute
        values is 1. If 'trajectory', normalizes them so they sum to 1 or -1
        (scores are left unnormalized when the total shift is 0)
    """

    def __init__(
        self,
        type2freq_1,
        type2freq_2,
        base=2,
        alpha=1,
        reference_value=0,
        normalization="variation",
    ):
        # Copy the inputs so the caller's dictionaries are never mutated.
        freqs_1 = type2freq_1.copy()
        freqs_2 = type2freq_2.copy()
        p_1 = entropy.get_relative_freqs(freqs_1)
        p_2 = entropy.get_relative_freqs(freqs_2)
        # Per-type entropy contributions under each system.
        scores_1, scores_2 = entropy.get_entropy_scores(p_1, p_2, base, alpha)
        super().__init__(
            type2freq_1=freqs_1,
            type2freq_2=freqs_2,
            type2score_1=scores_1,
            type2score_2=scores_2,
            handle_missing_scores="error",
            stop_lens=None,
            stop_words=None,
            reference_value=reference_value,
            normalization=normalization,
        )
        # Expose the relative-frequency distributions for downstream use.
        self.type2p_1 = p_1
        self.type2p_2 = p_2
        self.alpha = alpha

    def get_shift_graph(
        self,
        top_n=50,
        show_plot=True,
        detailed=False,
        text_size_inset=True,
        cumulative_inset=True,
        filename=None,
        **kwargs
    ):
        """Draw the entropy shift graph; see `Shift.get_shift_graph`."""
        super().get_shift_graph(
            top_n=top_n,
            detailed=detailed,
            show_plot=show_plot,
            text_size_inset=text_size_inset,
            cumulative_inset=cumulative_inset,
            filename=filename,
            **kwargs
        )
class KLDivergenceShift(Shift):
    """
    Shift object for calculating the Kullback-Leibler divergence (KLD)
    between two systems

    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types.
        The KLD will be computed with respect to type2freq_1, i.e. D(T2 || T1).
        For the KLD to be well defined, every type must have a nonzero
        frequency in both dictionaries
    base: float, optional
        Base of the logarithm for calculating entropy
    reference_value: str or float, optional
        The reference score used to partition scores into two regimes
    normalization: str, optional
        If 'variation', normalizes shift scores so the sum of their absolute
        values is 1. If 'trajectory', normalizes them so they sum to 1 or -1
        (scores are left unnormalized when the total shift is 0)
    """

    def __init__(
        self,
        type2freq_1,
        type2freq_2,
        base=2,
        reference_value=0,
        normalization="variation",
    ):
        # The KLD is only defined when the two vocabularies coincide exactly.
        mismatched = set(type2freq_1.keys()) ^ set(type2freq_2.keys())
        if mismatched:
            raise ValueError(
                "There are types that appear in either type2freq_1 or "
                "type2freq_2 but not the other: the KL divergence is not "
                "well defined"
            )
        # Copy the inputs so the caller's dictionaries are never mutated.
        freqs_1 = type2freq_1.copy()
        freqs_2 = type2freq_2.copy()
        p_1 = entropy.get_relative_freqs(freqs_1)
        p_2 = entropy.get_relative_freqs(freqs_2)
        # Per-type -p*log(p) contributions under each system.
        # NOTE(review): the original comment called these "surprisal" scores,
        # but surprisal would be -log(p) without the leading p factor --
        # confirm which form the Shift machinery expects here.
        scores_1 = {t: -p * entropy.log(p, base) for t, p in p_1.items()}
        scores_2 = {t: -p * entropy.log(p, base) for t, p in p_2.items()}
        # NOTE(review): both frequency arguments below are p_2 (system 2's
        # distribution); confirm this weighting is intended rather than a
        # copy-paste of p_1 for the first argument. Behavior preserved as-is.
        super().__init__(
            type2freq_1=p_2,
            type2freq_2=p_2,
            type2score_1=scores_1,
            type2score_2=scores_2,
            handle_missing_scores="error",
            stop_lens=None,
            stop_words=None,
            reference_value=reference_value,
            normalization=normalization,
        )
        self.type2p_1 = p_1
        self.type2p_2 = p_2

    def get_shift_graph(
        self,
        top_n=50,
        show_plot=True,
        detailed=False,
        text_size_inset=True,
        cumulative_inset=True,
        title=None,
        filename=None,
        **kwargs
    ):
        """Draw the KLD shift graph; see `Shift.get_shift_graph`."""
        super().get_shift_graph(
            top_n=top_n,
            detailed=detailed,
            show_plot=show_plot,
            text_size_inset=text_size_inset,
            cumulative_inset=cumulative_inset,
            title="" if title is None else title,
            filename=filename,
            **kwargs
        )
class JSDivergenceShift(Shift):
    """
    Shift object for calculating the Jensen-Shannon divergence (JSD) between two
    systems
    Parameters
    ----------
    type2freq_1, type2freq_2: dict
        Keys are types of a system and values are frequencies of those types
    weight_1, weight_2: float
        Relative weights of type2freq_1 and type2frq_2 when constructing their
        mixed distribution. Should sum to 1
    base: float, optional
        Base of the logarithm for calculating entropy
    alpha: float, optional
        The parameter for the generalized Tsallis entropy. Setting `alpha=1`
        recovers the Shannon entropy. Higher `alpha` emphasizes more common
        types, lower `alpha` emphasizes less common types
        For details: https://en.wikipedia.org/wiki/Tsallis_entropy
    reference_value: str or float, optional
        The reference score to use to partition scores into two different
        regimes. Defaults to zero as the reference point
    normalization: str, optional
        If 'variation', normalizes shift scores so that the sum of
        their absolute values sums to 1. If 'trajectory', normalizes
        them so that the sum of shift scores is 1 or -1. The trajectory
        normalization cannot be applied if the total shift score is 0, so
        scores are left unnormalized if the total is 0 and 'trajectory' is
        specified
    """
    def __init__(
        self,
        type2freq_1,
        type2freq_2,
        base=2,
        weight_1=0.5,
        weight_2=0.5,
        alpha=1,
        reference_value=0,
        normalization="variation",
    ):
        # Check weights
        # NOTE(review): this is an exact float equality test; weight pairs
        # like 0.7 + 0.3 evaluate to 0.9999999999999999 and would be rejected.
        # Consider math.isclose(weight_1 + weight_2, 1) instead -- confirm.
        if weight_1 + weight_2 != 1:
            raise ValueError("weight_1 and weight_2 do not sum to 1")
        # Get relative frequencies (copies, so callers' dicts are untouched)
        type2freq_1 = type2freq_1.copy()
        type2freq_2 = type2freq_2.copy()
        type2p_1 = entropy.get_relative_freqs(type2freq_1)
        type2p_2 = entropy.get_relative_freqs(type2freq_2)
        # Get shift scores from the weighted mixture distribution type2m
        type2m, type2s_1, type2s_2 = entropy.get_jsd_scores(
            type2p_1,
            type2p_2,
            weight_1=weight_1,
            weight_2=weight_2,
            base=base,
            alpha=alpha,
        )
        # Initialize shift object
        super().__init__(
            type2freq_1=type2freq_1,
            type2freq_2=type2freq_2,
            type2score_1=type2s_1,
            type2score_2=type2s_2,
            reference_value=reference_value,
            handle_missing_scores="error",
            normalization=normalization,
            stop_lens=None,
            stop_words=None,
        )
        # Expose the distributions and mixture for downstream use.
        self.type2p_1 = type2p_1
        self.type2p_2 = type2p_2
        self.type2m = type2m
        self.alpha = alpha
    def get_shift_graph(
        self,
        top_n=50,
        show_plot=True,
        detailed=False,
        text_size_inset=True,
        cumulative_inset=True,
        title=None,
        filename=None,
        **kwargs
    ):
        # For the Shannon case (alpha=1) with a zero reference, every JSD
        # contribution is non-negative, so the plot can render them that way.
        if self.alpha == 1 and self.reference_value == 0:
            all_pos_contributions = True
        else:
            all_pos_contributions = False
        if title is None:
            title = ""
        super().get_shift_graph(
            top_n=top_n,
            text_size_inset=text_size_inset,
            cumulative_inset=cumulative_inset,
            detailed=detailed,
            show_plot=show_plot,
            filename=filename,
            title=title,
            all_pos_contributions=all_pos_contributions,
            **kwargs
        # NOTE(review): the trailing "| shifterator/shifts.py | from . import
        # entropy" below is dataset-row residue fused onto this line; the call
        # ends at ")".
        ) | shifterator/shifts.py | from . import entropy
from .shifterator import Shift
class WeightedAvgShift(Shift):
"""
Shift object for calculating weighted scores of two systems of types,
and the shift between them
Parameters
----------
type2freq_1, type2freq_2: dict
Keys are types of a system and values are frequencies of those types
type2score_1, type2score_2: dict or str, optional
If dict, types are keys and values are scores associated with each
type. If str, the name of a score lexicon included in Shifterator.
If None and other type2score is None, defaults to uniform scores
across types. Otherwise defaults to the other type2score dict
reference_value: str or float, optional
The reference score to use to partition scores into two different
regimes. If 'average', uses the average score according to type2freq_1
and type2score_1. If None and a lexicon is selected for type2score,
uses the respective middle point in that lexicon's scale. Otherwise
if None, uses zero as the reference point
handle_missing_scores: str, optional
If 'error', throws an error whenever a word has a score in one score
dictionary but not the other. If 'exclude', excludes any word that is
missing a score in one score dictionary from all word shift
calculations, regardless if it may have a score in the other dictionary.
If 'adopt' and the score is missing in one dictionary, then uses the
score from the other dictionary if it is available
stop_lens: iterable of 2-tuples, optional
Denotes intervals of scores that should be excluded from word shifts
calculations. Types with scores in this range will be excluded from word
shift calculations
stop_words: set, optional
Denotes words that should be excluded from word shifts calculations
normalization: str, optional
If 'variation', normalizes shift scores so that the sum of
their absolute values sums to 1. If 'trajectory', normalizes
them so that the sum of shift scores is 1 or -1. The trajectory
normalization cannot be applied if the total shift score is 0, so
scores are left unnormalized if the total is 0 and 'trajectory' is
specified
"""
def __init__(
self,
type2freq_1,
type2freq_2,
type2score_1=None,
type2score_2=None,
reference_value=None,
handle_missing_scores="error",
stop_lens=None,
stop_words=set(),
normalization="variation",
):
super().__init__(
type2freq_1=type2freq_1,
type2freq_2=type2freq_2,
type2score_1=type2score_1,
type2score_2=type2score_2,
reference_value=reference_value,
handle_missing_scores=handle_missing_scores,
stop_lens=stop_lens,
stop_words=stop_words,
normalization=normalization,
)
class ProportionShift(Shift):
"""
Shift object for calculating differences in proportions of types across two
systems
Parameters
__________
type2freq_1, type2freq_2: dict
Keys are types of a system and values are frequencies of those types
"""
def __init__(self, type2freq_1, type2freq_2):
# Set relative frequency to 0 for types that don't appear
type2freq_1 = type2freq_1.copy()
type2freq_2 = type2freq_2.copy()
types = set(type2freq_1.keys()).union(type2freq_2.keys())
for t in types:
if t not in type2freq_1:
type2freq_1[t] = 0
elif t not in type2freq_2:
type2freq_2[t] = 0
# Initialize shift object
super().__init__(
type2freq_1=type2freq_1,
type2freq_2=type2freq_2,
type2score_1=None,
type2score_2=None,
reference_value=0,
handle_missing_scores="error",
stop_lens=None,
stop_words=None,
normalization="variation",
)
def get_shift_graph(
self,
top_n=50,
show_plot=True,
detailed=False,
text_size_inset=True,
cumulative_inset=True,
title=None,
filename=None,
**kwargs
):
if title is None:
title = ""
super().get_shift_graph(
top_n=top_n,
text_size_inset=text_size_inset,
cumulative_inset=cumulative_inset,
detailed=detailed,
show_plot=show_plot,
filename=filename,
show_total=False,
title=title,
**kwargs
)
class EntropyShift(Shift):
"""
Shift object for calculating the shift in entropy between two systems
Parameters
----------
type2freq_1, type2freq_2: dict
Keys are types of a system and values are frequencies of those types
base: float, optional
Base of the logarithm for calculating entropy
alpha: float, optional
The parameter for the generalized Tsallis entropy. Setting `alpha=1`
recovers the Shannon entropy. Higher `alpha` emphasizes more common
types, lower `alpha` emphasizes less common types
For details: https://en.wikipedia.org/wiki/Tsallis_entropy
reference_value: str or float, optional
The reference score to use to partition scores into two different
regimes. If 'average', uses the average score according to type2freq_1
and type2score_1. Otherwise, uses zero as the reference point
normalization: str, optional
If 'variation', normalizes shift scores so that the sum of
their absolute values sums to 1. If 'trajectory', normalizes
them so that the sum of shift scores is 1 or -1. The trajectory
normalization cannot be applied if the total shift score is 0, so
scores are left unnormalized if the total is 0 and 'trajectory' is
specified
"""
def __init__(
self,
type2freq_1,
type2freq_2,
base=2,
alpha=1,
reference_value=0,
normalization="variation",
):
# Get relative frequencies
type2freq_1 = type2freq_1.copy()
type2freq_2 = type2freq_2.copy()
type2p_1 = entropy.get_relative_freqs(type2freq_1)
type2p_2 = entropy.get_relative_freqs(type2freq_2)
# Get entropy scores
type2s_1, type2s_2 = entropy.get_entropy_scores(type2p_1, type2p_2, base, alpha)
# Initialize shift
super().__init__(
type2freq_1=type2freq_1,
type2freq_2=type2freq_2,
type2score_1=type2s_1,
type2score_2=type2s_2,
handle_missing_scores="error",
stop_lens=None,
stop_words=None,
reference_value=reference_value,
normalization=normalization,
)
self.type2p_1 = type2p_1
self.type2p_2 = type2p_2
self.alpha = alpha
def get_shift_graph(
self,
top_n=50,
show_plot=True,
detailed=False,
text_size_inset=True,
cumulative_inset=True,
filename=None,
**kwargs
):
super().get_shift_graph(
top_n=top_n,
text_size_inset=text_size_inset,
cumulative_inset=cumulative_inset,
detailed=detailed,
show_plot=show_plot,
filename=filename,
**kwargs
)
class KLDivergenceShift(Shift):
"""
Shift object for calculating the Kullback-Leibler divergence (KLD) between
two systems
Parameters
----------
type2freq_1, type2freq_2: dict
Keys are types of a system and values are frequencies of those types.
The KLD will be computed with respect type2freq_1, i.e. D(T2 || T1).
For the KLD to be well defined, all types must have nonzero frequencies
in both type2freq_1 and type2_freq2
base: float, optional
Base of the logarithm for calculating entropy
stop_lens: iterable of 2-tuples, optional
Denotes intervals that should be excluded when calculating shift
scores
normalization: str, optional
If 'variation', normalizes shift scores so that the sum of
their absolute values sums to 1. If 'trajectory', normalizes
them so that the sum of shift scores is 1 or -1. The trajectory
normalization cannot be applied if the total shift score is 0, so
scores are left unnormalized if the total is 0 and 'trajectory' is
specified
"""
def __init__(
self,
type2freq_1,
type2freq_2,
base=2,
reference_value=0,
normalization="variation",
):
# Check that KLD is well defined
types_1 = set(type2freq_1.keys())
types_2 = set(type2freq_2.keys())
if len(types_1.symmetric_difference(types_2)) > 0:
err = (
"There are types that appear in either type2freq_1 or "
+ "type2freq_2 but not the other: the KL divergence is not "
+ "well defined"
)
raise ValueError(err)
# Get relative frequencies
type2freq_1 = type2freq_1.copy()
type2freq_2 = type2freq_2.copy()
type2p_1 = entropy.get_relative_freqs(type2freq_1)
type2p_2 = entropy.get_relative_freqs(type2freq_2)
# Get surprisal scores
type2s_1 = {t: p * -1 * entropy.log(p, base) for t, p in type2p_1.items()}
type2s_2 = {t: p * -1 * entropy.log(p, base) for t, p in type2p_2.items()}
# Initialize shift
super().__init__(
type2freq_1=type2p_2,
type2freq_2=type2p_2,
type2score_1=type2s_1,
type2score_2=type2s_2,
handle_missing_scores="error",
stop_lens=None,
stop_words=None,
reference_value=reference_value,
normalization=normalization,
)
self.type2p_1 = type2p_1
self.type2p_2 = type2p_2
def get_shift_graph(
self,
top_n=50,
show_plot=True,
detailed=False,
text_size_inset=True,
cumulative_inset=True,
title=None,
filename=None,
**kwargs
):
if title is None:
title = ""
super().get_shift_graph(
top_n=top_n,
text_size_inset=text_size_inset,
cumulative_inset=cumulative_inset,
detailed=detailed,
show_plot=show_plot,
title=title,
filename=filename,
**kwargs
)
class JSDivergenceShift(Shift):
"""
Shift object for calculating the Jensen-Shannon divergence (JSD) between two
systems
Parameters
----------
type2freq_1, type2freq_2: dict
Keys are types of a system and values are frequencies of those types
weight_1, weight_2: float
Relative weights of type2freq_1 and type2frq_2 when constructing their
mixed distribution. Should sum to 1
base: float, optional
Base of the logarithm for calculating entropy
alpha: float, optional
The parameter for the generalized Tsallis entropy. Setting `alpha=1`
recovers the Shannon entropy. Higher `alpha` emphasizes more common
types, lower `alpha` emphasizes less common types
For details: https://en.wikipedia.org/wiki/Tsallis_entropy
reference_value: str or float, optional
The reference score to use to partition scores into two different
regimes. Defaults to zero as the reference point
normalization: str, optional
If 'variation', normalizes shift scores so that the sum of
their absolute values sums to 1. If 'trajectory', normalizes
them so that the sum of shift scores is 1 or -1. The trajectory
normalization cannot be applied if the total shift score is 0, so
scores are left unnormalized if the total is 0 and 'trajectory' is
specified
"""
def __init__(
self,
type2freq_1,
type2freq_2,
base=2,
weight_1=0.5,
weight_2=0.5,
alpha=1,
reference_value=0,
normalization="variation",
):
# Check weights
if weight_1 + weight_2 != 1:
raise ValueError("weight_1 and weight_2 do not sum to 1")
# Get relative frequencies
type2freq_1 = type2freq_1.copy()
type2freq_2 = type2freq_2.copy()
type2p_1 = entropy.get_relative_freqs(type2freq_1)
type2p_2 = entropy.get_relative_freqs(type2freq_2)
# Get shift scores
type2m, type2s_1, type2s_2 = entropy.get_jsd_scores(
type2p_1,
type2p_2,
weight_1=weight_1,
weight_2=weight_2,
base=base,
alpha=alpha,
)
# Initialize shift object
super().__init__(
type2freq_1=type2freq_1,
type2freq_2=type2freq_2,
type2score_1=type2s_1,
type2score_2=type2s_2,
reference_value=reference_value,
handle_missing_scores="error",
normalization=normalization,
stop_lens=None,
stop_words=None,
)
self.type2p_1 = type2p_1
self.type2p_2 = type2p_2
self.type2m = type2m
self.alpha = alpha
def get_shift_graph(
self,
top_n=50,
show_plot=True,
detailed=False,
text_size_inset=True,
cumulative_inset=True,
title=None,
filename=None,
**kwargs
):
if self.alpha == 1 and self.reference_value == 0:
all_pos_contributions = True
else:
all_pos_contributions = False
if title is None:
title = ""
super().get_shift_graph(
top_n=top_n,
text_size_inset=text_size_inset,
cumulative_inset=cumulative_inset,
detailed=detailed,
show_plot=show_plot,
filename=filename,
title=title,
all_pos_contributions=all_pos_contributions,
**kwargs
) | 0.952794 | 0.600803 |
import StringIO
import unittest
import mock
from telemetry import story
from telemetry.internal.results import page_test_results
from telemetry import page as page_module
_GROUPING_KEY_DEFAULT = {'1': '2'}
def _MakeStorySet():
  """Build the fixture StorySet: four plain pages followed by two pages
  carrying _GROUPING_KEY_DEFAULT as their grouping keys."""
  story_set = story.StorySet()
  plain_urls = (
      'http://www.foo.com/',
      'http://www.bar.com/',
      'http://www.baz.com/',
      'http://www.roz.com/',
  )
  grouped_urls = (
      'http://www.fus.com/',
      'http://www.ro.com/',
  )
  # Pages are added in a fixed order; the tests below index into
  # story_set.stories positionally.
  for url in plain_urls:
    story_set.AddStory(page_module.Page(url, story_set, name=url))
  for url in grouped_urls:
    story_set.AddStory(
        page_module.Page(url, story_set,
                         grouping_keys=_GROUPING_KEY_DEFAULT,
                         name=url))
  return story_set
class GTestProgressReporterTest(unittest.TestCase):
  """Checks the GTest-style progress text emitted by PageTestResults.

  time.time() is patched so that per-story durations are deterministic;
  each test advances the mocked clock by hand.
  """

  def setUp(self):
    # Progress output is captured here and checked via assertOutputEquals().
    self._output_stream = StringIO.StringIO()
    self._mock_time = mock.patch('time.time').start()
    self._mock_time.return_value = 0.0

  def tearDown(self):
    # Undo the time.time() patch installed in setUp().
    mock.patch.stopall()

  def _MakePageTestResults(self):
    """Return a PageTestResults writing its progress to the captured stream."""
    return page_test_results.PageTestResults(
        progress_stream=self._output_stream,
        benchmark_name='bench',
        benchmark_description='foo')

  def assertOutputEquals(self, expected):
    self.assertMultiLineEqual(expected, self._output_stream.getvalue())

  def testSingleSuccessPage(self):
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[0])
      # 7 ms elapse between WillRunPage and DidRunPage.
      self._mock_time.return_value = 0.007
      results.DidRunPage(test_story_set.stories[0])
    expected = ('[ RUN ] bench/http://www.foo.com/\n'
                '[ OK ] bench/http://www.foo.com/ (7 ms)\n'
                '[ PASSED ] 1 test.\n\n')
    self.assertOutputEquals(expected)

  def testSingleSuccessPageWithGroupingKeys(self):
    # stories[4] is the first page created with grouping_keys; the keys are
    # appended to the story name after '@'.
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[4])
      self._mock_time.return_value = 0.007
      results.DidRunPage(test_story_set.stories[4])
    expected = ("[ RUN ] bench/http://www.fus.com/@{'1': '2'}\n"
                "[ OK ] bench/http://www.fus.com/@{'1': '2'} (7 ms)\n"
                "[ PASSED ] 1 test.\n\n")
    self.assertOutputEquals(expected)

  def testSingleFailedPage(self):
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[0])
      results.Fail('test fails')
      results.DidRunPage(test_story_set.stories[0])
    expected = ('[ RUN ] bench/http://www.foo.com/\n'
                '[ FAILED ] bench/http://www.foo.com/ (0 ms)\n'
                '[ PASSED ] 0 tests.\n'
                '[ FAILED ] 1 test, listed below:\n'
                '[ FAILED ] bench/http://www.foo.com/\n\n'
                '1 FAILED TEST\n\n')
    self.assertOutputEquals(expected)

  def testSingleFailedPageWithGroupingKeys(self):
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[4])
      results.Fail('test fails')
      results.DidRunPage(test_story_set.stories[4])
    expected = ("[ RUN ] bench/http://www.fus.com/@{'1': '2'}\n"
                "[ FAILED ] bench/http://www.fus.com/@{'1': '2'} (0 ms)\n"
                "[ PASSED ] 0 tests.\n"
                "[ FAILED ] 1 test, listed below:\n"
                "[ FAILED ] bench/http://www.fus.com/@{'1': '2'}\n\n"
                "1 FAILED TEST\n\n")
    self.assertOutputEquals(expected)

  def testSingleSkippedPage(self):
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[0])
      self._mock_time.return_value = 0.007
      results.Skip('Page skipped for testing reason')
      results.DidRunPage(test_story_set.stories[0])
    expected = ('[ RUN ] bench/http://www.foo.com/\n'
                '== Skipping story: Page skipped for testing reason ==\n'
                '[ SKIPPED ] bench/http://www.foo.com/ (7 ms)\n'
                '[ PASSED ] 0 tests.\n'
                '[ SKIPPED ] 1 test.\n\n')
    self.assertOutputEquals(expected)

  def testPassAndFailedPages(self):
    # Mixed run: pass, fail, fail, pass, pass (grouped), fail (grouped).
    # Per-story durations are the deltas between successive mocked times.
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[0])
      self._mock_time.return_value = 0.007
      results.DidRunPage(test_story_set.stories[0])
      results.WillRunPage(test_story_set.stories[1])
      self._mock_time.return_value = 0.009
      results.Fail('test fails')
      results.DidRunPage(test_story_set.stories[1])
      results.WillRunPage(test_story_set.stories[2])
      self._mock_time.return_value = 0.015
      results.Fail('test fails')
      results.DidRunPage(test_story_set.stories[2])
      results.WillRunPage(test_story_set.stories[3])
      self._mock_time.return_value = 0.020
      results.DidRunPage(test_story_set.stories[3])
      results.WillRunPage(test_story_set.stories[4])
      self._mock_time.return_value = 0.025
      results.DidRunPage(test_story_set.stories[4])
      results.WillRunPage(test_story_set.stories[5])
      self._mock_time.return_value = 0.030
      results.Fail('test fails')
      results.DidRunPage(test_story_set.stories[5])
    expected = ("[ RUN ] bench/http://www.foo.com/\n"
                "[ OK ] bench/http://www.foo.com/ (7 ms)\n"
                "[ RUN ] bench/http://www.bar.com/\n"
                "[ FAILED ] bench/http://www.bar.com/ (2 ms)\n"
                "[ RUN ] bench/http://www.baz.com/\n"
                "[ FAILED ] bench/http://www.baz.com/ (6 ms)\n"
                "[ RUN ] bench/http://www.roz.com/\n"
                "[ OK ] bench/http://www.roz.com/ (5 ms)\n"
                "[ RUN ] bench/http://www.fus.com/@{'1': '2'}\n"
                "[ OK ] bench/http://www.fus.com/@{'1': '2'} (5 ms)\n"
                "[ RUN ] bench/http://www.ro.com/@{'1': '2'}\n"
                "[ FAILED ] bench/http://www.ro.com/@{'1': '2'} (5 ms)\n"
                "[ PASSED ] 3 tests.\n"
                "[ FAILED ] 3 tests, listed below:\n"
                "[ FAILED ] bench/http://www.bar.com/\n"
                "[ FAILED ] bench/http://www.baz.com/\n"
                "[ FAILED ] bench/http://www.ro.com/@{'1': '2'}\n\n"
                "3 FAILED TESTS\n\n")
    self.assertOutputEquals(expected)

  def testStreamingResults(self):
    # Output is streamed: it is checked mid-run, after each story,
    # before the context manager closes.
    test_story_set = _MakeStorySet()
    with self._MakePageTestResults() as results:
      results.WillRunPage(test_story_set.stories[0])
      self._mock_time.return_value = 0.007
      results.DidRunPage(test_story_set.stories[0])
      expected = ('[ RUN ] bench/http://www.foo.com/\n'
                  '[ OK ] bench/http://www.foo.com/ (7 ms)\n')
      self.assertOutputEquals(expected)
      results.WillRunPage(test_story_set.stories[1])
      self._mock_time.return_value = 0.009
      results.Fail('test fails')
      results.DidRunPage(test_story_set.stories[1])
      expected = ('[ RUN ] bench/http://www.foo.com/\n'
                  '[ OK ] bench/http://www.foo.com/ (7 ms)\n'
                  '[ RUN ] bench/http://www.bar.com/\n'
                  '[ FAILED ] bench/http://www.bar.com/ (2 ms)\n')
      self.assertOutputEquals(expected)
import StringIO
import unittest
import mock
from telemetry import story
from telemetry.internal.results import page_test_results
from telemetry import page as page_module
_GROUPING_KEY_DEFAULT = {'1': '2'}
def _MakeStorySet():
story_set = story.StorySet()
story_set.AddStory(
page_module.Page('http://www.foo.com/', story_set,
name='http://www.foo.com/'))
story_set.AddStory(
page_module.Page('http://www.bar.com/', story_set,
name='http://www.bar.com/'))
story_set.AddStory(
page_module.Page('http://www.baz.com/', story_set,
name='http://www.baz.com/'))
story_set.AddStory(
page_module.Page('http://www.roz.com/', story_set,
name='http://www.roz.com/'))
story_set.AddStory(
page_module.Page('http://www.fus.com/', story_set,
grouping_keys=_GROUPING_KEY_DEFAULT,
name='http://www.fus.com/'))
story_set.AddStory(
page_module.Page('http://www.ro.com/', story_set,
grouping_keys=_GROUPING_KEY_DEFAULT,
name='http://www.ro.com/'))
return story_set
class GTestProgressReporterTest(unittest.TestCase):
def setUp(self):
self._output_stream = StringIO.StringIO()
self._mock_time = mock.patch('time.time').start()
self._mock_time.return_value = 0.0
def tearDown(self):
mock.patch.stopall()
def _MakePageTestResults(self):
return page_test_results.PageTestResults(
progress_stream=self._output_stream,
benchmark_name='bench',
benchmark_description='foo')
def assertOutputEquals(self, expected):
self.assertMultiLineEqual(expected, self._output_stream.getvalue())
def testSingleSuccessPage(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[0])
self._mock_time.return_value = 0.007
results.DidRunPage(test_story_set.stories[0])
expected = ('[ RUN ] bench/http://www.foo.com/\n'
'[ OK ] bench/http://www.foo.com/ (7 ms)\n'
'[ PASSED ] 1 test.\n\n')
self.assertOutputEquals(expected)
def testSingleSuccessPageWithGroupingKeys(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[4])
self._mock_time.return_value = 0.007
results.DidRunPage(test_story_set.stories[4])
expected = ("[ RUN ] bench/http://www.fus.com/@{'1': '2'}\n"
"[ OK ] bench/http://www.fus.com/@{'1': '2'} (7 ms)\n"
"[ PASSED ] 1 test.\n\n")
self.assertOutputEquals(expected)
def testSingleFailedPage(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[0])
results.Fail('test fails')
results.DidRunPage(test_story_set.stories[0])
expected = ('[ RUN ] bench/http://www.foo.com/\n'
'[ FAILED ] bench/http://www.foo.com/ (0 ms)\n'
'[ PASSED ] 0 tests.\n'
'[ FAILED ] 1 test, listed below:\n'
'[ FAILED ] bench/http://www.foo.com/\n\n'
'1 FAILED TEST\n\n')
self.assertOutputEquals(expected)
def testSingleFailedPageWithGroupingKeys(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[4])
results.Fail('test fails')
results.DidRunPage(test_story_set.stories[4])
expected = ("[ RUN ] bench/http://www.fus.com/@{'1': '2'}\n"
"[ FAILED ] bench/http://www.fus.com/@{'1': '2'} (0 ms)\n"
"[ PASSED ] 0 tests.\n"
"[ FAILED ] 1 test, listed below:\n"
"[ FAILED ] bench/http://www.fus.com/@{'1': '2'}\n\n"
"1 FAILED TEST\n\n")
self.assertOutputEquals(expected)
def testSingleSkippedPage(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[0])
self._mock_time.return_value = 0.007
results.Skip('Page skipped for testing reason')
results.DidRunPage(test_story_set.stories[0])
expected = ('[ RUN ] bench/http://www.foo.com/\n'
'== Skipping story: Page skipped for testing reason ==\n'
'[ SKIPPED ] bench/http://www.foo.com/ (7 ms)\n'
'[ PASSED ] 0 tests.\n'
'[ SKIPPED ] 1 test.\n\n')
self.assertOutputEquals(expected)
def testPassAndFailedPages(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[0])
self._mock_time.return_value = 0.007
results.DidRunPage(test_story_set.stories[0])
results.WillRunPage(test_story_set.stories[1])
self._mock_time.return_value = 0.009
results.Fail('test fails')
results.DidRunPage(test_story_set.stories[1])
results.WillRunPage(test_story_set.stories[2])
self._mock_time.return_value = 0.015
results.Fail('test fails')
results.DidRunPage(test_story_set.stories[2])
results.WillRunPage(test_story_set.stories[3])
self._mock_time.return_value = 0.020
results.DidRunPage(test_story_set.stories[3])
results.WillRunPage(test_story_set.stories[4])
self._mock_time.return_value = 0.025
results.DidRunPage(test_story_set.stories[4])
results.WillRunPage(test_story_set.stories[5])
self._mock_time.return_value = 0.030
results.Fail('test fails')
results.DidRunPage(test_story_set.stories[5])
expected = ("[ RUN ] bench/http://www.foo.com/\n"
"[ OK ] bench/http://www.foo.com/ (7 ms)\n"
"[ RUN ] bench/http://www.bar.com/\n"
"[ FAILED ] bench/http://www.bar.com/ (2 ms)\n"
"[ RUN ] bench/http://www.baz.com/\n"
"[ FAILED ] bench/http://www.baz.com/ (6 ms)\n"
"[ RUN ] bench/http://www.roz.com/\n"
"[ OK ] bench/http://www.roz.com/ (5 ms)\n"
"[ RUN ] bench/http://www.fus.com/@{'1': '2'}\n"
"[ OK ] bench/http://www.fus.com/@{'1': '2'} (5 ms)\n"
"[ RUN ] bench/http://www.ro.com/@{'1': '2'}\n"
"[ FAILED ] bench/http://www.ro.com/@{'1': '2'} (5 ms)\n"
"[ PASSED ] 3 tests.\n"
"[ FAILED ] 3 tests, listed below:\n"
"[ FAILED ] bench/http://www.bar.com/\n"
"[ FAILED ] bench/http://www.baz.com/\n"
"[ FAILED ] bench/http://www.ro.com/@{'1': '2'}\n\n"
"3 FAILED TESTS\n\n")
self.assertOutputEquals(expected)
def testStreamingResults(self):
test_story_set = _MakeStorySet()
with self._MakePageTestResults() as results:
results.WillRunPage(test_story_set.stories[0])
self._mock_time.return_value = 0.007
results.DidRunPage(test_story_set.stories[0])
expected = ('[ RUN ] bench/http://www.foo.com/\n'
'[ OK ] bench/http://www.foo.com/ (7 ms)\n')
self.assertOutputEquals(expected)
results.WillRunPage(test_story_set.stories[1])
self._mock_time.return_value = 0.009
results.Fail('test fails')
results.DidRunPage(test_story_set.stories[1])
expected = ('[ RUN ] bench/http://www.foo.com/\n'
'[ OK ] bench/http://www.foo.com/ (7 ms)\n'
'[ RUN ] bench/http://www.bar.com/\n'
'[ FAILED ] bench/http://www.bar.com/ (2 ms)\n')
self.assertOutputEquals(expected) | 0.510496 | 0.236318 |
import advection_fv4.interface_f as interface_f
import mesh.array_indexer as ai
def fluxes(my_data, rp, dt):
    """Construct the fluxes through the interfaces for the linear advection
    equation:

    .. math::

       a_t + u a_x + v a_y = 0

    We use a fourth-order Godunov method to construct the interface
    states, using Runge-Kutta integration. Since this is 4th-order,
    we need to be aware of the difference between a face-average and
    face-center for the fluxes.

    In the pure advection case, there is no Riemann problem we need to
    solve -- we just simply do upwinding. So there is only one 'state'
    at each interface, and the zone the information comes from depends
    on the sign of the velocity.

    Our convention is that the fluxes are going to be defined on the
    left edge of the computational zones::

        |             |             |             |
        |             |             |             |
       -+------+------+------+------+------+------+--
        |    i-1      |      i      |     i+1     |

                 a_l,i  a_r,i  a_l,i+1

    a_r,i and a_l,i+1 are computed using the information in
    zone i,j.

    Parameters
    ----------
    my_data : FV object
        The data object containing the grid and advective scalar that
        we are advecting.  The scalar is stored under the name "density".
    rp : RuntimeParameters object
        The runtime parameters for the simulation
    dt : float
        The timestep we are advancing through.
        NOTE(review): dt is not referenced anywhere in this routine.

    Returns
    -------
    out : ndarray, ndarray
        The fluxes averaged over the x and y faces
    """
    myg = my_data.grid
    a = my_data.get_var("density")
    # get the advection velocities
    u = rp.get_param("advection.u")
    v = rp.get_param("advection.v")
    limiter = rp.get_param("advection.limiter")
    # interpolate cell-average a to face-averaged a on interfaces in each
    # dimension -- this is MC Eq. 17
    if limiter == 0:
        # no limiting
        a_x = myg.scratch_array()
        a_x.v(buf=1)[:, :] = 7./12.*(a.ip(-1, buf=1) + a.v(buf=1)) - \
            1./12.*(a.ip(-2, buf=1) + a.ip(1, buf=1))
        a_y = myg.scratch_array()
        a_y.v(buf=1)[:, :] = 7./12.*(a.jp(-1, buf=1) + a.v(buf=1)) - \
            1./12.*(a.jp(-2, buf=1) + a.jp(1, buf=1))
    else:
        # limited reconstruction: build left/right interface states in
        # the x-direction (idir=1) and upwind on the sign of u
        a_l, a_r = interface_f.states(a, myg.qx, myg.qy, myg.ng, 1)
        if u > 0:
            a_x = ai.ArrayIndexer(d=a_l, grid=myg)
        else:
            a_x = ai.ArrayIndexer(d=a_r, grid=myg)
        # same in the y-direction (idir=2), upwinding on the sign of v
        a_l, a_r = interface_f.states(a, myg.qx, myg.qy, myg.ng, 2)
        if v > 0:
            a_y = ai.ArrayIndexer(d=a_l, grid=myg)
        else:
            a_y = ai.ArrayIndexer(d=a_r, grid=myg)
    # calculate the face-centered value a using the transverse Laplacian
    # this is MC Eq. 18, 19
    a_x_cc = myg.scratch_array()
    bufx = (0, 1, 0, 0)
    a_x_cc.v(buf=bufx)[:, :] = a_x.v(buf=bufx) - \
        1./24*(a_x.jp(-1, buf=bufx) - 2*a_x.v(buf=bufx) + a_x.jp(1, buf=bufx))
    a_y_cc = myg.scratch_array()
    bufy = (0, 0, 0, 1)
    a_y_cc.v(buf=bufy)[:, :] = a_y.v(buf=bufy) - \
        1./24*(a_y.ip(-1, buf=bufy) - 2*a_y.v(buf=bufy) + a_y.ip(1, buf=bufy))
    # compute the face-averaged fluxes -- this is MC Eq. 20
    F_x = myg.scratch_array()
    F_x_avg = u*a_x
    F_x.v(buf=bufx)[:, :] = u*a_x_cc.v(buf=bufx) + \
        1./24*(F_x_avg.jp(-1, buf=bufx) - 2*F_x_avg.v(buf=bufx) + F_x_avg.jp(1, buf=bufx))
    F_y = myg.scratch_array()
    F_y_avg = v*a_y
    F_y.v(buf=bufy)[:, :] = v*a_y_cc.v(buf=bufy) + \
        1./24*(F_y_avg.ip(-1, buf=bufy) - 2*F_y_avg.v(buf=bufy) + F_y_avg.ip(1, buf=bufy))
    return F_x, F_y
import mesh.array_indexer as ai
def fluxes(my_data, rp, dt):
"""Construct the fluxes through the interfaces for the linear advection
equation:
.. math::
a_t + u a_x + v a_y = 0
We use a fourth-order Godunov method to construct the interface
states, using Runge-Kutta integration. Since this is 4th-order,
we need to be aware of the difference between a face-average and
face-center for the fluxes.
In the pure advection case, there is no Riemann problem we need to
solve -- we just simply do upwinding. So there is only one 'state'
at each interface, and the zone the information comes from depends
on the sign of the velocity.
Our convection is that the fluxes are going to be defined on the
left edge of the computational zones::
| | | |
| | | |
-+------+------+------+------+------+------+--
| i-1 | i | i+1 |
a_l,i a_r,i a_l,i+1
a_r,i and a_l,i+1 are computed using the information in
zone i,j.
Parameters
----------
my_data : FV object
The data object containing the grid and advective scalar that
we are advecting.
rp : RuntimeParameters object
The runtime parameters for the simulation
dt : float
The timestep we are advancing through.
scalar_name : str
The name of the variable contained in my_data that we are
advecting
Returns
-------
out : ndarray, ndarray
The fluxes averaged over the x and y faces
"""
myg = my_data.grid
a = my_data.get_var("density")
# get the advection velocities
u = rp.get_param("advection.u")
v = rp.get_param("advection.v")
limiter = rp.get_param("advection.limiter")
# interpolate cell-average a to face-averaged a on interfaces in each
# dimension -- this is MC Eq. 17
if limiter == 0:
# no limiting
a_x = myg.scratch_array()
a_x.v(buf=1)[:, :] = 7./12.*(a.ip(-1, buf=1) + a.v(buf=1)) - \
1./12.*(a.ip(-2, buf=1) + a.ip(1, buf=1))
a_y = myg.scratch_array()
a_y.v(buf=1)[:, :] = 7./12.*(a.jp(-1, buf=1) + a.v(buf=1)) - \
1./12.*(a.jp(-2, buf=1) + a.jp(1, buf=1))
else:
a_l, a_r = interface_f.states(a, myg.qx, myg.qy, myg.ng, 1)
if u > 0:
a_x = ai.ArrayIndexer(d=a_l, grid=myg)
else:
a_x = ai.ArrayIndexer(d=a_r, grid=myg)
a_l, a_r = interface_f.states(a, myg.qx, myg.qy, myg.ng, 2)
if v > 0:
a_y = ai.ArrayIndexer(d=a_l, grid=myg)
else:
a_y = ai.ArrayIndexer(d=a_r, grid=myg)
# calculate the face-centered value a using the transverse Laplacian
# this is MC Eq. 18, 19
a_x_cc = myg.scratch_array()
bufx = (0, 1, 0, 0)
a_x_cc.v(buf=bufx)[:, :] = a_x.v(buf=bufx) - \
1./24*(a_x.jp(-1, buf=bufx) - 2*a_x.v(buf=bufx) + a_x.jp(1, buf=bufx))
a_y_cc = myg.scratch_array()
bufy = (0, 0, 0, 1)
a_y_cc.v(buf=bufy)[:, :] = a_y.v(buf=bufy) - \
1./24*(a_y.ip(-1, buf=bufy) - 2*a_y.v(buf=bufy) + a_y.ip(1, buf=bufy))
# compute the face-averaged fluxes -- this is MC Eq. 20
F_x = myg.scratch_array()
F_x_avg = u*a_x
F_x.v(buf=bufx)[:, :] = u*a_x_cc.v(buf=bufx) + \
1./24*(F_x_avg.jp(-1, buf=bufx) - 2*F_x_avg.v(buf=bufx) + F_x_avg.jp(1, buf=bufx))
F_y = myg.scratch_array()
F_y_avg = v*a_y
F_y.v(buf=bufy)[:, :] = v*a_y_cc.v(buf=bufy) + \
1./24*(F_y_avg.ip(-1, buf=bufy) - 2*F_y_avg.v(buf=bufy) + F_y_avg.ip(1, buf=bufy))
return F_x, F_y | 0.815894 | 0.763175 |
import torch
from torchdet3d.evaluation import compute_metrics_per_cls
from torchdet3d.losses import WingLoss, ADD_loss, DiagLoss
from torchdet3d.builders import (build_loss, build_optimizer, build_scheduler,
build_model, AVAILABLE_LOSS, AVAILABLE_OPTIMS, AVAILABLE_SCHEDS)
from torchdet3d.utils import read_py_config
class TestCasesPipeline:
    """Smoke tests for torchdet3d metrics, losses, builders and model
    inference on random data."""

    # Shared random fixtures: 128 samples, 9 keypoints of (x, y) each,
    # and 9 object categories.
    gt_kps = torch.rand(128,9,2)
    test_kps = torch.rand(128,9,2, requires_grad=True)
    gt_cats = torch.randint(0,9,(128,))
    test_cats = torch.rand(128,9)
    # NOTE(review): loaded once at class-definition time; assumes the test
    # process runs from the repo root so the relative path resolves.
    config = read_py_config("./configs/default_config.py")

    def test_metrics(self):
        # All four aggregate metrics are ratios in [0, 1]; the per-class
        # breakdown has one entry per category (9) with 5 values each.
        cls_metrics, ADD, SADD, IOU, acc = compute_metrics_per_cls(self.test_kps, self.gt_kps,
                                                                   self.test_cats, self.gt_cats)
        assert 0 <= ADD <= 1 and 0 <= SADD <= 1 and 0 <= IOU <= 1 and 0 <= acc <= 1
        assert len(cls_metrics) == 9 and len(cls_metrics[0]) == 5

    def test_losses(self):
        # Each regression loss must produce a finite value on random
        # sigmoid-squashed inputs and be differentiable.
        for loss in [WingLoss(), ADD_loss(), DiagLoss()]:
            input_ = torch.sigmoid(torch.randn(512, 9, 2, requires_grad=True))
            target = torch.sigmoid(torch.randn(512, 9, 2))
            output = loss(input_, target)
            assert not torch.any(torch.isnan(output))
            output.backward()

    def test_builders(self):
        # Exercise every registered loss (paired with cross_entropy),
        # optimizer and scheduler through the builder functions.
        for loss_ in AVAILABLE_LOSS:
            if loss_ != 'cross_entropy':
                self.config['loss']['names']=[loss_, 'cross_entropy']
                self.config.loss.coeffs=([1.],[1.])
                regress_criterions, class_criterions = build_loss(self.config)
                assert len(regress_criterions) == 1 and len(class_criterions) == 1
        model = build_model(self.config)
        assert model is not None
        for optim_ in AVAILABLE_OPTIMS:
            self.config['optim']['name'] = optim_
            optimizer = build_optimizer(self.config, model)
            assert optimizer is not None
        for schd in AVAILABLE_SCHEDS:
            self.config['scheduler']['name'] = schd
            scheduler = build_scheduler(self.config, optimizer)
            assert scheduler is not None

    def test_random_inference(self):
        # Forward pass on a random batch returns per-sample keypoints and
        # category logits of the expected shapes.
        model = build_model(self.config)
        image = torch.rand(128,3,224,224)
        kp, cat = model(image, self.gt_cats)
        assert kp.shape == (128,9,2)
        assert cat.shape == (128,9)
from torchdet3d.evaluation import compute_metrics_per_cls
from torchdet3d.losses import WingLoss, ADD_loss, DiagLoss
from torchdet3d.builders import (build_loss, build_optimizer, build_scheduler,
build_model, AVAILABLE_LOSS, AVAILABLE_OPTIMS, AVAILABLE_SCHEDS)
from torchdet3d.utils import read_py_config
class TestCasesPipeline:
gt_kps = torch.rand(128,9,2)
test_kps = torch.rand(128,9,2, requires_grad=True)
gt_cats = torch.randint(0,9,(128,))
test_cats = torch.rand(128,9)
config = read_py_config("./configs/default_config.py")
def test_metrics(self):
cls_metrics, ADD, SADD, IOU, acc = compute_metrics_per_cls(self.test_kps, self.gt_kps,
self.test_cats, self.gt_cats)
assert 0 <= ADD <= 1 and 0 <= SADD <= 1 and 0 <= IOU <= 1 and 0 <= acc <= 1
assert len(cls_metrics) == 9 and len(cls_metrics[0]) == 5
def test_losses(self):
for loss in [WingLoss(), ADD_loss(), DiagLoss()]:
input_ = torch.sigmoid(torch.randn(512, 9, 2, requires_grad=True))
target = torch.sigmoid(torch.randn(512, 9, 2))
output = loss(input_, target)
assert not torch.any(torch.isnan(output))
output.backward()
def test_builders(self):
for loss_ in AVAILABLE_LOSS:
if loss_ != 'cross_entropy':
self.config['loss']['names']=[loss_, 'cross_entropy']
self.config.loss.coeffs=([1.],[1.])
regress_criterions, class_criterions = build_loss(self.config)
assert len(regress_criterions) == 1 and len(class_criterions) == 1
model = build_model(self.config)
assert model is not None
for optim_ in AVAILABLE_OPTIMS:
self.config['optim']['name'] = optim_
optimizer = build_optimizer(self.config, model)
assert optimizer is not None
for schd in AVAILABLE_SCHEDS:
self.config['scheduler']['name'] = schd
scheduler = build_scheduler(self.config, optimizer)
assert scheduler is not None
def test_random_inference(self):
model = build_model(self.config)
image = torch.rand(128,3,224,224)
kp, cat = model(image, self.gt_cats)
assert kp.shape == (128,9,2)
assert cat.shape == (128,9) | 0.772101 | 0.449091 |
import time
import numpy as np
import theano
floatX = theano.config.floatX
class AgentMemory(object):
    """Circular replay buffer of frames plus per-frame metadata.

    An *experience* is a single frame together with its recorded action,
    reward, terminal flag and task index; a *state* (phi) is ``phiLength``
    consecutive frames stacked into one numpy tensor.  Writes wrap around
    once ``memorySize`` experiences have been stored.
    """

    def __init__(self, rng, stateShape, phiLength=4, memorySize=10000,
                 discountRate=1.0, numTasks=1):
        """
        Arguments:
            rng - random number generator used when sampling batches;
                  must provide randint(low, high)
            stateShape - tuple containing the dimensions of the experiences
                  being stored
            phiLength - number of images in a state.
            memorySize - The number of experiences that can be stored
            discountRate - per-step discount applied when accumulating
                  k-step returns in getRandomExperienceBatch
            numTasks - number of distinct tasks whose sample counts are
                  tracked
        An experience is a single frame, a state is several frames
        (phiLength) in a single numpy tensor.
        """
        self.currentMemoryIndex = 0   # next slot to write
        self.numberOfExperiences = 0  # total ever added (never capped)
        self.memorySize = memorySize
        self.stateShape = stateShape
        self.phiLength = phiLength
        self.numTasks = numTasks
        self.rng = rng
        self.discountRate = discountRate
        # Per-task counts: samples currently resident / total ever added.
        self.taskSampleCount = np.zeros(self.numTasks, dtype='int32')
        self.totalTaskSampleCount = np.zeros(self.numTasks, dtype='int32')
        self.stateMemory = np.zeros((self.memorySize,) + self.stateShape, dtype='uint8')
        self.rewardMemory = np.zeros(self.memorySize, dtype=floatX)
        self.actionMemory = np.zeros(self.memorySize, dtype='int32')
        self.terminalMemory = np.zeros(self.memorySize, dtype='int32')
        # -1 marks a slot that has never held an experience.
        self.taskMemory = -1 * np.ones(self.memorySize, dtype='int32')

    def addFrame(self, frame, memoryIndex=None):
        """Store a single frame at memoryIndex (default: current write slot).

        Does NOT advance the write pointer; addExperience does that.
        """
        # BUG FIX: the original assert referenced the bare name `memorySize`
        # (a NameError whenever an explicit index was passed); it must be
        # the instance attribute.
        assert memoryIndex is None or (0 <= memoryIndex < self.memorySize)
        if memoryIndex is None:
            memoryIndex = self.currentMemoryIndex
        assert self.stateShape[0] == frame.shape[0]
        assert self.stateShape[1] == frame.shape[1]
        self.stateMemory[memoryIndex, ...] = frame

    def addExperience(self, reward, action, terminal=0, taskIndex=0, memoryIndex=None):
        """Record metadata for the frame at memoryIndex, keep per-task
        counts consistent, and advance the circular write pointer."""
        # BUG FIX: same bare-`memorySize` NameError as in addFrame.
        assert memoryIndex is None or (0 <= memoryIndex < self.memorySize)
        if memoryIndex is None:
            memoryIndex = self.currentMemoryIndex
        self.actionMemory[memoryIndex] = action
        self.rewardMemory[memoryIndex] = reward
        self.terminalMemory[memoryIndex] = terminal
        if self.taskMemory[memoryIndex] != -1:
            # Overwriting another memory: release its resident-count first.
            self.taskSampleCount[self.taskMemory[memoryIndex]] -= 1
        self.taskSampleCount[taskIndex] += 1
        self.totalTaskSampleCount[taskIndex] += 1
        self.taskMemory[memoryIndex] = taskIndex
        self.currentMemoryIndex = (self.currentMemoryIndex + 1) % self.memorySize
        self.numberOfExperiences += 1

    def addSample(self, image, action, reward, terminal, taskIndex):
        """Convenience wrapper: store a frame and its metadata in one call."""
        self.addFrame(image)
        self.addExperience(reward, action, terminal, taskIndex)

    def getCurrentPhi(self, image):
        """Return the state ending at `image`: the phiLength-1 most recently
        stored frames followed by the given (not yet stored) frame."""
        phiIndices = np.arange(self.currentMemoryIndex - self.phiLength + 1,
                               self.currentMemoryIndex) % self.memorySize
        phi = np.empty((self.phiLength,) + self.stateShape, dtype=floatX)
        phi[0:self.phiLength - 1] = self.stateMemory[phiIndices]
        phi[-1] = image
        return phi

    def getPhiIndices(self, index=None):
        """Return the phiLength buffer indices of the state ending at
        `index` (default: the current write slot), wrapping around."""
        # BUG FIX: the original asserted on `index` BEFORE substituting the
        # None default, so the default call compared None against an int.
        if index is None:
            index = self.currentMemoryIndex
        assert index < self.memorySize
        assert index < self.numberOfExperiences
        startingIndex = (index - self.phiLength + 1) % self.memorySize
        # range() (not xrange) works identically here on Python 2 and 3.
        phiIndices = [(startingIndex + i) % self.memorySize for i in range(self.phiLength)]
        return phiIndices

    def getPhi(self, index=None):
        """Return the stacked frames of the state ending at `index`."""
        phiIndices = self.getPhiIndices(index)
        phi = np.array([self.stateMemory[i] for i in phiIndices])
        return phi

    def getRandomExperienceBatch(self, batchSize, kReturnLength=1, taskIndex=None):
        """Sample batchSize experiences uniformly at random, building
        k-step discounted returns for each.

        Returns a 7-tuple (states, actions, returns, gammas, nextStates,
        nextActions, terminals), or None when the request cannot be
        satisfied (bad kReturnLength, or too few stored samples).
        """
        if kReturnLength < 1:
            return None
        if batchSize > self.numberOfExperiences - self.phiLength + 1:
            return None
        # BUG FIX: the original unconditionally evaluated
        # self.taskSampleCount[taskIndex] before the None guard below,
        # which crashed for the default taskIndex=None.
        if taskIndex is not None and self.taskSampleCount[taskIndex] < batchSize:
            return None
        experienceStateShape = (batchSize, self.phiLength) + self.stateShape
        batchStates = np.zeros(experienceStateShape, dtype='uint8')
        batchNextStates = np.zeros(experienceStateShape, dtype='uint8')
        batchRewards = np.zeros((batchSize, 1), dtype=floatX)
        batchActions = np.zeros((batchSize, 1), dtype='int32')
        batchNextActions = np.zeros((batchSize, 1), dtype='int32')
        batchTerminals = np.zeros((batchSize, 1), dtype='bool')
        batchGammas = np.zeros((batchSize, 1), dtype=floatX)
        count = 0
        maxIndex = min(self.numberOfExperiences, self.memorySize)
        while count < batchSize:
            # NOTE(review): the upper bound of randint is inclusive for
            # random.Random but exclusive for numpy RandomState; this call
            # mirrors the original -- confirm against the rng actually used.
            index = self.rng.randint(0, maxIndex - 1)
            phiIndices = self.getPhiIndices(index)
            # Picked a sample too close to start of episode - sample state
            # crosses episode boundary
            if any(self.terminalMemory[i] for i in phiIndices):
                continue
            # Sample is not of the requested task
            if taskIndex is not None and self.taskMemory[index] != taskIndex:
                continue
            # There is a region of experience we dont want to sample from due
            # to filling in new experience in the replay.  This is the region
            # between currentMemoryIndex - kReturnLength (samples there lack
            # k full steps for the return) and currentMemoryIndex + phiLength
            # (phi states there mix new and old memories).
            upperBound = self.currentMemoryIndex + self.phiLength
            lowerBound = self.currentMemoryIndex - kReturnLength
            if upperBound % self.memorySize < upperBound:
                # Exclusion zone wraps past the end of the circular buffer.
                if index >= lowerBound or index <= upperBound % self.memorySize:
                    continue
            else:
                if lowerBound % self.memorySize > lowerBound:
                    # Exclusion zone wraps past the start of the buffer.
                    if index >= lowerBound % self.memorySize or index <= upperBound:
                        continue
                elif index <= upperBound and index >= lowerBound:
                    continue
            # Accumulate the discounted k-step return, stopping early at a
            # terminal transition.
            currentReturn = 0.0
            currentDiscount = 1.0
            currentIndex = index
            for i in range(0, kReturnLength):
                currentIndex = (index + i) % self.memorySize
                currentReturn += currentDiscount * self.rewardMemory[currentIndex]
                currentDiscount *= self.discountRate
                endIndex = (currentIndex + 1) % self.memorySize
                if self.terminalMemory[endIndex]:
                    break
            batchStates[count] = self.getPhi(index)
            batchNextStates[count] = self.getPhi(endIndex)
            batchRewards[count] = currentReturn
            batchGammas[count] = currentDiscount
            batchActions[count] = self.actionMemory[index]
            batchNextActions[count] = self.actionMemory[endIndex]
            batchTerminals[count] = self.terminalMemory[endIndex]
            count += 1
        return (batchStates, batchActions, batchRewards, batchGammas,
                batchNextStates, batchNextActions, batchTerminals)

    def getLowestSampledTask(self):
        """Return the index of the task with the fewest resident samples."""
        return np.argmin(self.taskSampleCount)

    def __len__(self):
        """ Return the total number of available data items. """
        return max(0, min(self.numberOfExperiences - self.phiLength,
                          self.memorySize - self.phiLength))
def main():
    """Smoke-test AgentMemory by filling it past capacity and printing the
    resulting buffer contents.  Returns the memory for interactive use."""
    # BUG FIX: AgentMemory takes the rng as its FIRST argument; the original
    # call passed the state shape in its place and misaligned every argument.
    m = AgentMemory(np.random.RandomState(0), (3, 2), phiLength=4, memorySize=10)
    for i in range(4):
        frame = np.random.randn(3, 2)
        m.addFrame(frame)
        m.addExperience(1, 0, False)
    # Single-argument print() calls are valid on both Python 2 and 3.
    print("StateMemory:")
    print(m.stateMemory)
    print("\nPhi:\n")
    print(m.getPhi())
    # Overfill the 10-slot buffer so writes wrap around.
    for i in range(15):
        frame = np.random.randn(3, 2)
        m.addFrame(frame)
        m.addExperience(1, 0, False)
    frame = np.random.randn(3, 2)
    m.addFrame(frame)
    m.addExperience(0, 1, True)
    print("\nCurrent Memory Index:" + str(m.currentMemoryIndex) + "\n")
    frame = np.random.randn(3, 2)
    m.addFrame(frame)
    print(m.stateMemory)
    print("\nCurrent Phi crossing end of memory:\n" + str(m.getPhi()))
    return m
import numpy as np
import theano
floatX = theano.config.floatX
class AgentMemory(object):
def __init__(self, rng, stateShape, phiLength=4, memorySize=10000, discountRate = 1.0, numTasks = 1):
"""
Arguments:
stateShape - tuple containing the dimensions of the experiences being stored
phiLength - number of images in a state.
memorySize - The number of experiences that can be stored
An experience is a single frame, a state is several frames (phiLength) in a single numpy tensor
"""
self.currentMemoryIndex = 0
self.numberOfExperiences = 0
self.memorySize = memorySize
self.stateShape = stateShape
self.phiLength = phiLength
self.numTasks = numTasks
self.rng = rng
self.discountRate = discountRate
self.taskSampleCount = np.zeros(self.numTasks, dtype='int32')
self.totalTaskSampleCount = np.zeros(self.numTasks, dtype='int32')
self.stateMemory = np.zeros((self.memorySize,) + self.stateShape , dtype = 'uint8')
self.rewardMemory = np.zeros(self.memorySize, dtype = floatX)
self.actionMemory = np.zeros(self.memorySize, dtype='int32')
self.terminalMemory = np.zeros(self.memorySize, dtype='int32')
self.taskMemory = -1 * np.ones(self.memorySize, dtype='int32')
def addFrame(self, frame, memoryIndex=None):
    """Store a single frame at memoryIndex (default: current write slot)."""
    # BUG FIX: the original assert referenced the undefined bare name
    # `memorySize` (NameError when an explicit index was passed); it must
    # be the instance attribute self.memorySize.
    assert memoryIndex is None or (0 <= memoryIndex < self.memorySize)
    if memoryIndex is None:
        memoryIndex = self.currentMemoryIndex
    assert self.stateShape[0] == frame.shape[0]
    assert self.stateShape[1] == frame.shape[1]
    self.stateMemory[memoryIndex, ...] = frame
def addExperience(self, reward, action, terminal=0, taskIndex=0, memoryIndex=None):
    """Record metadata for the frame at memoryIndex, keep per-task counts
    consistent, and advance the circular write pointer."""
    # BUG FIX: the original assert referenced the undefined bare name
    # `memorySize`; it must be the instance attribute self.memorySize.
    assert memoryIndex is None or (0 <= memoryIndex < self.memorySize)
    if memoryIndex is None:
        memoryIndex = self.currentMemoryIndex
    self.actionMemory[memoryIndex] = action
    self.rewardMemory[memoryIndex] = reward
    self.terminalMemory[memoryIndex] = terminal
    if self.taskMemory[memoryIndex] != -1:
        # Overwriting another memory: release its resident-count first.
        self.taskSampleCount[self.taskMemory[memoryIndex]] -= 1
    self.taskSampleCount[taskIndex] += 1
    self.totalTaskSampleCount[taskIndex] += 1
    self.taskMemory[memoryIndex] = taskIndex
    self.currentMemoryIndex = (self.currentMemoryIndex + 1) % self.memorySize
    self.numberOfExperiences += 1
def addSample(self, image, action, reward, terminal, taskIndex):
self.addFrame(image)
self.addExperience(reward, action, terminal, taskIndex)
def getCurrentPhi(self, image):
phiIndices = np.arange(self.currentMemoryIndex - self.phiLength + 1, self.currentMemoryIndex) % self.memorySize
phi = np.empty((self.phiLength,) + self.stateShape, dtype = floatX)
phi[0:self.phiLength - 1] = self.stateMemory[phiIndices]
phi[-1] = image
return phi
def getPhiIndices(self, index = None):
assert index < self.memorySize
assert index < self.numberOfExperiences
if index == None:
index = self.currentMemoryIndex
startingIndex = (index - self.phiLength + 1) % self.memorySize
phiIndices = [(startingIndex + i) % self.memorySize for i in xrange(self.phiLength)]
return phiIndices
def getPhi(self, index = None):
phiIndices = self.getPhiIndices(index)
phi = np.array([self.stateMemory[i] for i in phiIndices])
return phi
def getRandomExperienceBatch(self, batchSize, kReturnLength = 1, taskIndex = None):
if kReturnLength < 1:
return None
if batchSize > self.numberOfExperiences - self.phiLength + 1:
return None
if self.taskSampleCount[taskIndex] < batchSize:
return None
if taskIndex is not None and self.taskSampleCount[taskIndex] < batchSize:
return None
experienceStateShape = (batchSize, self.phiLength) + self.stateShape
batchStates = np.zeros(experienceStateShape, dtype='uint8')
batchNextStates = np.zeros(experienceStateShape, dtype='uint8')
batchRewards = np.zeros((batchSize, 1), dtype=floatX)
batchActions = np.zeros((batchSize, 1), dtype='int32')
batchNextActions= np.zeros((batchSize, 1), dtype='int32')
batchTerminals = np.zeros((batchSize, 1), dtype='bool')
batchGammas = np.zeros((batchSize, 1), dtype=floatX)
count = 0
maxIndex = min(self.numberOfExperiences, self.memorySize)
while count < batchSize:
index = self.rng.randint(0, maxIndex - 1)
phiIndices = self.getPhiIndices(index)
#Picked a sample too close to start of episode - sample state crosses episode boundary
if True in [self.terminalMemory[i] for i in phiIndices]:
continue
#Sample is not of the current task
if taskIndex != None and self.taskMemory[index] != taskIndex:
continue
#There is a region of experience we dont want to sample from due to filling in new experience in the replay
#This area is the region between the current memory index minus the desired return length and
#the current memory index plus the phi length
#And memories slightly over the current index will have their phi states invalidated by going between new and old memories
#And memories kReturnLength behind the current index cant be sampled as they dont have k steps to form a full k step return
upperBound = self.currentMemoryIndex + self.phiLength
lowerBound = self.currentMemoryIndex - kReturnLength
if upperBound % self.memorySize < upperBound:
#looped over end of circular buffer by finding starting acceptable index thats above the upper bound
if index >= lowerBound or index <= upperBound % self.memorySize:
continue
else:
if lowerBound % self.memorySize > lowerBound:
#Looped from start to end of circular buffer by subtracting kReturnLength when finding the lower bound
if index >= lowerBound % self.memorySize or index <= upperBound:
continue
elif index <= upperBound and index >= lowerBound:
continue
currentReturn = 0.0
currentDiscount = 1.0
currentIndex = index
for i in xrange(0, kReturnLength):
currentIndex = (index + i) % self.memorySize
currentReturn += currentDiscount * self.rewardMemory[currentIndex]
currentDiscount *= self.discountRate
endIndex = (currentIndex + 1) % self.memorySize
if self.terminalMemory[endIndex] == True:
break
batchStates[count] = self.getPhi(index)
batchNextStates[count] = self.getPhi(endIndex)
batchRewards[count] = currentReturn
batchGammas[count] = currentDiscount
batchActions[count] = self.actionMemory[index]
batchNextActions[count]= self.actionMemory[endIndex]
batchTerminals[count] = self.terminalMemory[endIndex]
count += 1
return batchStates, batchActions, batchRewards, batchGammas, batchNextStates, batchNextActions, batchTerminals
def getLowestSampledTask(self):
return np.argmin(self.taskSampleCount)
def __len__(self):
""" Return the total number of available data items. """
return max(0, min(self.numberOfExperiences - self.phiLength, self.memorySize - self.phiLength))
def main():
    """Smoke test: exercise frame storage, wrap-around and phi extraction."""
    # Bug fix: the original called AgentMemory((3,2), 4, 10), omitting the
    # required `rng` argument, so every argument was bound one slot off
    # (rng=(3,2), stateShape=4, ...) and construction failed.
    m = AgentMemory(np.random.RandomState(0), (3, 2), phiLength=4, memorySize=10)
    for i in range(4):
        frame = np.random.randn(3, 2)
        m.addFrame(frame)
        m.addExperience(1, 0, False)
    # Single-argument print() calls behave identically on Python 2 and 3.
    print("StateMemory:")
    print(m.stateMemory)
    print("\nPhi:\n")
    print(m.getPhi())
    for i in range(15):
        frame = np.random.randn(3, 2)
        m.addFrame(frame)
        m.addExperience(1, 0, False)
    frame = np.random.randn(3, 2)
    m.addFrame(frame)
    m.addExperience(0, 1, True)
    print("\nCurrent Memory Index:" + str(m.currentMemoryIndex) + "\n")
    frame = np.random.randn(3, 2)
    m.addFrame(frame)
    print(m.stateMemory)
    print("\nCurrent Phi crossing end of memory:\n" + str(m.getPhi()))
    return m
# Run the self-test only when executed as a script.
if __name__ == "__main__":
    main()
from conduit.article.models import Article
from conduit.auth.models import User
from conduit.auth.tests.test_auth_views import USER_ONE_JWT
from conduit.auth.tests.test_auth_views import USER_TWO_JWT
from sqlalchemy.orm.session import Session
from webtest import TestApp
def test_feed(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles/feed."""
    # Authenticated as user "two"; the expected payload lists author "one"'s
    # articles ("bar" then "foo"), each reported with following=True.
    res = testapp.get(
        "/api/articles/feed",
        headers={"Authorization": f"Token {USER_TWO_JWT}"},
        status=200,
    )
    assert res.json == {
        "articlesCount": 2,
        "articles": [
            {
                "slug": "bar",
                "title": "Bär",
                "description": "Bär desc",
                "body": "Bär body",
                "createdAt": "2019-03-03T03:03:03.000Z",
                "updatedAt": "2019-04-04T04:04:04.000Z",
                "tagList": [],
                "favorited": False,
                "favoritesCount": 0,
                "author": {
                    "username": "one",
                    "bio": None,
                    "image": None,
                    "following": True,
                },
            },
            {
                "slug": "foo",
                "title": "Foö",
                "description": "Foö desc",
                "body": "Foö body",
                "createdAt": "2019-01-01T01:01:01.000Z",
                "updatedAt": "2019-02-02T02:02:02.000Z",
                "tagList": ["dogs", "cats"],
                "favorited": False,
                "favoritesCount": 0,
                "author": {
                    "username": "one",
                    "bio": None,
                    "image": None,
                    "following": True,
                },
            },
        ],
    }
def test_GET_articles(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles."""
    # Unauthenticated listing: all three demo articles, newest first,
    # with following=False for every author.
    res = testapp.get("/api/articles", status=200)
    assert res.json == {
        "articlesCount": 3,
        "articles": [
            {
                "slug": "i-am-johnjacob",
                "title": "I am <NAME>",
                "description": "johnjacob desc",
                "body": "johnjacob body",
                "createdAt": "2019-05-05T05:05:05.000Z",
                "updatedAt": "2019-06-06T06:06:06.000Z",
                "tagList": [],
                "favorited": False,
                "favoritesCount": 0,
                "author": {
                    "username": "johnjacob",
                    "bio": None,
                    "image": None,
                    "following": False,
                },
            },
            {
                "slug": "bar",
                "title": "Bär",
                "description": "Bär desc",
                "body": "Bär body",
                "createdAt": "2019-03-03T03:03:03.000Z",
                "updatedAt": "2019-04-04T04:04:04.000Z",
                "tagList": [],
                "favorited": False,
                "favoritesCount": 0,
                "author": {
                    "username": "one",
                    "bio": None,
                    "image": None,
                    "following": False,
                },
            },
            {
                "slug": "foo",
                "title": "Foö",
                "description": "Foö desc",
                "body": "Foö body",
                "createdAt": "2019-01-01T01:01:01.000Z",
                "updatedAt": "2019-02-02T02:02:02.000Z",
                "tagList": ["dogs", "cats"],
                "favorited": False,
                "favoritesCount": 0,
                "author": {
                    "username": "one",
                    "bio": None,
                    "image": None,
                    "following": False,
                },
            },
        ],
    }
def test_GET_articles_filter_by_author(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles, filter by author."""
    # Author "one" owns two demo articles, newest first.
    response = testapp.get("/api/articles?author=one", status=200)
    payload = response.json
    assert payload["articlesCount"] == 2
    assert [a["slug"] for a in payload["articles"]] == ["bar", "foo"]
    # Author "two" has none.
    response = testapp.get("/api/articles?author=two", status=200)
    assert response.json["articlesCount"] == 0
def test_GET_articles_filter_by_tag(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles, filter by tag."""
    # Only "foo" is tagged with "dogs".
    response = testapp.get("/api/articles?tag=dogs", status=200)
    payload = response.json
    assert payload["articlesCount"] == 1
    assert payload["articles"][0]["slug"] == "foo"
def test_GET_articles_limit(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles, but limit to N results."""
    unlimited = testapp.get("/api/articles", status=200)
    assert unlimited.json["articlesCount"] == 3
    limited = testapp.get("/api/articles?limit=2", status=200)
    assert limited.json["articlesCount"] == 2
def test_GET_articles_offset(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles, but limit to N results, offset by M results."""
    # Without an offset the second item is "Bär"; shifting by one makes it "Foö".
    page_one = testapp.get("/api/articles?limit=2", status=200)
    assert page_one.json["articles"][1]["title"] == "Bär"
    page_two = testapp.get("/api/articles?limit=2&offset=1", status=200)
    assert page_two.json["articles"][1]["title"] == "Foö"
def test_GET_article(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles/{slug}."""
    # Unauthenticated single-article fetch: following is reported as False.
    res = testapp.get("/api/articles/foo", status=200)
    assert res.json == {
        "article": {
            "author": {
                "bio": None,
                "following": False,
                "image": None,
                "username": "one",
            },
            "body": "Foö body",
            "createdAt": "2019-01-01T01:01:01.000Z",
            "description": "Foö desc",
            "favorited": False,
            "favoritesCount": 0,
            "slug": "foo",
            "tagList": ["dogs", "cats"],
            "title": "Foö",
            "updatedAt": "2019-02-02T02:02:02.000Z",
        }
    }
def test_GET_article_authenticated(testapp: TestApp, democontent: None) -> None:
    """Test GET /api/articles/{slug}."""
    # Same fetch, but authenticated as user "two": the only difference from
    # the unauthenticated payload is following=True for author "one".
    res = testapp.get(
        "/api/articles/foo",
        headers={"Authorization": f"Token {USER_TWO_JWT}"},
        status=200,
    )
    assert res.json == {
        "article": {
            "author": {
                "bio": None,
                "following": True,
                "image": None,
                "username": "one",
            },
            "body": "Foö body",
            "createdAt": "2019-01-01T01:01:01.000Z",
            "description": "Foö desc",
            "favorited": False,
            "favoritesCount": 0,
            "slug": "foo",
            "tagList": ["dogs", "cats"],
            "title": "Foö",
            "updatedAt": "2019-02-02T02:02:02.000Z",
        }
    }
def test_POST_article(testapp: TestApp, democontent: None) -> None:
    """Test POST /api/articles."""
    # The created article echoes the submitted fields with the
    # authenticated user ("two") as author.
    res = testapp.post_json(
        "/api/articles",
        {
            "article": {
                "title": "A title",
                "description": "A description",
                "body": "A body",
                "tagList": ["one", "two"],
            }
        },
        headers={"Authorization": f"Token {USER_TWO_JWT}"},
        status=201,
    )
    assert res.json["article"]["author"]["username"] == "two"
    assert res.json["article"]["title"] == "A title"
    assert res.json["article"]["description"] == "A description"
    assert res.json["article"]["body"] == "A body"
    assert res.json["article"]["tagList"] == ["one", "two"]
    # TODO: mock createdAt and updatedAt to be able to compare entire output
    # "article": {
    #     "author": {"bio": "", "following": True, "image": "", "username": "two"},
    #     "body": "A body",
    #     "createdAt": "2019-01-01T00:00:00Z",
    #     "description": "A description",
    #     "favorited": False,
    #     "favoritesCount": 0,
    #     "slug": "a-title",
    #     "tagList": ["foo", "bar"],  # TODO: taglist support
    #     "title": "A title",
    #     "createdAt": "2019-01-01T00:00:00Z",
    #     }
    # }
def test_PUT_article(testapp: TestApp, democontent: None) -> None:
    """Test PUT /api/articles/{slug}."""
    # Author "one" updates their own article; title/description/body change,
    # slug, tags and timestamps stay as in the fixture.
    res = testapp.put_json(
        "/api/articles/foo",
        {
            "article": {
                "title": "New title",
                "description": "New description",
                "body": "New body",
            }
        },
        headers={"Authorization": f"Token {USER_ONE_JWT}"},
        status=200,
    )
    assert res.json == {
        "article": {
            "author": {
                "bio": None,
                "following": False,
                "image": None,
                "username": "one",
            },
            "body": "New body",
            "createdAt": "2019-01-01T01:01:01.000Z",
            "description": "New description",
            "favorited": False,
            "favoritesCount": 0,
            "slug": "foo",
            "tagList": ["dogs", "cats"],
            "title": "New title",
            "updatedAt": "2019-02-02T02:02:02.000Z",
        }
    }
def test_PUT_article_empty_request(testapp: TestApp, democontent: None) -> None:
    """Test PUT /api/articles/{slug} with empty request, nothing happens."""
    # An empty "article" object is accepted with a 200 response.
    testapp.put_json(
        "/api/articles/foo",
        {"article": {}},
        status=200,
        headers={"Authorization": f"Token {USER_ONE_JWT}"},
    )
def test_DELETE_article(testapp: TestApp, db: Session, democontent: None) -> None:
    """Test DELETE /api/articles/{slug}."""
    # Article exists before the call and is gone afterwards.
    assert Article.by_slug("foo", db=db) is not None
    auth = {"Authorization": f"Token {USER_ONE_JWT}"}
    testapp.delete("/api/articles/foo", headers=auth, status=200)
    assert Article.by_slug("foo", db=db) is None
def test_favorite_unfavorite_article(
    testapp: TestApp, db: Session, democontent: None
) -> None:
    """Test POST/DELETE /api/articles/{slug}/favorite."""
    # Round-trip: favorite "foo", verifying both the response payload and the
    # persisted user.favorites, then unfavorite and verify the reset.
    user = User.by_username("one", db=db)
    assert user.favorites == []  # type: ignore
    res = testapp.post_json(
        "/api/articles/foo/favorite",
        headers={"Authorization": f"Token {USER_ONE_JWT}"},
        status=200,
    )
    assert res.json["article"]["favorited"] is True
    assert res.json["article"]["favoritesCount"] == 1
    user = User.by_username("one", db=db)
    assert [article.slug for article in user.favorites] == ["foo"]  # type: ignore
    res = testapp.delete(
        "/api/articles/foo/favorite",
        headers={"Authorization": f"Token {USER_ONE_JWT}"},
        status=200,
    )
    user = User.by_username("one", db=db)
    assert res.json["article"]["favorited"] is False
    assert res.json["article"]["favoritesCount"] == 0
    assert user.favorites == []  # type: ignore
from conduit.article.models import Article
from conduit.auth.models import User
from conduit.auth.tests.test_auth_views import USER_ONE_JWT
from conduit.auth.tests.test_auth_views import USER_TWO_JWT
from sqlalchemy.orm.session import Session
from webtest import TestApp
def test_feed(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles/feed."""
res = testapp.get(
"/api/articles/feed",
headers={"Authorization": f"Token {USER_TWO_JWT}"},
status=200,
)
assert res.json == {
"articlesCount": 2,
"articles": [
{
"slug": "bar",
"title": "Bär",
"description": "Bär desc",
"body": "Bär body",
"createdAt": "2019-03-03T03:03:03.000Z",
"updatedAt": "2019-04-04T04:04:04.000Z",
"tagList": [],
"favorited": False,
"favoritesCount": 0,
"author": {
"username": "one",
"bio": None,
"image": None,
"following": True,
},
},
{
"slug": "foo",
"title": "Foö",
"description": "Foö desc",
"body": "Foö body",
"createdAt": "2019-01-01T01:01:01.000Z",
"updatedAt": "2019-02-02T02:02:02.000Z",
"tagList": ["dogs", "cats"],
"favorited": False,
"favoritesCount": 0,
"author": {
"username": "one",
"bio": None,
"image": None,
"following": True,
},
},
],
}
def test_GET_articles(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles."""
res = testapp.get("/api/articles", status=200)
assert res.json == {
"articlesCount": 3,
"articles": [
{
"slug": "i-am-johnjacob",
"title": "I am <NAME>",
"description": "johnjacob desc",
"body": "johnjacob body",
"createdAt": "2019-05-05T05:05:05.000Z",
"updatedAt": "2019-06-06T06:06:06.000Z",
"tagList": [],
"favorited": False,
"favoritesCount": 0,
"author": {
"username": "johnjacob",
"bio": None,
"image": None,
"following": False,
},
},
{
"slug": "bar",
"title": "Bär",
"description": "Bär desc",
"body": "Bär body",
"createdAt": "2019-03-03T03:03:03.000Z",
"updatedAt": "2019-04-04T04:04:04.000Z",
"tagList": [],
"favorited": False,
"favoritesCount": 0,
"author": {
"username": "one",
"bio": None,
"image": None,
"following": False,
},
},
{
"slug": "foo",
"title": "Foö",
"description": "Foö desc",
"body": "Foö body",
"createdAt": "2019-01-01T01:01:01.000Z",
"updatedAt": "2019-02-02T02:02:02.000Z",
"tagList": ["dogs", "cats"],
"favorited": False,
"favoritesCount": 0,
"author": {
"username": "one",
"bio": None,
"image": None,
"following": False,
},
},
],
}
def test_GET_articles_filter_by_author(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles, filter by author."""
res = testapp.get("/api/articles?author=one", status=200)
assert res.json["articlesCount"] == 2
assert res.json["articles"][0]["slug"] == "bar"
assert res.json["articles"][1]["slug"] == "foo"
res = testapp.get("/api/articles?author=two", status=200)
assert res.json["articlesCount"] == 0
def test_GET_articles_filter_by_tag(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles, filter by author."""
res = testapp.get("/api/articles?tag=dogs", status=200)
assert res.json["articlesCount"] == 1
assert res.json["articles"][0]["slug"] == "foo"
def test_GET_articles_limit(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles, but limit to N results."""
res = testapp.get("/api/articles", status=200)
assert res.json["articlesCount"] == 3
res = testapp.get("/api/articles?limit=2", status=200)
assert res.json["articlesCount"] == 2
def test_GET_articles_offset(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles, but limit to N results, offset by M results."""
res = testapp.get("/api/articles?limit=2", status=200)
assert res.json["articles"][1]["title"] == "Bär"
res = testapp.get("/api/articles?limit=2&offset=1", status=200)
assert res.json["articles"][1]["title"] == "Foö"
def test_GET_article(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles/{slug}."""
res = testapp.get("/api/articles/foo", status=200)
assert res.json == {
"article": {
"author": {
"bio": None,
"following": False,
"image": None,
"username": "one",
},
"body": "Foö body",
"createdAt": "2019-01-01T01:01:01.000Z",
"description": "Foö desc",
"favorited": False,
"favoritesCount": 0,
"slug": "foo",
"tagList": ["dogs", "cats"],
"title": "Foö",
"updatedAt": "2019-02-02T02:02:02.000Z",
}
}
def test_GET_article_authenticated(testapp: TestApp, democontent: None) -> None:
"""Test GET /api/articles/{slug}."""
res = testapp.get(
"/api/articles/foo",
headers={"Authorization": f"Token {USER_TWO_JWT}"},
status=200,
)
assert res.json == {
"article": {
"author": {
"bio": None,
"following": True,
"image": None,
"username": "one",
},
"body": "Foö body",
"createdAt": "2019-01-01T01:01:01.000Z",
"description": "Foö desc",
"favorited": False,
"favoritesCount": 0,
"slug": "foo",
"tagList": ["dogs", "cats"],
"title": "Foö",
"updatedAt": "2019-02-02T02:02:02.000Z",
}
}
def test_POST_article(testapp: TestApp, democontent: None) -> None:
"""Test POST /api/articles."""
res = testapp.post_json(
"/api/articles",
{
"article": {
"title": "A title",
"description": "A description",
"body": "A body",
"tagList": ["one", "two"],
}
},
headers={"Authorization": f"Token {USER_TWO_JWT}"},
status=201,
)
assert res.json["article"]["author"]["username"] == "two"
assert res.json["article"]["title"] == "A title"
assert res.json["article"]["description"] == "A description"
assert res.json["article"]["body"] == "A body"
assert res.json["article"]["tagList"] == ["one", "two"]
# TODO: mock createdAt and updatedAt to be able to compare entire output
# "article": {
# "author": {"bio": "", "following": True, "image": "", "username": "two"},
# "body": "A body",
# "createdAt": "2019-01-01T00:00:00Z",
# "description": "A description",
# "favorited": False,
# "favoritesCount": 0,
# "slug": "a-title",
# "tagList": ["foo", "bar"], # TODO: taglist support
# "title": "A title",
# "createdAt": "2019-01-01T00:00:00Z",
# }
# }
def test_PUT_article(testapp: TestApp, democontent: None) -> None:
"""Test PUT /api/articles/{slug}."""
res = testapp.put_json(
"/api/articles/foo",
{
"article": {
"title": "New title",
"description": "New description",
"body": "New body",
}
},
headers={"Authorization": f"Token {USER_ONE_JWT}"},
status=200,
)
assert res.json == {
"article": {
"author": {
"bio": None,
"following": False,
"image": None,
"username": "one",
},
"body": "New body",
"createdAt": "2019-01-01T01:01:01.000Z",
"description": "New description",
"favorited": False,
"favoritesCount": 0,
"slug": "foo",
"tagList": ["dogs", "cats"],
"title": "New title",
"updatedAt": "2019-02-02T02:02:02.000Z",
}
}
def test_PUT_article_empty_request(testapp: TestApp, democontent: None) -> None:
"""Test PUT /api/articles/{slug} with empty request, nothing happens."""
testapp.put_json(
"/api/articles/foo",
{"article": {}},
headers={"Authorization": f"Token {USER_ONE_JWT}"},
status=200,
)
def test_DELETE_article(testapp: TestApp, db: Session, democontent: None) -> None:
"""Test DELETE /api/articles/{slug}."""
assert Article.by_slug("foo", db=db) is not None
testapp.delete(
"/api/articles/foo",
headers={"Authorization": f"Token {USER_ONE_JWT}"},
status=200,
)
assert Article.by_slug("foo", db=db) is None
def test_favorite_unfavorite_article(
testapp: TestApp, db: Session, democontent: None
) -> None:
"""Test POST/DELETE /api/articles/{slug}/favorite."""
user = User.by_username("one", db=db)
assert user.favorites == [] # type: ignore
res = testapp.post_json(
"/api/articles/foo/favorite",
headers={"Authorization": f"Token {USER_ONE_JWT}"},
status=200,
)
assert res.json["article"]["favorited"] is True
assert res.json["article"]["favoritesCount"] == 1
user = User.by_username("one", db=db)
assert [article.slug for article in user.favorites] == ["foo"] # type: ignore
res = testapp.delete(
"/api/articles/foo/favorite",
headers={"Authorization": f"Token {USER_ONE_JWT}"},
status=200,
)
user = User.by_username("one", db=db)
assert res.json["article"]["favorited"] is False
assert res.json["article"]["favoritesCount"] == 0
    assert user.favorites == []  # type: ignore
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from cv2 import resize
import logging
from utils.misc_utils import get_center
# Bounding-box record; whether (x, y) is the centre or the top-left corner
# depends on the convention in use (see convert_bbox_format).
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def im2rgb(im):
    """Return `im` with a channel axis, replicating a channel-less image
    into three identical channels; 3-D inputs are returned unchanged."""
    if im.ndim != 3:
        im = np.stack((im, im, im), axis=-1)
    return im
def convert_bbox_format(bbox, to):
    """Convert a Rectangle between center-based and top-left-based coordinates.

    Args:
      bbox: Rectangle to convert.
      to: either 'top-left-based' or 'center-based'.

    Raises:
      ValueError: for any other `to` value.
    """
    x, y = bbox.x, bbox.y
    w, h = bbox.width, bbox.height
    if to == 'top-left-based':
        x -= get_center(w)
        y -= get_center(h)
    elif to == 'center-based':
        x += get_center(w)
        y += get_center(h)
    else:
        raise ValueError("Bbox format: {} was not recognized".format(to))
    return Rectangle(x, y, w, h)
def get_exemplar_images(images, exemplar_size, targets_pos=None):
    """Crop exemplar image from input images.

    Args:
      images: batched image tensor; the static shape must be known.
      exemplar_size: (height, width) of the crop to extract.
      targets_pos: optional per-example (y, x) crop centres; defaults to
        the image centre for every batch element.

    Returns:
      Tensor of shape [batch_size, z_height, z_width, 3].
    """
    with tf.name_scope('get_exemplar_image'):
        batch_size, x_height, x_width = images.get_shape().as_list()[:3]
        z_height, z_width = exemplar_size
        if targets_pos is None:
            # Default: crop around the centre of each image.
            target_pos_single = [[get_center(x_height), get_center(x_width)]]
            targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
        else:
            targets_pos_ = targets_pos
        # convert to top-left corner based coordinates
        top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
        bottom = tf.to_int32(top + z_height)
        left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
        right = tf.to_int32(left + z_width)

        def _slice(x):
            # Per-example crop: x = (image, top, left, bottom, right).
            f, t, l, b, r = x
            c = f[t:b, l:r]
            return c

        exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
        exemplar_img.set_shape([batch_size, z_height, z_width, 3])
        return exemplar_img
def get_crops(im, bbox, size_z, size_x, context_amount):
    """Crop a search window around `bbox`, padding with the average channel
    value where the window leaves the image.

    Adapted from
    https://github.com/bertinetto/siamese-fc/blob/master/ILSVRC15-curation/save_crops.m#L46

    Args:
      im: Image ndarray.
      bbox: Named tuple (x, y, width, height); (x, y) is the crop centre.
      size_z: Target + context size.
      size_x: The resultant crop size.
      context_amount: The amount of context.

    Returns:
      (image crop ndarray, scale factor from source to crop coordinates)
    """
    center_y, center_x = bbox.y, bbox.x
    target_h, target_w = bbox.height, bbox.width
    # Context margin proportional to the target perimeter.
    context = context_amount * (target_w + target_h)
    s_z = np.sqrt((target_w + context) * (target_h + context))
    scale_z = size_z / s_z
    d_search = (size_x - size_z) / 2
    pad = d_search / scale_z
    s_x = s_z + 2 * pad
    scale_x = size_x / s_x
    side = np.round(s_x)
    image_crop_x, _, _, _, _ = get_subwindow_avg(
        im, [center_y, center_x], [size_x, size_x], [side, side])
    return image_crop_x, scale_x
def get_subwindow_avg(im, pos, model_sz, original_sz):
    """Extract a sub-window of `im` centred at `pos`, padding out-of-bounds
    regions with the per-channel mean, then resize to `model_sz`.

    Args:
      im: HxWx3 image ndarray.
      pos: [y, x] centre of the window.
      model_sz: [h, w] output size of the returned patch.
      original_sz: [h, w] size of the window to cut before resizing
        (falls back to model_sz when falsy).

    Returns:
      (patch, left_pad, top_pad, right_pad, bottom_pad)
    """
    # avg_chans = np.mean(im, axis=(0, 1))  # This version is 3x slower
    avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
    if not original_sz:
        original_sz = model_sz
    sz = original_sz
    im_sz = im.shape
    # make sure the size is not too small
    assert im_sz[0] > 2 and im_sz[1] > 2
    c = [get_center(s) for s in sz]
    # Bug fix: np.int() was deprecated and removed in NumPy 1.24; the
    # builtin int() is the documented replacement and behaves identically
    # on these scalar values.
    context_xmin = int(np.round(pos[1] - c[1]))
    context_xmax = int(context_xmin + sz[1] - 1)
    context_ymin = int(np.round(pos[0] - c[0]))
    context_ymax = int(context_ymin + sz[0] - 1)
    left_pad = int(max(0, -context_xmin))
    top_pad = int(max(0, -context_ymin))
    right_pad = int(max(0, context_xmax - im_sz[1] + 1))
    bottom_pad = int(max(0, context_ymax - im_sz[0] + 1))
    # Shift the window coordinates into the padded image frame.
    context_xmin = context_xmin + left_pad
    context_xmax = context_xmax + left_pad
    context_ymin = context_ymin + top_pad
    context_ymax = context_ymax + top_pad
    if top_pad > 0 or bottom_pad > 0 or left_pad > 0 or right_pad > 0:
        # Pad each channel with its own mean value.
        R = np.pad(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[0]))
        G = np.pad(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[1]))
        B = np.pad(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),
                   'constant', constant_values=(avg_chans[2]))
        im = np.stack((R, G, B), axis=2)
    im_patch_original = im[context_ymin:context_ymax + 1,
                           context_xmin:context_xmax + 1, :]
    if not (model_sz[0] == original_sz[0] and model_sz[1] == original_sz[1]):
        im_patch = resize(im_patch_original, tuple(model_sz))
    else:
        im_patch = im_patch_original
    return im_patch, left_pad, top_pad, right_pad, bottom_pad
def get_saver(keyword, removes, excepts, repl=[]):
    """Build a tf.train.Saver over the global variables whose names contain
    `keyword`.

    Variables whose name contains any substring in `excepts` are skipped.
    Each kept variable is mapped to a checkpoint name derived from its own
    name with every substring in `removes` deleted and every (old, new)
    pair in `repl` applied.  Every decision is logged via logging.warning.

    NOTE: the mutable default `repl=[]` is kept for interface
    compatibility; it is never mutated here.
    """
    vars_need_load = {}
    for v in tf.global_variables():
        vname = v.name
        if vname.find(keyword) != -1:
            for eeexxx in excepts:
                if vname.find(eeexxx) != -1:
                    logging.warning('No Load: ' + vname)
                    break
            else:
                # No excluded substring matched: remap and keep the variable.
                vname_ori = vname
                for r in removes:
                    vname = vname.replace(r, '')
                for r in repl:
                    vname = vname.replace(r[0], r[1])
                vars_need_load[vname] = v
                logging.warning('Load: ' + vname + ' as ' + vname_ori)
        else:
            logging.warning('No Load: ' + vname)
    return tf.train.Saver(vars_need_load)
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
from cv2 import resize
import logging
from utils.misc_utils import get_center
Rectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])
def im2rgb(im):
if len(im.shape) != 3:
im = np.stack([im, im, im], -1)
return im
def convert_bbox_format(bbox, to):
x, y, target_width, target_height = bbox.x, bbox.y, bbox.width, bbox.height
if to == 'top-left-based':
x -= get_center(target_width)
y -= get_center(target_height)
elif to == 'center-based':
y += get_center(target_height)
x += get_center(target_width)
else:
raise ValueError("Bbox format: {} was not recognized".format(to))
return Rectangle(x, y, target_width, target_height)
def get_exemplar_images(images, exemplar_size, targets_pos=None):
"""Crop exemplar image from input images"""
with tf.name_scope('get_exemplar_image'):
batch_size, x_height, x_width = images.get_shape().as_list()[:3]
z_height, z_width = exemplar_size
if targets_pos is None:
target_pos_single = [[get_center(x_height), get_center(x_width)]]
targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
else:
targets_pos_ = targets_pos
# convert to top-left corner based coordinates
top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
bottom = tf.to_int32(top + z_height)
left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
right = tf.to_int32(left + z_width)
def _slice(x):
f, t, l, b, r = x
c = f[t:b, l:r]
return c
exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
exemplar_img.set_shape([batch_size, z_height, z_width, 3])
return exemplar_img
def get_crops(im, bbox, size_z, size_x, context_amount):
"""Obtain image sub-window, padding with avg channel if area goes outside of border
Adapted from https://github.com/bertinetto/siamese-fc/blob/master/ILSVRC15-curation/save_crops.m#L46
Args:
im: Image ndarray
bbox: Named tuple (x, y, width, height) x, y corresponds to the crops center
size_z: Target + context size
size_x: The resultant crop size
context_amount: The amount of context
Returns:
image crop: Image ndarray
"""
cy, cx, h, w = bbox.y, bbox.x, bbox.height, bbox.width
wc_z = w + context_amount * (w + h)
hc_z = h + context_amount * (w + h)
s_z = np.sqrt(wc_z * hc_z)
scale_z = size_z / s_z
d_search = (size_x - size_z) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
scale_x = size_x / s_x
image_crop_x, _, _, _, _ = get_subwindow_avg(im, [cy, cx],
[size_x, size_x],
[np.round(s_x), np.round(s_x)])
return image_crop_x, scale_x
def get_subwindow_avg(im, pos, model_sz, original_sz):
# avg_chans = np.mean(im, axis=(0, 1)) # This version is 3x slower
avg_chans = [np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])]
if not original_sz:
original_sz = model_sz
sz = original_sz
im_sz = im.shape
# make sure the size is not too small
assert im_sz[0] > 2 and im_sz[1] > 2
c = [get_center(s) for s in sz]
# check out-of-bounds coordinates, and set them to avg_chans
context_xmin = np.int(np.round(pos[1] - c[1]))
context_xmax = np.int(context_xmin + sz[1] - 1)
context_ymin = np.int(np.round(pos[0] - c[0]))
context_ymax = np.int(context_ymin + sz[0] - 1)
left_pad = np.int(np.maximum(0, -context_xmin))
top_pad = np.int(np.maximum(0, -context_ymin))
right_pad = np.int(np.maximum(0, context_xmax - im_sz[1] + 1))
bottom_pad = np.int(np.maximum(0, context_ymax - im_sz[0] + 1))
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
if top_pad > 0 or bottom_pad > 0 or left_pad > 0 or right_pad > 0:
R = np.pad(im[:, :, 0], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[0]))
G = np.pad(im[:, :, 1], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[1]))
B = np.pad(im[:, :, 2], ((top_pad, bottom_pad), (left_pad, right_pad)),
'constant', constant_values=(avg_chans[2]))
im = np.stack((R, G, B), axis=2)
im_patch_original = im[context_ymin:context_ymax + 1,
context_xmin:context_xmax + 1, :]
if not (model_sz[0] == original_sz[0] and model_sz[1] == original_sz[1]):
im_patch = resize(im_patch_original, tuple(model_sz))
else:
im_patch = im_patch_original
return im_patch, left_pad, top_pad, right_pad, bottom_pad
def get_saver(keyword, removes, excepts,repl=[]):
vars_need_load = {}
for v in (tf.global_variables()):
vname = v.name
if vname.find(keyword)!=-1:
for eeexxx in excepts:
if vname.find(eeexxx)!=-1:
logging.warning('No Load: '+vname)
break
else:
vname_ori = vname
for r in removes:
vname = vname.replace(r,'')
for r in repl:
vname = vname.replace(r[0],r[1])
vars_need_load[vname] = v
logging.warning('Load: ' + vname + ' as ' + vname_ori)
else:
logging.warning('No Load: '+vname)
    return tf.train.Saver(vars_need_load)
import sys
import traceback
from collections import deque
from threading import Condition, Thread
from time import time
""" Sits between incoming messages from a subscription, and the outgoing
publish method. Provides throttling / buffering capabilities.
When the parameters change, the handler may transition to a different kind
of handler
"""
class MessageHandler:
    """Pass-through handler: publishes every incoming message immediately.

    Base of the handler family; `transition` swaps in a throttled or
    queueing variant whenever the current rate / queue-length settings
    require one.
    """

    def __init__(self, previous_handler=None, publish=None):
        if previous_handler:
            # Inherit state from the handler being replaced.
            self.last_publish = previous_handler.last_publish
            self.throttle_rate = previous_handler.throttle_rate
            self.queue_length = previous_handler.queue_length
            self.publish = previous_handler.publish
            return
        self.last_publish = 0
        self.throttle_rate = 0
        self.queue_length = 0
        self.publish = publish

    def set_throttle_rate(self, throttle_rate):
        """Set the minimum publish interval (given in ms) and retransition."""
        self.throttle_rate = throttle_rate / 1000.0
        return self.transition()

    def set_queue_length(self, queue_length):
        """Set the buffer capacity and retransition."""
        self.queue_length = queue_length
        return self.transition()

    def time_remaining(self):
        """Seconds left until the next publish is allowed (never negative)."""
        next_allowed = self.last_publish + self.throttle_rate
        return max(next_allowed - time(), 0)

    def handle_message(self, msg):
        """Publish immediately, recording the publish time."""
        self.last_publish = time()
        self.publish(msg)

    def transition(self):
        """Return the handler instance matching the current settings."""
        if self.queue_length:
            return QueueMessageHandler(self)
        if self.throttle_rate:
            return ThrottleMessageHandler(self)
        return self

    def finish(self, block=True):
        """Nothing to flush for an unbuffered handler."""
        pass
class ThrottleMessageHandler(MessageHandler):
def handle_message(self, msg):
if self.time_remaining() == 0:
MessageHandler.handle_message(self, msg)
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
return MessageHandler(self)
elif self.queue_length == 0:
return self
else:
return QueueMessageHandler(self)
def finish(self, block=True):
pass
class QueueMessageHandler(MessageHandler, Thread):
def __init__(self, previous_handler):
Thread.__init__(self)
MessageHandler.__init__(self, previous_handler)
self.daemon = True
self.queue = deque(maxlen=self.queue_length)
self.c = Condition()
self.alive = True
self.start()
def handle_message(self, msg):
with self.c:
if not self.alive:
return
should_notify = len(self.queue) == 0
self.queue.append(msg)
if should_notify:
self.c.notify()
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
self.finish()
return MessageHandler(self)
elif self.queue_length == 0:
self.finish()
return ThrottleMessageHandler(self)
else:
with self.c:
old_queue = self.queue
self.queue = deque(maxlen=self.queue_length)
while len(old_queue) > 0:
self.queue.append(old_queue.popleft())
self.c.notify()
return self
def finish(self, block=True):
"""If throttle was set to 0, this pushes all buffered messages"""
# Notify the thread to finish
with self.c:
self.alive = False
self.c.notify()
if block:
self.join()
def run(self):
while self.alive:
msg = None
with self.c:
if len(self.queue) == 0:
self.c.wait()
else:
self.c.wait(self.time_remaining())
if self.alive and self.time_remaining() == 0 and len(self.queue) > 0:
msg = self.queue.popleft()
if msg is not None:
try:
MessageHandler.handle_message(self, msg)
except Exception:
traceback.print_exc(file=sys.stderr)
while self.time_remaining() == 0 and len(self.queue) > 0:
try:
MessageHandler.handle_message(self, self.queue[0])
except Exception:
traceback.print_exc(file=sys.stderr) | rosbridge_library/src/rosbridge_library/internal/subscription_modifiers.py |
import sys
import traceback
from collections import deque
from threading import Condition, Thread
from time import time
""" Sits between incoming messages from a subscription, and the outgoing
publish method. Provides throttling / buffering capabilities.
When the parameters change, the handler may transition to a different kind
of handler
"""
class MessageHandler:
def __init__(self, previous_handler=None, publish=None):
if previous_handler:
self.last_publish = previous_handler.last_publish
self.throttle_rate = previous_handler.throttle_rate
self.queue_length = previous_handler.queue_length
self.publish = previous_handler.publish
else:
self.last_publish = 0
self.throttle_rate = 0
self.queue_length = 0
self.publish = publish
def set_throttle_rate(self, throttle_rate):
self.throttle_rate = throttle_rate / 1000.0
return self.transition()
def set_queue_length(self, queue_length):
self.queue_length = queue_length
return self.transition()
def time_remaining(self):
return max((self.last_publish + self.throttle_rate) - time(), 0)
def handle_message(self, msg):
self.last_publish = time()
self.publish(msg)
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
return self
elif self.queue_length == 0:
return ThrottleMessageHandler(self)
else:
return QueueMessageHandler(self)
def finish(self, block=True):
pass
class ThrottleMessageHandler(MessageHandler):
def handle_message(self, msg):
if self.time_remaining() == 0:
MessageHandler.handle_message(self, msg)
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
return MessageHandler(self)
elif self.queue_length == 0:
return self
else:
return QueueMessageHandler(self)
def finish(self, block=True):
pass
class QueueMessageHandler(MessageHandler, Thread):
def __init__(self, previous_handler):
Thread.__init__(self)
MessageHandler.__init__(self, previous_handler)
self.daemon = True
self.queue = deque(maxlen=self.queue_length)
self.c = Condition()
self.alive = True
self.start()
def handle_message(self, msg):
with self.c:
if not self.alive:
return
should_notify = len(self.queue) == 0
self.queue.append(msg)
if should_notify:
self.c.notify()
def transition(self):
if self.throttle_rate == 0 and self.queue_length == 0:
self.finish()
return MessageHandler(self)
elif self.queue_length == 0:
self.finish()
return ThrottleMessageHandler(self)
else:
with self.c:
old_queue = self.queue
self.queue = deque(maxlen=self.queue_length)
while len(old_queue) > 0:
self.queue.append(old_queue.popleft())
self.c.notify()
return self
def finish(self, block=True):
"""If throttle was set to 0, this pushes all buffered messages"""
# Notify the thread to finish
with self.c:
self.alive = False
self.c.notify()
if block:
self.join()
def run(self):
while self.alive:
msg = None
with self.c:
if len(self.queue) == 0:
self.c.wait()
else:
self.c.wait(self.time_remaining())
if self.alive and self.time_remaining() == 0 and len(self.queue) > 0:
msg = self.queue.popleft()
if msg is not None:
try:
MessageHandler.handle_message(self, msg)
except Exception:
traceback.print_exc(file=sys.stderr)
while self.time_remaining() == 0 and len(self.queue) > 0:
try:
MessageHandler.handle_message(self, self.queue[0])
except Exception:
traceback.print_exc(file=sys.stderr) | 0.439868 | 0.111338 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .builder import OPERATORS
def coords_grid(flow: Tensor) -> Tensor:
"""Generate shifted coordinate grid based based input flow.
Args:
flow (Tensor): Estimated optical flow.
Returns:
Tensor: The coordinate that shifted by input flow and scale in the
range [-1, 1].
"""
B, _, H, W = flow.shape
xx = torch.arange(0, W, device=flow.device, requires_grad=False)
yy = torch.arange(0, H, device=flow.device, requires_grad=False)
coords = torch.meshgrid(yy, xx)
coords = torch.stack(coords[::-1], dim=0).float()
grid = coords[None].repeat(B, 1, 1, 1) + flow
grid[:, 0, ...] = grid[:, 0, ...] * 2. / max(W - 1, 1) - 1.
grid[:, 1, ...] = grid[:, 1, ...] * 2. / max(H - 1, 1) - 1.
grid = grid.permute(0, 2, 3, 1)
return grid
@OPERATORS.register_module()
class Warp(nn.Module):
"""Warping layer to warp feature using optical flow.
Args:
mode (str): interpolation mode to calculate output values. Options are
'bilinear' and 'nearest'. Defaults to 'bilinear'.
padding_mode (str): padding mode for outside grid values. Options are
'zero', 'border' and 'reflection'. Defaults to 'zeros'.
align_corners (bool): If set to True, the extrema (-1 and 1) are
considered as referring to the center points of the input’s corner
pixels. If set to False, they are instead considered as referring
to the corner points of the input’s corner pixels, making the
sampling more resolution agnostic. Default to False.
"""
def __init__(self,
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
use_mask: bool = True) -> None:
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
self.use_mask = use_mask
def forward(self, feat: Tensor, flow: Tensor) -> Tensor:
"""Forward function for warp.
Args:
feat (Tensor): Input feature
flow (Tensor): Input optical flow.
Returns:
Tensor: The output feature that was generated by warping input
feature based input flow.
"""
grid = coords_grid(flow)
out = F.grid_sample(
feat,
grid,
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners)
mask = torch.ones(feat.size(), device=feat.device, requires_grad=False)
if self.use_mask:
mask = F.grid_sample(
mask,
grid,
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners)
mask = (mask > 0.9999).float()
return out * mask
def __repr__(self):
s = self.__class__.__name__
s += f'(mode={self.mode}, '
s += f'padding_mode={self.padding_mode}, '
s += f'align_corners={self.align_corners},'
s += f'use_mask={self.use_mask})'
return s | mmflow/ops/warp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from .builder import OPERATORS
def coords_grid(flow: Tensor) -> Tensor:
"""Generate shifted coordinate grid based based input flow.
Args:
flow (Tensor): Estimated optical flow.
Returns:
Tensor: The coordinate that shifted by input flow and scale in the
range [-1, 1].
"""
B, _, H, W = flow.shape
xx = torch.arange(0, W, device=flow.device, requires_grad=False)
yy = torch.arange(0, H, device=flow.device, requires_grad=False)
coords = torch.meshgrid(yy, xx)
coords = torch.stack(coords[::-1], dim=0).float()
grid = coords[None].repeat(B, 1, 1, 1) + flow
grid[:, 0, ...] = grid[:, 0, ...] * 2. / max(W - 1, 1) - 1.
grid[:, 1, ...] = grid[:, 1, ...] * 2. / max(H - 1, 1) - 1.
grid = grid.permute(0, 2, 3, 1)
return grid
@OPERATORS.register_module()
class Warp(nn.Module):
"""Warping layer to warp feature using optical flow.
Args:
mode (str): interpolation mode to calculate output values. Options are
'bilinear' and 'nearest'. Defaults to 'bilinear'.
padding_mode (str): padding mode for outside grid values. Options are
'zero', 'border' and 'reflection'. Defaults to 'zeros'.
align_corners (bool): If set to True, the extrema (-1 and 1) are
considered as referring to the center points of the input’s corner
pixels. If set to False, they are instead considered as referring
to the corner points of the input’s corner pixels, making the
sampling more resolution agnostic. Default to False.
"""
def __init__(self,
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
use_mask: bool = True) -> None:
super().__init__()
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
self.use_mask = use_mask
def forward(self, feat: Tensor, flow: Tensor) -> Tensor:
"""Forward function for warp.
Args:
feat (Tensor): Input feature
flow (Tensor): Input optical flow.
Returns:
Tensor: The output feature that was generated by warping input
feature based input flow.
"""
grid = coords_grid(flow)
out = F.grid_sample(
feat,
grid,
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners)
mask = torch.ones(feat.size(), device=feat.device, requires_grad=False)
if self.use_mask:
mask = F.grid_sample(
mask,
grid,
mode=self.mode,
padding_mode=self.padding_mode,
align_corners=self.align_corners)
mask = (mask > 0.9999).float()
return out * mask
def __repr__(self):
s = self.__class__.__name__
s += f'(mode={self.mode}, '
s += f'padding_mode={self.padding_mode}, '
s += f'align_corners={self.align_corners},'
s += f'use_mask={self.use_mask})'
return s | 0.958596 | 0.762579 |
import os
import sys
import argparse
import json
import uuid
import re
from pprint import pprint
from html.parser import HTMLParser
from html.entities import name2codepoint
# NOTE: This script is standalone does not include any libraries
DLG_JSON_HUMAN_EXTENSION = ".dlg_human.json"
ROOT_NODE_INDEX = -1
class Colors:
    """ANSI escape sequences for colored terminal output (END resets)."""
    # Reset / special
    END = '\033[0m'
    HEADER = '\033[95m'
    # Normal-intensity colors
    BLUE = '\033[0;36m'
    GREEN = '\033[0;32m'
    YELLOW = '\033[0;33m'
    RED = '\033[0;31m'
    # Bright variants
    BLUE_LIGHT = '\033[1;36m'
    GREEN_LIGHT = '\033[1;32m'
    YELLOW_LIGHT = '\033[1;33m'
    RED_LIGHT = '\033[1;31m'
def print_newlines(nr=1):
    """Print `nr` empty lines; a no-op when nr is zero or negative."""
    if nr <= 0:
        return
    print('\n' * nr, end='')
def print_reset_color():
    """Emit the ANSI reset sequence, but only when stdout is a real terminal."""
    if not sys.stdout.isatty():
        return
    print(Colors.END)
def _print_internal(color, string, **kwargs):
if sys.stdout.isatty():
# You're running in a real terminal
prefix, suffix = color, Colors.END
else:
# You're being piped or redirected
prefix, suffix = '', ''
print(prefix + string + suffix, **kwargs)
def print_red(*args, **kwargs):
    """Print args (space-joined) in red."""
    _print_internal(Colors.RED, " ".join(str(a) for a in args), **kwargs)
def print_red_light(*args, **kwargs):
    """Print args (space-joined) in bright red."""
    _print_internal(Colors.RED_LIGHT, " ".join(str(a) for a in args), **kwargs)
def print_blue(*args, **kwargs):
    """Print args (space-joined) in blue."""
    _print_internal(Colors.BLUE, " ".join(str(a) for a in args), **kwargs)
def print_blue_light(*args, **kwargs):
    """Print args (space-joined) in bright blue."""
    _print_internal(Colors.BLUE_LIGHT, " ".join(str(a) for a in args), **kwargs)
def print_yellow(*args, **kwargs):
    """Print args (space-joined) in yellow."""
    _print_internal(Colors.YELLOW, " ".join(str(a) for a in args), **kwargs)
def print_yellow_light(*args, **kwargs):
    """Print args (space-joined) in bright yellow."""
    _print_internal(Colors.YELLOW_LIGHT, " ".join(str(a) for a in args), **kwargs)
def print_green(*args, **kwargs):
    """Print args (space-joined) in green."""
    _print_internal(Colors.GREEN, " ".join(str(a) for a in args), **kwargs)
def print_green_light(*args, **kwargs):
    """Print args (space-joined) in bright green."""
    _print_internal(Colors.GREEN_LIGHT, " ".join(str(a) for a in args), **kwargs)
def print_config_value(config_name, config_value):
    """Print `config_name = config_value` (name in blue, value in bright blue)."""
    print_blue("{} = ".format(config_name), end='')
    print_blue_light(config_value)
def string_to_int(string):
    """Parse `string` as an int; return None when it is not a valid integer."""
    try:
        value = int(string)
    except ValueError:
        return None
    return value
class TwineNodeTag:
    """Known twine passage tags that mark the dialogue node kind (lowercase)."""
    NODE_START = "node-start"
    NODE_END = "node-end"
    NODE_VIRTUAL_PARENT = "node-virtual-parent"
    NODE_SPEECH = "node-speech"
    NODE_SPEECH_SEQUENCE = "node-speech-sequence"
    NODE_SELECTOR_FIRST = "node-selector-first"
    NODE_SELECTOR_RANDOM = "node-selector-random"
    @classmethod
    def get_all_tags(cls):
        """Return the set of every recognized node tag."""
        return {
            cls.NODE_START,
            cls.NODE_END,
            cls.NODE_VIRTUAL_PARENT,
            cls.NODE_SPEECH,
            cls.NODE_SPEECH_SEQUENCE,
            cls.NODE_SELECTOR_FIRST,
            cls.NODE_SELECTOR_RANDOM,
        }
    @classmethod
    def is_valid_tag(cls, tag):
        """True when `tag` (case-insensitive) is a recognized node tag."""
        return tag.lower() in cls.get_all_tags()
    @classmethod
    def has_valid_tags(cls, tags_list):
        """True when at least one entry of `tags_list` is a recognized node tag."""
        lowered = {entry.lower() for entry in tags_list}
        return not cls.get_all_tags().isdisjoint(lowered)
class TwineHelper:
    """Shared parsing helpers for twine passage names and text."""
    # Matches `<node index>. <Speaker>`, e.g. `3. NPC Bob` or `-1. Player`
    REGEX_NAME = r"(-?\d+)\.\s*(.*)"
    @classmethod
    def parse_twine_node_name(cls, raw_name, context_multiple_matches, context_invalid_index, context_invalid_speaker):
        """Split a passage name `<node index>. <Speaker>` into (node_index, speaker).

        Returns (None, None) when nothing matched. The three context strings
        prefix the warning message for each failure mode.
        """
        matches_name = re.finditer(cls.REGEX_NAME, raw_name, re.MULTILINE | re.UNICODE)
        node_index, speaker = None, None
        for index, match in enumerate(matches_name):
            if index > 0:
                print_yellow("{}, got multiple name matches".format(context_multiple_matches))
                break
            group_index = match.group(1)
            if group_index is not None:
                node_index = string_to_int(group_index.strip())
            else:
                print_yellow("{}, could not get node index from <node index>. <Speaker>".format(context_invalid_index))
            group_speaker = match.group(2)
            # BUG FIX: this branch previously re-tested group_index (copy-paste
            # error), so a missing speaker group was never reported.
            if group_speaker is not None:
                speaker = group_speaker.strip()
            else:
                print_yellow("{}, could not get speaker from <node index>. <Speaker>".format(context_invalid_speaker))
        return node_index, speaker
    @classmethod
    def clean_text(cls, text):
        """Strip surrounding whitespace and normalize to Windows (CRLF) line endings."""
        return text.strip().replace("\n", "\r\n")
class TwineEdgeData:
    """One outgoing `[[text|target]]` edge of a twine dialogue node."""
    # Edge text containing this marker is deliberately exported as empty
    IGNORE_EMPTY_TEXT_FLAG = "~ignore~"
    def __init__(self):
        self.raw_data = None           # the `text|target` payload between [[ ]]
        self.raw_text = None           # text part, unprocessed
        self.text = None               # cleaned edge text
        self.target_node_index = None  # node index this edge points to
        self.owner_node_index = None   # node index this edge belongs to
    # The edge has empty text
    def is_empty_edge_text(self):
        """True when the edge has no usable text (missing or explicitly ignored)."""
        return self.raw_text is None or self.IGNORE_EMPTY_TEXT_FLAG in self.raw_text.lower()
    def parse(self):
        """Split raw_data into edge text and target node index."""
        # TODO make sure there are not multiple of these
        parts = self.raw_data.split("|")
        if len(parts) != 2:
            print_yellow("Node Index = {} has an edge with len(parts) = {}. There must be exactly 2. Did you use `|` inside your edge?".format(self.owner_node_index, len(parts)))
            return
        # Text
        self.raw_text = parts[0]
        if self.is_empty_edge_text():
            self.text = ""
        else:
            self.text = TwineHelper.clean_text(self.raw_text)
        # Target node index (the speaker part of the target name is irrelevant)
        context_parse_name = "Node Index = {} Edge, parts[1] = `{}`".format(self.owner_node_index, parts[1])
        self.target_node_index, ignored_speaker = TwineHelper.parse_twine_node_name(parts[1], context_parse_name, context_parse_name, context_parse_name)
    def to_dict(self):
        """Export as a JSON-ready dict; {} when the edge is invalid."""
        if self.text is None or self.target_node_index is None or self.target_node_index < ROOT_NODE_INDEX:
            # BUG FIX: removed a stray debug `print(self.text)` that polluted
            # the tool's (possibly piped) output on every invalid edge.
            print_yellow("Node index = {}, Edge invalid = {}. ignoring.".format(self.owner_node_index, str(self)))
            return {}
        return {
            "TargetNodeIndex": self.target_node_index,
            "Text": self.text
        }
    def __str__(self):
        return "TwineEdgeData(target_node_index = {}, text = `{}`)".format(self.target_node_index, self.text)
    def __repr__(self):
        return str(self)
class TwineInnerEdgeData:
    """One entry of a speech-sequence node.

    Each entry is written as ``Speaker:`` //name//, ``Text:`` //text//,
    ``EdgeText:`` //edge text// inside the passage body.
    """
    REGEX_SPEAKER = r"``\s*Speaker\s*:\s*``\s*//(.*)//"
    REGEX_TEXT = r"``\s*Text\s*:\s*``\s*//(.*)//"
    REGEX_EDGE_TEXT = r"``\s*EdgeText\s*:\s*``\s*//(.*)//"
    def __init__(self):
        self.raw_data = None          # raw passage fragment for this entry
        self.speaker = None           # parsed speaker name
        self.text = None              # parsed + CRLF-normalized text
        self.edge_text = None         # parsed edge text
        self.owner_node_index = None  # node index this entry belongs to
    def _parse_field(self, regex, display_name, pattern_hint):
        """Return the stripped group 1 of the first `regex` match in raw_data.

        Keeps only the first match (warning on extras); returns None when
        nothing matched. `display_name` and `pattern_hint` only feed the
        warning messages. Extracted so the three fields share one loop
        (previously triplicated verbatim).
        """
        result = None
        matches = re.finditer(regex, self.raw_data, re.MULTILINE | re.UNICODE | re.IGNORECASE)
        for index, match in enumerate(matches):
            if index > 0:
                print_yellow("Node speech sequence Index = {} got multiple matches for {}".format(self.owner_node_index, display_name))
                break
            group = match.group(1)
            if group is None:
                print_yellow("Node speech sequence Index = {} could not get group 1 that matches {}".format(self.owner_node_index, pattern_hint))
                continue
            result = group.strip()
        return result
    def parse(self):
        """Extract speaker, text and edge text from raw_data."""
        self.speaker = self._parse_field(self.REGEX_SPEAKER, "Speaker", "``Speaker:`` //<Name>//")
        raw_text = self._parse_field(self.REGEX_TEXT, "Text", "``Text:`` //<text>//")
        if raw_text is not None:
            # Only the display text gets line-ending normalization
            self.text = TwineHelper.clean_text(raw_text)
        self.edge_text = self._parse_field(self.REGEX_EDGE_TEXT, "edge text", "``EdgeText:`` //<edge_text>//")
    def to_dict(self):
        """Export as a JSON-ready dict; {} when any field is missing."""
        if self.speaker is None or self.raw_data is None or self.text is None or self.edge_text is None:
            return {}
        return {
            "Speaker": self.speaker,
            "Text": self.text,
            "EdgeText": self.edge_text
        }
    def __str__(self):
        return "TwineInnerEdgeData(speaker = {}, text = {}, edge_text = `{}`)".format(self.speaker, self.text, self.edge_text)
    def __repr__(self):
        return str(self)
class TwineNodeData:
    """One twine passage parsed into a dialogue node (text, tags, edges)."""
    # Matches one `[[edge text|target]]` link in the passage body
    REGEX_EDGES = r"\[\[(.*)\]\]"
    def __init__(self):
        # Raw values straight from the twine html attributes/body
        self.raw_name = None
        self.raw_data = None
        self.raw_tags = None
        # Computed from raw data
        self.node_index = None
        self.speaker = None
        self.text = ""
        self.tags = []
        self.edges = []
        self.inner_edges = []
    def __get_raw_data_until_edges(self):
        """Return the passage body up to (excluding) the first `[[` link."""
        index_edge_start = self.raw_data.find("[[")
        if index_edge_start == -1:
            # take whole string
            return self.raw_data
        # Until the first
        return self.raw_data[0:index_edge_start]
    def _parse_text(self):
        """Fill self.text for node kinds that carry display text."""
        if not self.can_have_text():
            return
        self.text = TwineHelper.clean_text(self.__get_raw_data_until_edges())
    def _parse_edges(self):
        """Fill self.edges from the `[[...]]` links in the passage body."""
        # Refuse to parse, because on some nodes we don't care about the edge text
        if not self.raw_data or not self.can_have_text_on_edges():
            return None
        matches = re.finditer(self.REGEX_EDGES, self.raw_data, re.MULTILINE | re.UNICODE)
        for index, match in enumerate(matches):
            group = match.group(1)
            if group is None:
                print_yellow("Node Index = {} could not get group 1 that matches [[<edge content>|<edge index>]]".format(self.node_index))
                continue
            edge = TwineEdgeData()
            edge.raw_data = group.strip()
            edge.owner_node_index = self.node_index
            edge.parse()
            self.edges.append(edge)
    # only for speech sequence nodes
    def _parse_inner_edges(self):
        """Fill self.inner_edges by splitting the body on `---` separators."""
        if not self.is_node_speech_sequence() or not self.raw_data:
            return
        raw_text_data = self.__get_raw_data_until_edges().strip()
        inner_edges_parts = raw_text_data.split("---")
        if not inner_edges_parts:
            print_yellow("Node Index = {} which is a speech sequence node does not have inner edges".format(self.node_index))
            return
        for raw_inner_edge in inner_edges_parts:
            inner_edge = TwineInnerEdgeData()
            inner_edge.raw_data = raw_inner_edge.strip()
            inner_edge.owner_node_index = self.node_index
            inner_edge.parse()
            self.inner_edges.append(inner_edge)
    def parse(self):
        """Derive all computed fields from the raw twine attributes/body."""
        self.tags = [x.lower() for x in self.raw_tags.strip().split(" ")]
        # Get node index and speaker
        context_parse_name = "Node Name = {}".format(self.raw_name)
        self.node_index, self.speaker = TwineHelper.parse_twine_node_name(self.raw_name, context_parse_name, context_parse_name, context_parse_name)
        self._parse_text()
        if not TwineNodeTag.has_valid_tags(self.tags):
            print_yellow("Node Index = {} does not have any valid tags = {}".format(self.node_index, self.tags))
        self._parse_edges()
        self._parse_inner_edges()
    def can_have_text(self):
        return self.is_node_speech() or self.is_node_virtual_parent()
    def can_have_text_on_edges(self):
        return self.is_node_start() or self.is_node_speech() or self.is_node_speech_sequence()
    def is_node_start(self):
        return TwineNodeTag.NODE_START in self.tags
    def is_node_end(self):
        return TwineNodeTag.NODE_END in self.tags
    def is_node_speech(self):
        return TwineNodeTag.NODE_SPEECH in self.tags
    def is_node_virtual_parent(self):
        return TwineNodeTag.NODE_VIRTUAL_PARENT in self.tags
    def is_node_speech_sequence(self):
        return TwineNodeTag.NODE_SPEECH_SEQUENCE in self.tags
    def is_node_selector(self):
        return self.is_node_selector_first() or self.is_node_selector_random()
    def is_node_selector_first(self):
        return TwineNodeTag.NODE_SELECTOR_FIRST in self.tags
    def is_node_selector_random(self):
        return TwineNodeTag.NODE_SELECTOR_RANDOM in self.tags
    def to_dict(self):
        """Export as a JSON-ready dict; {} for invalid/irrelevant node kinds."""
        if self.node_index is None or self.node_index < ROOT_NODE_INDEX:
            print_yellow("Node Index = {} is invalid ignoring".format(self.node_index))
            return {}
        edges = []
        for edge in self.edges:
            edges.append(edge.to_dict())
        inner_edges = []
        for inner_edge in self.inner_edges:
            inner_edges.append(inner_edge.to_dict())
        if self.is_node_speech_sequence():
            return {
                "NodeIndex": self.node_index,
                "Speaker": self.speaker,
                "Sequence": inner_edges,
                "Edges": edges
            }
        if self.can_have_text() or self.is_node_start():
            return {
                "NodeIndex": self.node_index,
                "Speaker": self.speaker,
                "Text": self.text,
                "Edges": edges
            }
        return {}
    def __str__(self):
        # BUG FIX: debug label previously read "speakr ="
        return "TwineNodeData(node_index = {}, speaker = {}, tags = {}, text = `{}`, edges = {})".format(self.node_index, self.speaker, self.tags, self.text, self.edges)
    def __repr__(self):
        return str(self)
class TwineDocumentData:
    """Parsed contents of a whole twine story (one dialogue)."""
    def __init__(self):
        self.raw_guid = None        # twine IFID, dashed form
        self.dialogue_name = None   # story name attribute
        self.dialogue_guid = None   # Unreal-style 32-hex uppercase guid
        self.nodes = []             # TwineNodeData entries, in document order
    def _parse_dialogue_guid(self):
        """Convert the twine IFID into Unreal's uppercase hex guid format."""
        self.dialogue_guid = uuid.UUID(self.raw_guid).hex.upper()
    def parse(self):
        self._parse_dialogue_guid()
    def to_dict(self):
        """Export as a JSON-ready dict; {} when name/guid/nodes are missing."""
        if self.dialogue_name is None or self.dialogue_guid is None or not self.nodes:
            return {}
        sequence_nodes = [node.to_dict() for node in self.nodes if node.is_node_speech_sequence()]
        speech_nodes = [
            node.to_dict()
            for node in self.nodes
            if not node.is_node_speech_sequence()
            and (node.is_node_speech() or node.is_node_virtual_parent() or node.is_node_start())
        ]
        return {
            "DialogueName": self.dialogue_name,
            "DialogueGuid": self.dialogue_guid,
            "SpeechNodes": speech_nodes,
            "SpeechSequenceNodes": sequence_nodes
        }
    def __str__(self):
        joined_nodes = "\n".join(str(node) for node in self.nodes)
        return "TwineDocumentData(dialogue_name = {}, dialogue_guid = {}, nodes =\n{})".format(self.dialogue_name, self.dialogue_guid, joined_nodes)
    def __repr__(self):
        return str(self)
class TwineHtmlParser(HTMLParser):
    """SAX-style parser for a twine .html export.

    Fills self.document (a TwineDocumentData) from the <tw-storydata> element
    (dialogue name + guid) and its <tw-passagedata> children (one per node).
    The current_tag/current_node pair is the parser state between callbacks.
    """
    HTML_TAG_STORYDATA = "tw-storydata"
    HTML_TAG_PASSAGE_DATA = "tw-passagedata"
    HTML_ATTR_NAME = "name"
    HTML_ATTR_TAGS = "tags"
    HTML_ATTR_GUID = "ifid"
    def __init__(self):
        super().__init__()
        self.document = TwineDocumentData()
        # Parser state: element/node currently being read (None outside one)
        self.current_tag = None
        self.current_node = None
    def handle_starttag(self, tag, attrs):
        """Record dialogue attributes or open a new node for a passage."""
        # print("Start tag:", tag)
        self.current_tag = tag
        if tag == self.HTML_TAG_STORYDATA:
            # Data about dialogue
            for attr in attrs:
                attr_name, attr_value = attr
                if attr_name == self.HTML_ATTR_NAME:
                    self.document.dialogue_name = attr_value.strip()
                elif attr_name == self.HTML_ATTR_GUID:
                    self.document.raw_guid = attr_value.strip()
        elif tag == self.HTML_TAG_PASSAGE_DATA:
            # Data about each node
            self.current_node = TwineNodeData()
            self.document.nodes.append(self.current_node)
            for attr in attrs:
                attr_name, attr_value = attr
                if attr_name == self.HTML_ATTR_NAME:
                    self.current_node.raw_name = attr_value.strip()
                elif attr_name == self.HTML_ATTR_TAGS:
                    self.current_node.raw_tags = attr_value.strip()
    def handle_endtag(self, tag):
        """Trigger parsing once an element's raw attributes/body are complete."""
        if tag == self.HTML_TAG_STORYDATA:
            self.document.parse()
        elif tag == self.HTML_TAG_PASSAGE_DATA:
            self.current_node.parse()
            self.current_tag = None
            self.current_node = None
        # print("End tag :", tag)
    def handle_data(self, data):
        """Capture a passage's body text into the currently open node."""
        if self.current_tag is None:
            return
        if self.current_node is None:
            return
        if self.current_tag == self.HTML_TAG_PASSAGE_DATA:
            self.current_node.raw_data = data.strip()
    # The handlers below only trace unexpected document constructs for debugging.
    def handle_comment(self, data):
        print("Comment :", data)
    def handle_entityref(self, name):
        c = chr(name2codepoint[name])
        print("Named ent:", c)
    def handle_charref(self, name):
        # Numeric character reference: hex (&#xNN;) or decimal (&#NN;)
        if name.startswith('x'):
            c = chr(int(name[1:], 16))
        else:
            c = chr(int(name))
        print("Num ent :", c)
    def handle_decl(self, data):
        print("Decl :", data)
def exit_program(status):
    """Terminate the script with the given exit status."""
    sys.exit(status)
def exit_program_error(message=None):
    """Print an optional error message in red, then exit with status 1."""
    if message is not None:
        print_red(message)
    exit_program(1)
def exit_program_success():
    """Exit with status 0."""
    exit_program(0)
def convert_path_to_absolute_if_not_already(path):
    """Return `path` unchanged when already absolute, else its absolute form."""
    return path if os.path.isabs(path) else os.path.abspath(path)
def is_path_twine_file(path):
    """True when `path` is an existing file with a .html extension.

    The check is case-insensitive so `.HTML`/`.Html` exports are accepted too
    (previously they were silently skipped).
    """
    if not os.path.isfile(path):
        return False
    filename = os.path.basename(str(path))
    extension = os.path.splitext(filename)[1]
    if extension.lower() != ".html":
        return False
    # TODO Maybe parse the contents
    return True
def json_save_dictionary(path, dictionary):
    """Write `dictionary` to `path` as pretty-printed JSON.

    Errors are reported in red and swallowed (best-effort export tool).
    """
    try:
        # Explicit utf-8 for consistency with twine_parse_file, which reads
        # with encoding="utf8"; otherwise the platform default is used.
        with open(path, 'w', encoding="utf-8") as fh:
            try:
                json.dump(dictionary, fh, indent=4)
            except ValueError as e:
                print_red("Can't save file = `{}`. Error = `{}`".format(path, e))
                return None
    except IOError as e:
        print_red("Can't open file = `{}`. IOError = `{}`".format(path, e))
def twine_parse_file(path):
    """Parse a twine .html export file.

    Returns a TwineDocumentData instance (NOT a dictionary — call .to_dict()
    on the result to export it), or None when the file cannot be opened.
    """
    try:
        with open(path, 'r', encoding="utf8") as fh:
            parser = TwineHtmlParser()
            parser.feed(fh.read())
            return parser.document
    except IOError as e:
        print_red("Can't open file = `{}`. IOError = `{}`".format(path, e))
        return None
def export_twine_file_dlg_text_json(src_file_path, src_twine_dir_from, dst_json_dir):
    """Parse one twine .html file and write the matching .dlg_human.json file.

    `src_twine_dir_from` is the name of the twine root directory; the part of
    src_file_path's directory that follows it is mirrored under dst_json_dir.
    """
    # Construct subdirectory we need to create our destination file
    src_dirname, src_filename = os.path.split(src_file_path)
    src_dirname_parts = src_dirname.split(os.sep)
    dst_dirname = None
    for index, part in enumerate(src_dirname_parts):
        if part == src_twine_dir_from:
            dst_dirname = os.sep.join(src_dirname_parts[index + 1:])
            break
    if dst_dirname is None:
        print_yellow("Can't find dst_dirname for src_file_path = `{}`".format(src_file_path))
        return
    # Ensure dirname exists in destination
    dst_dirname = os.path.join(dst_json_dir, dst_dirname)
    if not os.path.exists(dst_dirname):
        os.makedirs(dst_dirname, exist_ok=True)
        print_blue("Creating directory = `{}`".format(dst_dirname))
    # BUG FIX: previously tested dst_json_dir here, so a failed creation of
    # dst_dirname went undetected (the warning even printed dst_dirname).
    if not os.path.isdir(dst_dirname):
        print_yellow("Path = `{}` is not a directory. Ignoring".format(dst_dirname))
        return
    # Parse file
    print_blue("Parsing file = `{}`".format(src_file_path))
    twine_document = twine_parse_file(src_file_path)
    if twine_document is None:
        print_yellow("Can't parse twine file = `{}`".format(src_file_path))
        return
    json_human_content = twine_document.to_dict()
    if not json_human_content:
        print_yellow("Twine file = `{}` is corrupt or invalid. Can't parse any data".format(src_file_path))
        return
    # Write file: mirror the filename with the dialogue-json extension
    src_file, src_file_ext = os.path.splitext(src_filename)
    dst_file_path = os.path.join(dst_dirname, src_file) + DLG_JSON_HUMAN_EXTENSION
    print_blue("Writing file = `{}`".format(dst_file_path))
    json_save_dictionary(dst_file_path, json_human_content)
    print("")
def main(src_twine_dir, dst_json_dir):
    """Convert every twine .html file under src_twine_dir into a
    .dlg_human.json file under dst_json_dir, mirroring the directory tree.

    Exits the process with status 1 when src_twine_dir is missing/invalid or
    dst_json_dir cannot be used as a directory.
    """
    if not os.path.exists(src_twine_dir):
        exit_program_error("src_twine_dir = `{}` does not exist".format(src_twine_dir))
    if not os.path.isdir(src_twine_dir):
        exit_program_error("src_twine_dir = `{}` is not a directory".format(src_twine_dir))
    if not os.path.exists(dst_json_dir):
        os.makedirs(dst_json_dir, exist_ok=True)
        print_blue("Creating dst_json_dir = `{}`".format(dst_json_dir))
    if not os.path.isdir(dst_json_dir):
        exit_program_error("dst_json_dir = `{}` is not a directory".format(dst_json_dir))
    # Walk over all files in directory
    src_twine_dir = convert_path_to_absolute_if_not_already(src_twine_dir)
    dst_json_dir = convert_path_to_absolute_if_not_already(dst_json_dir)
    print_blue("Finding save files in src_twine_dir = {}\n".format(src_twine_dir))
    # Directory from where files
    # (the last path component; each exported file's subdirectory is computed
    # relative to this name)
    src_twine_dir_from = os.path.basename(os.path.normpath(src_twine_dir))
    for path, subdirs, files in os.walk(src_twine_dir):
        for name in files:
            full_filename = os.path.join(path, name)
            if is_path_twine_file(full_filename):
                export_twine_file_dlg_text_json(full_filename, src_twine_dir_from, dst_json_dir)
            else:
                print_yellow("Path = `{}` is not a file or a twine file".format(full_filename))
if __name__ == "__main__":
    # CLI entry point: both arguments are optional and default to the
    # project's conventional twine-source and json-destination directories.
    parser = argparse.ArgumentParser()
    parser.add_argument('src_twine_dir', nargs='?', type=str, help='Source directory from where we get all the .html twine files', default="DialoguesTwine/")
    parser.add_argument('dst_json_dir', nargs='?', type=str, help='Destination directory where we store all the .dlg_human.json files', default="DialoguesJsonHumanText/")
    args = parser.parse_args()
    main(args.src_twine_dir, args.dst_json_dir)
import os
import sys
import argparse
import json
import uuid
import re
from pprint import pprint
from html.parser import HTMLParser
from html.entities import name2codepoint
# NOTE: This script is standalone does not include any libraries
DLG_JSON_HUMAN_EXTENSION = ".dlg_human.json"
ROOT_NODE_INDEX = -1
class Colors:
    """ANSI escape sequences used for colored terminal output."""
    HEADER = '\033[95m'
    BLUE = '\033[0;36m'
    BLUE_LIGHT = '\033[1;36m'
    GREEN = '\033[0;32m'
    GREEN_LIGHT = '\033[1;32m'
    YELLOW = '\033[0;33m'
    YELLOW_LIGHT = '\033[1;33m'
    RED = '\033[0;31m'
    RED_LIGHT = '\033[1;31m'
    # No Color
    END = '\033[0m'
def print_newlines(nr = 1):
    """Print `nr` blank lines; does nothing for nr <= 0."""
    if nr > 0:
        print('\n' * nr, end='')
def print_reset_color():
    """Emit the ANSI reset code, but only when stdout is a real terminal."""
    if sys.stdout.isatty():
        print(Colors.END)
def _print_internal(color, string, **kwargs):
    """Print `string`, wrapped in ANSI `color` + reset codes when stdout is a TTY."""
    if sys.stdout.isatty():
        # You're running in a real terminal
        prefix, suffix = color, Colors.END
    else:
        # You're being piped or redirected
        prefix, suffix = '', ''
    print(prefix + string + suffix, **kwargs)
# Thin color wrappers: space-join all positional args and delegate to
# _print_internal with the matching ANSI color.
def print_red(*args, **kwargs):
    _print_internal(Colors.RED, " ".join(map(str, args)), **kwargs)
def print_red_light(*args, **kwargs):
    _print_internal(Colors.RED_LIGHT, " ".join(map(str, args)), **kwargs)
def print_blue(*args, **kwargs):
    _print_internal(Colors.BLUE, " ".join(map(str, args)), **kwargs)
def print_blue_light(*args, **kwargs):
    _print_internal(Colors.BLUE_LIGHT, " ".join(map(str, args)), **kwargs)
def print_yellow(*args, **kwargs):
    _print_internal(Colors.YELLOW, " ".join(map(str, args)), **kwargs)
def print_yellow_light(*args, **kwargs):
    _print_internal(Colors.YELLOW_LIGHT, " ".join(map(str, args)), **kwargs)
def print_green(*args, **kwargs):
    _print_internal(Colors.GREEN, " ".join(map(str, args)), **kwargs)
def print_green_light(*args, **kwargs):
    _print_internal(Colors.GREEN_LIGHT, " ".join(map(str, args)), **kwargs)
def print_config_value(config_name, config_value):
    """Print `config_name = config_value` (name in blue, value in bright blue)."""
    print_blue("{} = ".format(config_name), end='')
    print_blue_light(config_value)
def string_to_int(string):
    """Parse `string` as an int; return None when it is not a valid integer."""
    try:
        return int(string)
    except ValueError:
        return None
class TwineNodeTag:
    """Twine passage tags that identify the dialogue node kind (all lowercase)."""
    NODE_START = "node-start"
    NODE_END = "node-end"
    NODE_VIRTUAL_PARENT = "node-virtual-parent"
    NODE_SPEECH = "node-speech"
    NODE_SPEECH_SEQUENCE = "node-speech-sequence"
    NODE_SELECTOR_FIRST = "node-selector-first"
    NODE_SELECTOR_RANDOM = "node-selector-random"
    @classmethod
    def get_all_tags(cls):
        """Return the set of every recognized node tag."""
        return set([cls.NODE_START, cls.NODE_END, cls.NODE_VIRTUAL_PARENT, cls.NODE_SPEECH, cls.NODE_SPEECH_SEQUENCE, cls.NODE_SELECTOR_FIRST, cls.NODE_SELECTOR_RANDOM])
    @classmethod
    def is_valid_tag(cls, tag):
        """True when `tag` (case-insensitive) is a recognized node tag."""
        return tag.lower() in cls.get_all_tags()
    @classmethod
    def has_valid_tags(cls, tags_list):
        """True when at least one entry of `tags_list` is a recognized node tag."""
        tags_set = set([x.lower() for x in tags_list])
        common = cls.get_all_tags().intersection(tags_set)
        return bool(common)
class TwineHelper:
REGEX_NAME = r"(-?\d+)\.\s*(.*)"
@classmethod
def parse_twine_node_name(cls, raw_name, context_multiple_matches, context_invalid_index, context_invalid_speaker):
# Get node index and speaker
matches_name = re.finditer(cls.REGEX_NAME, raw_name, re.MULTILINE | re.UNICODE)
node_index, speaker = None, None
for index, match in enumerate(matches_name):
if index > 0:
print_yellow("{}, got multiple name matches".format(context_multiple_matches))
break
group_index = match.group(1)
if group_index is not None:
node_index = string_to_int(group_index.strip())
else:
print_yellow("{}, could not get node index from <node index>. <Speaker>".format(context_invalid_index))
group_speaker = match.group(2)
if group_index is not None:
speaker = group_speaker.strip()
else:
print_yellow("{}, could not get speaker from <node index>. <Speaker>".format(context_invalid_speaker))
return node_index, speaker
@classmethod
def clean_text(cls, text):
# Use windows line endings
return text.strip().replace("\n", "\r\n")
class TwineEdgeData:
IGNORE_EMPTY_TEXT_FLAG = "~ignore~"
def __init__(self):
self.raw_data = None
self.raw_text = None
self.text = None
self.target_node_index = None
self.owner_node_index = None
# The edge has empty text
def is_empty_edge_text(self):
return self.raw_text is None or self.IGNORE_EMPTY_TEXT_FLAG in self.raw_text.lower()
def parse(self):
# TODO make sure there are not multiple of these
parts = self.raw_data.split("|")
if len(parts) != 2:
print_yellow("Node Index = {} has an edge with len(parts) = {}. There must be exactly 2. Did you use `|` inside your edge?".format(self.owner_node_index, len(parts)))
return
# Text
self.raw_text = parts[0]
if self.is_empty_edge_text():
self.text = ""
else:
self.text = TwineHelper.clean_text(self.raw_text)
# Target nnode index
context_parse_name = "Node Index = {} Edge, parts[1] = `{}`".format(self.owner_node_index, parts[1])
self.target_node_index, ignored_speaker = TwineHelper.parse_twine_node_name(parts[1], context_parse_name, context_parse_name, context_parse_name)
def to_dict(self):
if self.text is None or self.target_node_index is None or self.target_node_index < ROOT_NODE_INDEX:
print(self.text)
print_yellow("Node index = {}, Edge invalid = {}. ignoring.".format(self.owner_node_index, str(self)))
return {}
return {
"TargetNodeIndex": self.target_node_index,
"Text": self.text
}
def __str__(self):
return "TwineEdgeData(target_node_index = {}, text = `{}`)".format(self.target_node_index, self.text)
def __repr__(self):
return str(self)
class TwineInnerEdgeData:
REGEX_SPEAKER = r"``\s*Speaker\s*:\s*``\s*//(.*)//"
REGEX_TEXT = r"``\s*Text\s*:\s*``\s*//(.*)//"
REGEX_EDGE_TEXT = r"``\s*EdgeText\s*:\s*``\s*//(.*)//"
def __init__(self):
self.raw_data = None
self.speaker = None
self.text = None
self.edge_text = None
self.owner_node_index = None
def parse(self):
# Parse speaker
matches_text = re.finditer(self.REGEX_SPEAKER, self.raw_data, re.MULTILINE | re.UNICODE | re.IGNORECASE)
for index, match in enumerate(matches_text):
if index > 0:
print_yellow("Node speech sequence Index = {} got multiple matches for Speaker".format(self.owner_node_index))
break
group = match.group(1)
if group is None:
print_yellow("Node speech sequence Index = {} could not get group 1 that matches ``Speaker:`` //<Name>//".format(self.owner_node_index))
continue
self.speaker = group.strip()
# Parse text
matches_text = re.finditer(self.REGEX_TEXT, self.raw_data, re.MULTILINE | re.UNICODE | re.IGNORECASE)
for index, match in enumerate(matches_text):
if index > 0:
print_yellow("Node speech sequence Index = {} got multiple matches for Text".format(self.owner_node_index))
break
group = match.group(1)
if group is None:
print_yellow("Node speech sequence Index = {} could not get group 1 that matches ``Text:`` //<text>//".format(self.owner_node_index))
continue
self.text = TwineHelper.clean_text(group.strip())
# Parse edge text
matches_edge_text = re.finditer(self.REGEX_EDGE_TEXT, self.raw_data, re.MULTILINE | re.UNICODE | re.IGNORECASE)
for index, match in enumerate(matches_edge_text):
if index > 0:
print_yellow("Node speech sequence Index = {} got multiple matches for edge text".format(self.owner_node_index))
break
group = match.group(1)
if group is None:
print_yellow("Node speech sequence Index = {} could not get group 1 that matches ``EdgeText:`` //<edge_text>//".format(self.owner_node_index))
continue
self.edge_text = group.strip()
def to_dict(self):
if self.speaker is None or self.raw_data is None or self.text is None or self.edge_text is None:
return {}
return {
"Speaker": self.speaker,
"Text": self.text,
"EdgeText": self.edge_text
}
def __str__(self):
return "TwineInnerEdgeData(speaker = {}, text = {}, edge_text = `{}`)".format(self.speaker, self.text, self.edge_text)
def __repr__(self):
return str(self)
class TwineNodeData:
REGEX_EDGES = r"\[\[(.*)\]\]"
def __init__(self):
self.raw_name = None
self.raw_data = None
self.raw_tags = None
# Computed from raw data
self.node_index = None
self.speaker = None
self.text = ""
self.tags = []
self.edges = []
self.inner_edges = []
def __get_raw_data_until_edges(self):
index_edge_start = self.raw_data.find("[[")
if index_edge_start == -1:
# take whole string
return self.raw_data
# Until the first
return self.raw_data[0:index_edge_start]
def _parse_text(self):
if not self.can_have_text():
return
self.text = TwineHelper.clean_text(self.__get_raw_data_until_edges())
def _parse_edges(self):
# Refuse to parse, because on some nodes we don't care about the edge text
if not self.raw_data or not self.can_have_text_on_edges():
return None
matches = re.finditer(self.REGEX_EDGES, self.raw_data, re.MULTILINE | re.UNICODE)
for index, match in enumerate(matches):
group = match.group(1)
if group is None:
print_yellow("Node Index = {} could not get group 1 that matches [[<edge content>|<edge index>]]".format(self.node_index))
continue
edge = TwineEdgeData()
edge.raw_data = group.strip()
edge.owner_node_index = self.node_index
edge.parse()
self.edges.append(edge)
# only for speech sequence nodese
def _parse_inner_edges(self):
if not self.is_node_speech_sequence() or not self.raw_data:
return
raw_text_data = self.__get_raw_data_until_edges().strip()
inner_edges_parts = raw_text_data.split("---")
if not inner_edges_parts:
print_yellow("Node Index = {} which is a speech sequence node does not have inner edges".format(self.node_index))
return
for raw_inner_edge in inner_edges_parts:
inner_edge = TwineInnerEdgeData()
inner_edge.raw_data = raw_inner_edge.strip()
inner_edge.owner_node_index = self.node_index
inner_edge.parse()
self.inner_edges.append(inner_edge)
def parse(self):
self.tags = [x.lower() for x in self.raw_tags.strip().split(" ")]
# Get node index and speaker
context_parse_name = "Node Name = {}".format(self.raw_name)
self.node_index, self.speaker = TwineHelper.parse_twine_node_name(self.raw_name, context_parse_name, context_parse_name, context_parse_name)
self._parse_text()
if not TwineNodeTag.has_valid_tags(self.tags):
print_yellow("Node Index = {} does not have any valid tags = {}".format(self.node_index, self.tags))
self._parse_edges()
self._parse_inner_edges()
def can_have_text(self):
return self.is_node_speech() or self.is_node_virtual_parent()
def can_have_text_on_edges(self):
return self.is_node_start() or self.is_node_speech() or self.is_node_speech_sequence()
def is_node_start(self):
return TwineNodeTag.NODE_START in self.tags
def is_node_end(self):
return TwineNodeTag.NODE_END in self.tags
def is_node_speech(self):
return TwineNodeTag.NODE_SPEECH in self.tags
def is_node_virtual_parent(self):
return TwineNodeTag.NODE_VIRTUAL_PARENT in self.tags
def is_node_speech_sequence(self):
return TwineNodeTag.NODE_SPEECH_SEQUENCE in self.tags
def is_node_selector(self):
return self.is_node_selector_first() or self.is_node_selector_random()
def is_node_selector_first(self):
return TwineNodeTag.NODE_SELECTOR_FIRST in self.tags
def is_node_selector_random(self):
return TwineNodeTag.NODE_SELECTOR_RANDOM in self.tags
def to_dict(self):
if self.node_index is None or self.node_index < ROOT_NODE_INDEX:
print_yellow("Node Index = {} is invalid ignoring".format(self.node_index))
return {}
edges = []
for edge in self.edges:
edges.append(edge.to_dict())
inner_edges = []
for inner_edge in self.inner_edges:
inner_edges.append(inner_edge.to_dict())
if self.is_node_speech_sequence():
return {
"NodeIndex": self.node_index,
"Speaker": self.speaker,
"Sequence": inner_edges,
"Edges": edges
}
if self.can_have_text() or self.is_node_start():
return {
"NodeIndex": self.node_index,
"Speaker": self.speaker,
"Text": self.text,
"Edges": edges
}
return {}
def __str__(self):
return "TwineNodeData(node_index = {}, speakr = {}, tags = {}, text = `{}`, edges = {})".format(self.node_index, self.speaker, self.tags, self.text, self.edges)
def __repr__(self):
return str(self)
class TwineDocumentData:
def __init__(self):
self.raw_guid = None
self.dialogue_name = None
self.dialogue_guid = None
self.nodes = []
def _parse_dialogue_guid(self):
# Convert to default Unreal uuid
temp_uuid = uuid.UUID(self.raw_guid)
self.dialogue_guid = temp_uuid.hex.upper()
def parse(self):
self._parse_dialogue_guid()
def to_dict(self):
if self.dialogue_name is None or self.dialogue_guid is None or not self.nodes:
return {}
speech_nodes = []
speech_sequence_nodes = []
for node in self.nodes:
if node.is_node_speech_sequence():
speech_sequence_nodes.append(node.to_dict())
elif node.is_node_speech() or node.is_node_virtual_parent() or node.is_node_start():
speech_nodes.append(node.to_dict())
else:
# Ignore
pass
return {
"DialogueName": self.dialogue_name,
"DialogueGuid": self.dialogue_guid,
"SpeechNodes": speech_nodes,
"SpeechSequenceNodes": speech_sequence_nodes
}
def __str__(self):
return "TwineDocumentData(dialogue_name = {}, dialogue_guid = {}, nodes =\n{})".format(self.dialogue_name, self.dialogue_guid, "\n".join(str(n) for n in self.nodes))
def __repr__(self):
return str(self)
class TwineHtmlParser(HTMLParser):
HTML_TAG_STORYDATA = "tw-storydata"
HTML_TAG_PASSAGE_DATA = "tw-passagedata"
HTML_ATTR_NAME = "name"
HTML_ATTR_TAGS = "tags"
HTML_ATTR_GUID = "ifid"
def __init__(self):
super().__init__()
self.document = TwineDocumentData()
self.current_tag = None
self.current_node = None
def handle_starttag(self, tag, attrs):
# print("Start tag:", tag)
self.current_tag = tag
if tag == self.HTML_TAG_STORYDATA:
# Data about dialogue
for attr in attrs:
attr_name, attr_value = attr
if attr_name == self.HTML_ATTR_NAME:
self.document.dialogue_name = attr_value.strip()
elif attr_name == self.HTML_ATTR_GUID:
self.document.raw_guid = attr_value.strip()
elif tag == self.HTML_TAG_PASSAGE_DATA:
# Data about each node
self.current_node = TwineNodeData()
self.document.nodes.append(self.current_node)
for attr in attrs:
attr_name, attr_value = attr
if attr_name == self.HTML_ATTR_NAME:
self.current_node.raw_name = attr_value.strip()
elif attr_name == self.HTML_ATTR_TAGS:
self.current_node.raw_tags = attr_value.strip()
def handle_endtag(self, tag):
if tag == self.HTML_TAG_STORYDATA:
self.document.parse()
elif tag == self.HTML_TAG_PASSAGE_DATA:
self.current_node.parse()
self.current_tag = None
self.current_node = None
# print("End tag :", tag)
def handle_data(self, data):
if self.current_tag is None:
return
if self.current_node is None:
return
if self.current_tag == self.HTML_TAG_PASSAGE_DATA:
self.current_node.raw_data = data.strip()
def handle_comment(self, data):
print("Comment :", data)
def handle_entityref(self, name):
c = chr(name2codepoint[name])
print("Named ent:", c)
def handle_charref(self, name):
if name.startswith('x'):
c = chr(int(name[1:], 16))
else:
c = chr(int(name))
print("Num ent :", c)
def handle_decl(self, data):
print("Decl :", data)
def exit_program(status):
sys.exit(status)
def exit_program_error(message=None):
if message is not None:
print_red(message)
exit_program(1)
def exit_program_success():
exit_program(0)
def convert_path_to_absolute_if_not_already(path):
if not os.path.isabs(path):
return os.path.abspath(path)
return path
def is_path_twine_file(path):
if not os.path.isfile(path):
return False
filename = os.path.basename(str(path))
file, extension = os.path.splitext(filename)
if extension != ".html":
return False
# TODO Maybe parse the contents
return True
def json_save_dictionary(path, dictionary):
try:
with open(path, 'w') as fh:
try:
json.dump(dictionary, fh, indent=4)
except ValueError as e:
print_red("Can't save file = `{}`. Error = `{}`".format(path, e))
return None
except IOError as e:
print_red("Can't open file = `{}`. IOError = `{}`".format(path, e))
def twine_parse_file(path):
"""
Returns a dictionary
"""
try:
with open(path, 'r', encoding="utf8") as fh:
parser = TwineHtmlParser()
parser.feed(fh.read())
return parser.document
except IOError as e:
print_red("Can't open file = `{}`. IOError = `{}`".format(path, e))
return None
def export_twine_file_dlg_text_json(src_file_path, src_twine_dir_from, dst_json_dir):
# Construct subdirectory we need to create our destination file
src_dirname, src_filename = os.path.split(src_file_path)
src_dirname_parts = src_dirname.split(os.sep)
dst_dirname = None
for index, part in enumerate(src_dirname_parts):
if part == src_twine_dir_from:
dst_dirname = os.sep.join(src_dirname_parts[index + 1:])
break
if dst_dirname is None:
print_yellow("Can't find dst_dirname for src_file_path = `{}`".format(src_file_path))
return
# Ensure dirname exists in destination
dst_dirname = os.path.join(dst_json_dir, dst_dirname)
if not os.path.exists(dst_dirname):
os.makedirs(dst_dirname, exist_ok=True)
print_blue("Creating directory = `{}`".format(dst_dirname))
if not os.path.isdir(dst_json_dir):
print_yellow("Path = `{}` is not a directory. Ignoring".format(dst_dirname))
return
# Parse file
print_blue("Parsing file = `{}`".format(src_file_path))
twine_document = twine_parse_file(src_file_path)
if twine_document is None:
print_yellow("Can't parse twine file = `{}`".format(src_file_path))
return
#print(twine_document)
#print(twine_document.to_dict())
json_human_content = twine_document.to_dict()
if not json_human_content:
print_yellow("Twine file = `{}` is corrupt or invalid. Can't parse any data".format(src_file_path))
return
# Write file
src_file, src_file_ext = os.path.splitext(src_filename)
dst_file_path = os.path.join(dst_dirname, src_file) + DLG_JSON_HUMAN_EXTENSION
print_blue("Writing file = `{}`".format(dst_file_path))
json_save_dictionary(dst_file_path, json_human_content)
print("")
def main(src_twine_dir, dst_json_dir):
if not os.path.exists(src_twine_dir):
exit_program_error("src_twine_dir = `{}` does not exist".format(src_twine_dir))
if not os.path.isdir(src_twine_dir):
exit_program_error("src_twine_dir = `{}` is not a directory".format(src_twine_dir))
if not os.path.exists(dst_json_dir):
os.makedirs(dst_json_dir, exist_ok=True)
print_blue("Creating dst_json_dir = `{}`".format(dst_json_dir))
if not os.path.isdir(dst_json_dir):
exit_program_error("dst_json_dir = `{}` is not a directory".format(dst_json_dir))
# Walk over all files in directory
src_twine_dir = convert_path_to_absolute_if_not_already(src_twine_dir)
dst_json_dir = convert_path_to_absolute_if_not_already(dst_json_dir)
print_blue("Finding save files in src_twine_dir = {}\n".format(src_twine_dir))
# Directory from where files
src_twine_dir_from = os.path.basename(os.path.normpath(src_twine_dir))
for path, subdirs, files in os.walk(src_twine_dir):
for name in files:
full_filename = os.path.join(path, name)
if is_path_twine_file(full_filename):
export_twine_file_dlg_text_json(full_filename, src_twine_dir_from, dst_json_dir)
else:
print_yellow("Path = `{}` is not a file or a twine file".format(full_filename))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('src_twine_dir', nargs='?', type=str, help='Source directory from where we get all the .html twine files', default="DialoguesTwine/")
parser.add_argument('dst_json_dir', nargs='?', type=str, help='Destination directory where we store all the .dlg_human.json files', default="DialoguesJsonHumanText/")
args = parser.parse_args()
main(args.src_twine_dir, args.dst_json_dir) | 0.290075 | 0.087564 |
"HTML Renderer for FPDF.py"
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright (C) 2010 <NAME>"
__license__ = "LGPL 3.0"
# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
from fpdf import FPDF
from HTMLParser import HTMLParser
DEBUG = False
def px2mm(px):
    """Convert a pixel length (assuming 72 dpi) to millimeters."""
    return 25.4 * int(px) / 72.0
def hex2dec(color = "#000000"):
    """Turn an '#RRGGBB' string into an (r, g, b) int tuple.

    A falsy *color* (empty string or None) yields None, which callers use
    to mean "no background fill".
    """
    if not color:
        return None
    return tuple(int(color[pos:pos + 2], 16) for pos in (1, 3, 5))
class HTML2FPDF(HTMLParser):
"Render basic HTML to FPDF"
def __init__(self, pdf):
HTMLParser.__init__(self)
self.style = {}
self.pre = False
self.href = ''
self.align = ''
self.page_links = {}
self.font_list = ("times","courier", "helvetica")
self.pdf = pdf
self.r = self.g = self.b = 0
self.indent = 0
self.bullet = []
self.set_font("times", 12)
self.table = None # table attributes
self.table_col_width = None # column (header) widths
self.table_col_index = None # current column index
self.td = None # cell attributes
self.th = False # header enabled
self.tr = None
self.theader = None # table header cells
self.tfooter = None # table footer cells
self.thead = None
self.tfoot = None
self.theader_out = self.tfooter_out = False
def width2mm(self, length):
if length[-1]=='%':
total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
if self.table['width'][-1]=='%':
total *= int(self.table['width'][:-1])/100.0
return int(length[:-1]) * total / 101.0
else:
return int(length) / 6.0
def handle_data(self, txt):
if self.td is not None: # drawing a table?
if 'width' not in self.td and 'colspan' not in self.td:
l = [self.table_col_width[self.table_col_index]]
elif 'colspan' in self.td:
i = self.table_col_index
colspan = int(self.td['colspan'])
l = self.table_col_width[i:i+colspan]
else:
l = [self.td.get('width','240')]
w = sum([self.width2mm(lenght) for lenght in l])
h = int(self.td.get('height', 0)) / 4 or self.h*1.30
self.table_h = h
border = int(self.table.get('border', 0))
if not self.th:
align = self.td.get('align', 'L')[0].upper()
border = border and 'LR'
else:
self.set_style('B',True)
border = border or 'B'
align = 'C'
bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', '')))
# parsing table header/footer (drawn later):
if self.thead is not None:
self.theader.append(((w,h,txt,border,0,align), bgcolor))
if self.tfoot is not None:
self.tfooter.append(((w,h,txt,border,0,align), bgcolor))
# check if reached end of page, add table footer and header:
height = h + (self.tfooter and self.tfooter[0][0][1] or 0)
if self.pdf.y+height>self.pdf.page_break_trigger and not self.th:
self.output_table_footer()
self.pdf.add_page()
self.theader_out = self.tfooter_out = False
if self.tfoot is None and self.thead is None:
if not self.theader_out:
self.output_table_header()
self.box_shadow(w, h, bgcolor)
if DEBUG: print "td cell", self.pdf.x, w, txt, "*"
self.pdf.cell(w,h,txt,border,0,align)
elif self.table is not None:
# ignore anything else than td inside a table
pass
elif self.align:
if DEBUG: print "cell", txt, "*"
self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href)
else:
txt = txt.replace("\n"," ")
if self.href:
self.put_link(self.href,txt)
else:
if DEBUG: print "write", txt, "*"
self.pdf.write(self.h,txt)
def box_shadow(self, w, h, bgcolor):
if DEBUG: print "box_shadow", w, h, bgcolor
if bgcolor:
fill_color = self.pdf.fill_color
self.pdf.set_fill_color(*bgcolor)
self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F')
self.pdf.fill_color = fill_color
def output_table_header(self):
if self.theader:
b = self.b
x = self.pdf.x
self.set_style('B',True)
for cell, bgcolor in self.theader:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.set_style('B',b)
self.pdf.ln(self.theader[0][0][1])
self.pdf.set_x(x)
self.theader_out = True
def output_table_footer(self):
if self.tfooter:
x = self.pdf.x
#TODO: self.output_table_sep()
for cell, bgcolor in self.tfooter:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.pdf.ln(self.tfooter[0][0][1])
self.pdf.set_x(x)
#TODO: self.output_table_sep()
self.tfooter_out = True
def output_table_sep(self):
x1 = self.pdf.x
y1 = self.pdf.y
w = sum([self.width2mm(lenght) for lenght in self.table_col_width])
self.pdf.line(x1,y1,x1+w,y1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if DEBUG: print "STARTTAG", tag, attrs
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag,1)
if tag=='a':
self.href=attrs['href']
if tag=='br':
self.pdf.ln(5)
if tag=='p':
self.pdf.ln(5)
if attrs:
self.align=attrs['align'].lower()
if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
k = (2, 1.5, 1.17, 1, 0.83, 0.67)[int(tag[1])]
self.pdf.ln(5*k)
self.pdf.set_text_color(150,0,0)
self.pdf.set_font_size(12 * k)
if attrs: self.align = attrs.get('align')
if tag=='hr':
self.put_line()
if tag=='pre':
self.pdf.set_font('Courier','',11)
self.pdf.set_font_size(11)
self.set_style('B',False)
self.set_style('I',False)
self.pre = True
if tag=='blockquote':
self.set_text_color(100,0,45)
self.pdf.ln(3)
if tag=='ul':
self.indent+=1
self.bullet.append('\x95')
if tag=='ol':
self.indent+=1
self.bullet.append(0)
if tag=='li':
self.pdf.ln(self.h+2)
self.pdf.set_text_color(190,0,0)
bullet = self.bullet[self.indent-1]
if not isinstance(bullet, basestring):
bullet += 1
self.bullet[self.indent-1] = bullet
bullet = "%s. " % bullet
self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet))
self.set_text_color()
if tag=='font':
if 'color' in attrs:
self.color = hex2dec(attrs['color'])
self.set_text_color(*color)
self.color = color
if 'face' in attrs and attrs['face'].lower() in self.font_list:
face = attrs.get('face').lower()
self.pdf.set_font(face)
self.font_face = face
if 'size' in attrs:
face = attrs.get('size')
self.pdf.set_font('', size)
self.font_size = size
if tag=='table':
self.table = dict([(k.lower(), v) for k,v in attrs.items()])
if not 'width' in self.table:
self.table['width'] = '100%'
if self.table['width'][-1]=='%':
w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
w *= int(self.table['width'][:-1])/100.0
self.table_offset = (self.pdf.w-w)/2.0
self.table_col_width = []
self.theader_out = self.tfooter_out = False
self.theader = []
self.tfooter = []
self.thead = None
self.tfoot = None
self.pdf.ln()
if tag=='tr':
self.tr = dict([(k.lower(), v) for k,v in attrs.items()])
self.table_col_index = 0
self.pdf.set_x(self.table_offset)
if tag=='td':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
if tag=='th':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
self.th = True
if self.td['width']:
self.table_col_width.append(self.td['width'])
if tag=='thead':
self.thead = {}
if tag=='tfoot':
self.tfoot = {}
if tag=='img':
if 'src' in attrs:
x = self.pdf.get_x()
y = self.pdf.get_y()
w = px2mm(attrs.get('width', 0))
h = px2mm(attrs.get('height',0))
if self.align and self.align[0].upper() == 'C':
x = (self.pdf.w-x)/2.0 - w/2.0
self.pdf.image(attrs['src'], x, y, w, h, link=self.href)
self.pdf.set_x(x+w)
self.pdf.set_y(y+h)
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, True)
if tag=='center':
self.align = 'Center'
def handle_endtag(self, tag):
#Closing tag
if DEBUG: print "ENDTAG", tag
if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4':
self.pdf.ln(6)
self.set_font()
self.set_style()
self.align = None
if tag=='pre':
self.pdf.set_font(self.font or 'Times','',12)
self.pdf.set_font_size(12)
self.pre=False
if tag=='blockquote':
self.set_text_color(0,0,0)
self.pdf.ln(3)
if tag=='strong':
tag='b'
if tag=='em':
tag='i'
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, False)
if tag=='a':
self.href=''
if tag=='p':
self.align=''
if tag in ('ul', 'ol'):
self.indent-=1
self.bullet.pop()
if tag=='table':
if not self.tfooter_out:
self.output_table_footer()
self.table = None
self.th = False
self.theader = None
self.tfooter = None
self.pdf.ln()
if tag=='thead':
self.thead = None
if tag=='tfoot':
self.tfoot = None
if tag=='tbody':
# draw a line separator between table bodies
self.pdf.set_x(self.table_offset)
self.output_table_sep()
if tag=='tr':
h = self.table_h
if self.tfoot is None:
self.pdf.ln(h)
self.tr = None
if tag=='td' or tag=='th':
if self.th:
if DEBUG: print "revert style"
self.set_style('B', False) # revert style
self.table_col_index += int(self.td.get('colspan','1'))
self.td = None
self.th = False
if tag=='font':
if self.color:
self.pdf.set_text_color(0,0,0)
self.color = None
if self.font:
self.SetFont('Times','',12)
self.font = None
if tag=='center':
self.align = None
def set_font(self, face=None, size=None):
if face:
self.font_face = face
if size:
self.font_size = size
self.h = size / 72.0*25.4
if DEBUG: print "H", self.h
self.pdf.set_font(self.font_face or 'times','',12)
self.pdf.set_font_size(self.font_size or 12)
self.set_style('u', False)
self.set_style('b', False)
self.set_style('i', False)
self.set_text_color()
def set_style(self, tag=None, enable=None):
#Modify style and select corresponding font
if tag:
t = self.style.get(tag.lower())
self.style[tag.lower()] = enable
style=''
for s in ('b','i','u'):
if self.style.get(s):
style+=s
if DEBUG: print "SET_FONT_STYLE", style
self.pdf.set_font('',style)
def set_text_color(self, r=None, g=0, b=0):
if r is None:
self.pdf.set_text_color(self.r,self.g,self.b)
else:
self.pdf.set_text_color(r, g, b)
self.r = r
self.g = g
self.b = b
def put_link(self, url, txt):
#Put a hyperlink
self.set_text_color(0,0,255)
self.set_style('u', True)
self.pdf.write(5,txt,url)
self.set_style('u', False)
self.set_text_color(0)
def put_line(self):
self.pdf.ln(2)
self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y())
self.pdf.ln(3)
class HTMLMixin():
    """Mixin that adds an HTML rendering entry point to FPDF subclasses."""
    def write_html(self, text):
        """Parse *text* as HTML and render it into this PDF document."""
        renderer = HTML2FPDF(self)
        renderer.feed(text)
if __name__=='__main__':
    # Demo / manual test: build a sample document exercising most of the
    # supported markup -- text styles, links, an image, nested lists, and
    # tables (including a long one whose thead/tfoot repeat across pages).
    html="""
<H1 align="center">html2fpdf</H1>
<h2>Basic usage</h2>
<p>You can now easily print text mixing different
styles : <B>bold</B>, <I>italic</I>, <U>underlined</U>, or
<B><I><U>all at once</U></I></B>!<BR>You can also insert links
on text, such as <A HREF="http://www.fpdf.org">www.fpdf.org</A>,
or on an image: click on the logo.<br>
<center>
<A HREF="http://www.fpdf.org"><img src="tutorial/logo.png" width="104" height="71"></A>
</center>
<h3>Sample List</h3>
<ul><li>option 1</li>
<ol><li>option 2</li></ol>
<li>option 3</li></ul>
<table border="0" align="center" width="50%">
<thead><tr><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead>
<tbody>
<tr><td>cell 1</td><td>cell 2</td></tr>
<tr><td>cell 2</td><td>cell 3</td></tr>
</tbody>
</table>
<table border="1">
<thead><tr bgcolor="#A0A0A0"><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead>
<tfoot><tr bgcolor="#E0E0E0"><td>footer 1</td><td>footer 2</td></tr></tfoot>
<tbody>
<tr><td>cell 1</td><td>cell 2</td></tr>
<tr>
<td width="30%">cell 1</td><td width="70%" bgcolor="#D0D0FF" align='right'>cell 2</td>
</tr>
</tbody>
<tbody><tr><td colspan="2">cell spanned</td></tr></tbody>
<tbody>
""" + """<tr bgcolor="#F0F0F0">
<td>cell 3</td><td>cell 4</td>
</tr><tr bgcolor="#FFFFFF">
<td>cell 5</td><td>cell 6</td>
</tr>""" * 200 + """
</tbody>
</table>
"""
    class MyFPDF(FPDF, HTMLMixin):
        # Concrete FPDF subclass with HTML support plus a custom page
        # header and footer drawn on every page.
        def header(self):
            # Logo, then a centred boxed title, then a 20mm gap.
            self.image('tutorial/logo_pb.png',10,8,33)
            self.set_font('Arial','B',15)
            self.cell(80)
            self.cell(30,10,'Title',1,0,'C')
            self.ln(20)
        def footer(self):
            # Page number centred 15mm above the bottom edge.
            self.set_y(-15)
            self.set_font('Arial','I',8)
            txt = 'Page %s of %s' % (self.page_no(), self.alias_nb_pages())
            self.cell(0,10,txt,0,0,'C')
    pdf=MyFPDF()
    #First page
    pdf.add_page()
    pdf.write_html(html)
    pdf.output('html.pdf','F')
    # Open the result in a viewer (assumes evince is installed).
    import os
    os.system("evince html.pdf")
"HTML Renderer for FPDF.py"
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "Copyright (C) 2010 <NAME>"
__license__ = "LGPL 3.0"
# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
from fpdf import FPDF
from HTMLParser import HTMLParser
DEBUG = False
def px2mm(px):
    """Convert a pixel count (at 72 dpi) into millimeters."""
    value = int(px)
    return value * 25.4 / 72.0
def hex2dec(color = "#000000"):
    """Parse an '#RRGGBB' colour string into an (r, g, b) int tuple.

    Falsy input (empty string or None) yields None, meaning "no colour".
    """
    if not color:
        return None
    red = int(color[1:3], 16)
    green = int(color[3:5], 16)
    blue = int(color[5:7], 16)
    return red, green, blue
class HTML2FPDF(HTMLParser):
"Render basic HTML to FPDF"
def __init__(self, pdf):
HTMLParser.__init__(self)
self.style = {}
self.pre = False
self.href = ''
self.align = ''
self.page_links = {}
self.font_list = ("times","courier", "helvetica")
self.pdf = pdf
self.r = self.g = self.b = 0
self.indent = 0
self.bullet = []
self.set_font("times", 12)
self.table = None # table attributes
self.table_col_width = None # column (header) widths
self.table_col_index = None # current column index
self.td = None # cell attributes
self.th = False # header enabled
self.tr = None
self.theader = None # table header cells
self.tfooter = None # table footer cells
self.thead = None
self.tfoot = None
self.theader_out = self.tfooter_out = False
def width2mm(self, length):
if length[-1]=='%':
total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
if self.table['width'][-1]=='%':
total *= int(self.table['width'][:-1])/100.0
return int(length[:-1]) * total / 101.0
else:
return int(length) / 6.0
def handle_data(self, txt):
if self.td is not None: # drawing a table?
if 'width' not in self.td and 'colspan' not in self.td:
l = [self.table_col_width[self.table_col_index]]
elif 'colspan' in self.td:
i = self.table_col_index
colspan = int(self.td['colspan'])
l = self.table_col_width[i:i+colspan]
else:
l = [self.td.get('width','240')]
w = sum([self.width2mm(lenght) for lenght in l])
h = int(self.td.get('height', 0)) / 4 or self.h*1.30
self.table_h = h
border = int(self.table.get('border', 0))
if not self.th:
align = self.td.get('align', 'L')[0].upper()
border = border and 'LR'
else:
self.set_style('B',True)
border = border or 'B'
align = 'C'
bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', '')))
# parsing table header/footer (drawn later):
if self.thead is not None:
self.theader.append(((w,h,txt,border,0,align), bgcolor))
if self.tfoot is not None:
self.tfooter.append(((w,h,txt,border,0,align), bgcolor))
# check if reached end of page, add table footer and header:
height = h + (self.tfooter and self.tfooter[0][0][1] or 0)
if self.pdf.y+height>self.pdf.page_break_trigger and not self.th:
self.output_table_footer()
self.pdf.add_page()
self.theader_out = self.tfooter_out = False
if self.tfoot is None and self.thead is None:
if not self.theader_out:
self.output_table_header()
self.box_shadow(w, h, bgcolor)
if DEBUG: print "td cell", self.pdf.x, w, txt, "*"
self.pdf.cell(w,h,txt,border,0,align)
elif self.table is not None:
# ignore anything else than td inside a table
pass
elif self.align:
if DEBUG: print "cell", txt, "*"
self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href)
else:
txt = txt.replace("\n"," ")
if self.href:
self.put_link(self.href,txt)
else:
if DEBUG: print "write", txt, "*"
self.pdf.write(self.h,txt)
def box_shadow(self, w, h, bgcolor):
if DEBUG: print "box_shadow", w, h, bgcolor
if bgcolor:
fill_color = self.pdf.fill_color
self.pdf.set_fill_color(*bgcolor)
self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F')
self.pdf.fill_color = fill_color
def output_table_header(self):
if self.theader:
b = self.b
x = self.pdf.x
self.set_style('B',True)
for cell, bgcolor in self.theader:
self.box_shadow(cell[0], cell[1], bgcolor)
self.pdf.cell(*cell)
self.set_style('B',b)
self.pdf.ln(self.theader[0][0][1])
self.pdf.set_x(x)
self.theader_out = True
    def output_table_footer(self):
        # Re-draw the cached <tfoot> cells at the current position; called
        # just before a page break and again when the table closes.
        if self.tfooter:
            x = self.pdf.x
            #TODO: self.output_table_sep()
            for cell, bgcolor in self.tfooter:
                # cell is the (w, h, txt, border, ln, align) tuple for pdf.cell
                self.box_shadow(cell[0], cell[1], bgcolor)
                self.pdf.cell(*cell)
            self.pdf.ln(self.tfooter[0][0][1])  # advance by the row height
            self.pdf.set_x(x)
        #TODO: self.output_table_sep()
        # marked done even when there is no footer, so callers stop retrying
        self.tfooter_out = True
def output_table_sep(self):
x1 = self.pdf.x
y1 = self.pdf.y
w = sum([self.width2mm(lenght) for lenght in self.table_col_width])
self.pdf.line(x1,y1,x1+w,y1)
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if DEBUG: print "STARTTAG", tag, attrs
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag,1)
if tag=='a':
self.href=attrs['href']
if tag=='br':
self.pdf.ln(5)
if tag=='p':
self.pdf.ln(5)
if attrs:
self.align=attrs['align'].lower()
if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
k = (2, 1.5, 1.17, 1, 0.83, 0.67)[int(tag[1])]
self.pdf.ln(5*k)
self.pdf.set_text_color(150,0,0)
self.pdf.set_font_size(12 * k)
if attrs: self.align = attrs.get('align')
if tag=='hr':
self.put_line()
if tag=='pre':
self.pdf.set_font('Courier','',11)
self.pdf.set_font_size(11)
self.set_style('B',False)
self.set_style('I',False)
self.pre = True
if tag=='blockquote':
self.set_text_color(100,0,45)
self.pdf.ln(3)
if tag=='ul':
self.indent+=1
self.bullet.append('\x95')
if tag=='ol':
self.indent+=1
self.bullet.append(0)
if tag=='li':
self.pdf.ln(self.h+2)
self.pdf.set_text_color(190,0,0)
bullet = self.bullet[self.indent-1]
if not isinstance(bullet, basestring):
bullet += 1
self.bullet[self.indent-1] = bullet
bullet = "%s. " % bullet
self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet))
self.set_text_color()
if tag=='font':
if 'color' in attrs:
self.color = hex2dec(attrs['color'])
self.set_text_color(*color)
self.color = color
if 'face' in attrs and attrs['face'].lower() in self.font_list:
face = attrs.get('face').lower()
self.pdf.set_font(face)
self.font_face = face
if 'size' in attrs:
face = attrs.get('size')
self.pdf.set_font('', size)
self.font_size = size
if tag=='table':
self.table = dict([(k.lower(), v) for k,v in attrs.items()])
if not 'width' in self.table:
self.table['width'] = '100%'
if self.table['width'][-1]=='%':
w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
w *= int(self.table['width'][:-1])/100.0
self.table_offset = (self.pdf.w-w)/2.0
self.table_col_width = []
self.theader_out = self.tfooter_out = False
self.theader = []
self.tfooter = []
self.thead = None
self.tfoot = None
self.pdf.ln()
if tag=='tr':
self.tr = dict([(k.lower(), v) for k,v in attrs.items()])
self.table_col_index = 0
self.pdf.set_x(self.table_offset)
if tag=='td':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
if tag=='th':
self.td = dict([(k.lower(), v) for k,v in attrs.items()])
self.th = True
if self.td['width']:
self.table_col_width.append(self.td['width'])
if tag=='thead':
self.thead = {}
if tag=='tfoot':
self.tfoot = {}
if tag=='img':
if 'src' in attrs:
x = self.pdf.get_x()
y = self.pdf.get_y()
w = px2mm(attrs.get('width', 0))
h = px2mm(attrs.get('height',0))
if self.align and self.align[0].upper() == 'C':
x = (self.pdf.w-x)/2.0 - w/2.0
self.pdf.image(attrs['src'], x, y, w, h, link=self.href)
self.pdf.set_x(x+w)
self.pdf.set_y(y+h)
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, True)
if tag=='center':
self.align = 'Center'
def handle_endtag(self, tag):
#Closing tag
if DEBUG: print "ENDTAG", tag
if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4':
self.pdf.ln(6)
self.set_font()
self.set_style()
self.align = None
if tag=='pre':
self.pdf.set_font(self.font or 'Times','',12)
self.pdf.set_font_size(12)
self.pre=False
if tag=='blockquote':
self.set_text_color(0,0,0)
self.pdf.ln(3)
if tag=='strong':
tag='b'
if tag=='em':
tag='i'
if tag=='b' or tag=='i' or tag=='u':
self.set_style(tag, False)
if tag=='a':
self.href=''
if tag=='p':
self.align=''
if tag in ('ul', 'ol'):
self.indent-=1
self.bullet.pop()
if tag=='table':
if not self.tfooter_out:
self.output_table_footer()
self.table = None
self.th = False
self.theader = None
self.tfooter = None
self.pdf.ln()
if tag=='thead':
self.thead = None
if tag=='tfoot':
self.tfoot = None
if tag=='tbody':
# draw a line separator between table bodies
self.pdf.set_x(self.table_offset)
self.output_table_sep()
if tag=='tr':
h = self.table_h
if self.tfoot is None:
self.pdf.ln(h)
self.tr = None
if tag=='td' or tag=='th':
if self.th:
if DEBUG: print "revert style"
self.set_style('B', False) # revert style
self.table_col_index += int(self.td.get('colspan','1'))
self.td = None
self.th = False
if tag=='font':
if self.color:
self.pdf.set_text_color(0,0,0)
self.color = None
if self.font:
self.SetFont('Times','',12)
self.font = None
if tag=='center':
self.align = None
    def set_font(self, face=None, size=None):
        # Remember the requested face/size (if any), then re-apply the current
        # font state to the PDF, clearing all b/i/u styles and the text colour.
        if face:
            self.font_face = face
        if size:
            self.font_size = size
            # line height in mm derived from the point size (1 pt = 1/72 inch)
            self.h = size / 72.0*25.4
            if DEBUG: print "H", self.h
        self.pdf.set_font(self.font_face or 'times','',12)
        self.pdf.set_font_size(self.font_size or 12)
        self.set_style('u', False)
        self.set_style('b', False)
        self.set_style('i', False)
        self.set_text_color()
def set_style(self, tag=None, enable=None):
#Modify style and select corresponding font
if tag:
t = self.style.get(tag.lower())
self.style[tag.lower()] = enable
style=''
for s in ('b','i','u'):
if self.style.get(s):
style+=s
if DEBUG: print "SET_FONT_STYLE", style
self.pdf.set_font('',style)
def set_text_color(self, r=None, g=0, b=0):
if r is None:
self.pdf.set_text_color(self.r,self.g,self.b)
else:
self.pdf.set_text_color(r, g, b)
self.r = r
self.g = g
self.b = b
    def put_link(self, url, txt):
        # Write *txt* as a blue, underlined hyperlink pointing at *url*,
        # then restore plain style and black text.
        self.set_text_color(0,0,255)
        self.set_style('u', True)
        self.pdf.write(5,txt,url)
        self.set_style('u', False)
        self.set_text_color(0)
def put_line(self):
self.pdf.ln(2)
self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y())
self.pdf.ln(3)
class HTMLMixin():
    """Mix-in that adds an HTML-to-PDF entry point to an FPDF subclass."""
    def write_html(self, text):
        "Parse HTML and convert it to PDF"
        h2p = HTML2FPDF(self)
        h2p.feed(text)
if __name__=='__main__':
    # Demo / smoke test: render a sample document exercising headings, inline
    # styles, links, images, lists and a very long table (forcing many page
    # breaks with repeated headers/footers), then open the result in evince.
    # Fix: the final line carried pasted dataset residue ("| 0.25618 | ...")
    # that made the script a syntax error.
    html="""
<H1 align="center">html2fpdf</H1>
<h2>Basic usage</h2>
<p>You can now easily print text mixing different
styles : <B>bold</B>, <I>italic</I>, <U>underlined</U>, or
<B><I><U>all at once</U></I></B>!<BR>You can also insert links
on text, such as <A HREF="http://www.fpdf.org">www.fpdf.org</A>,
or on an image: click on the logo.<br>
<center>
<A HREF="http://www.fpdf.org"><img src="tutorial/logo.png" width="104" height="71"></A>
</center>
<h3>Sample List</h3>
<ul><li>option 1</li>
<ol><li>option 2</li></ol>
<li>option 3</li></ul>
<table border="0" align="center" width="50%">
<thead><tr><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead>
<tbody>
<tr><td>cell 1</td><td>cell 2</td></tr>
<tr><td>cell 2</td><td>cell 3</td></tr>
</tbody>
</table>
<table border="1">
<thead><tr bgcolor="#A0A0A0"><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead>
<tfoot><tr bgcolor="#E0E0E0"><td>footer 1</td><td>footer 2</td></tr></tfoot>
<tbody>
<tr><td>cell 1</td><td>cell 2</td></tr>
<tr>
<td width="30%">cell 1</td><td width="70%" bgcolor="#D0D0FF" align='right'>cell 2</td>
</tr>
</tbody>
<tbody><tr><td colspan="2">cell spanned</td></tr></tbody>
<tbody>
""" + """<tr bgcolor="#F0F0F0">
<td>cell 3</td><td>cell 4</td>
</tr><tr bgcolor="#FFFFFF">
<td>cell 5</td><td>cell 6</td>
</tr>""" * 200 + """
</tbody>
</table>
"""
    class MyFPDF(FPDF, HTMLMixin):
        def header(self):
            # page header: logo plus a centered bordered title cell
            self.image('tutorial/logo_pb.png',10,8,33)
            self.set_font('Arial','B',15)
            self.cell(80)
            self.cell(30,10,'Title',1,0,'C')
            self.ln(20)
        def footer(self):
            # page footer: "Page x of y" centered 15mm from the bottom
            self.set_y(-15)
            self.set_font('Arial','I',8)
            txt = 'Page %s of %s' % (self.page_no(), self.alias_nb_pages())
            self.cell(0,10,txt,0,0,'C')
    pdf=MyFPDF()
    #First page
    pdf.add_page()
    pdf.write_html(html)
    pdf.output('html.pdf','F')
    import os
    os.system("evince html.pdf")
from __future__ import annotations
import abc
from typing import Any, Generic, List, Mapping, Sequence, TypeVar
from .userr import Err, Res
_Value = TypeVar("_Value")  # consolidated value type produced by a collector
_Item = TypeVar("_Item")    # element type for sequence-appending collectors
class Collector(abc.ABC, Generic[_Value]):
    """
    Collects argument instances and computes the final value
    """
    @abc.abstractmethod
    def arg_required(self) -> bool:
        """
        Returns whether one instance of the argument needs to be present
        """
    @abc.abstractmethod
    def collect(self, seq: Sequence[_Value]) -> Res[_Value]:
        """
        Collects a sequence of values into a result
        Args:
            seq: Sequence of parsed values
        Returns:
            Either the consolidated value or an error
        """
    @abc.abstractmethod
    def argparse_argument_kwargs(self) -> Mapping[str, Any]:
        """
        Returns the arguments used in documentation (piggy backing on argparse)
        """
    @staticmethod
    def keep_last() -> Collector[_Value]:
        """
        Returns a collector that keeps the last value
        """
        class _KeepLast(Collector[_Value]):
            def arg_required(self) -> bool:
                return True
            def collect(self, seq: Sequence[_Value]) -> Res[_Value]:
                if not seq:  # no instances provided
                    return Err.make("Argument is required")
                else:  # instances are provided
                    return seq[-1]
            def argparse_argument_kwargs(self) -> Mapping[str, Any]:
                return {"action": "store"}
        return _KeepLast()
    @staticmethod
    def append() -> Collector[Sequence[_Item]]:
        """
        Returns a collector that appends sequences
        """
        class _Append(Collector[Sequence[_Item]]):
            def arg_required(self) -> bool:
                return False
            def collect(self, seq: Sequence[Sequence[_Item]]) -> Res[Sequence[_Item]]:
                # concatenate all provided sequences in order
                res: List[_Item] = []
                for i in seq:
                    res.extend(i)
                return res
            def argparse_argument_kwargs(self) -> Mapping[str, Any]:
                return {"action": "append"}
        return _Append()
import abc
from typing import Any, Generic, List, Mapping, Sequence, TypeVar
from .userr import Err, Res
_Value = TypeVar("_Value")  # consolidated value type produced by a collector
_Item = TypeVar("_Item")    # element type for sequence-appending collectors
class Collector(abc.ABC, Generic[_Value]):
    """
    Collects argument instances and computes the final value
    """
    @abc.abstractmethod
    def arg_required(self) -> bool:
        """
        Returns whether one instance of the argument needs to be present
        """
    @abc.abstractmethod
    def collect(self, seq: Sequence[_Value]) -> Res[_Value]:
        """
        Collects a sequence of values into a result
        Args:
            seq: Sequence of parsed values
        Returns:
            Either the consolidated value or an error
        """
    @abc.abstractmethod
    def argparse_argument_kwargs(self) -> Mapping[str, Any]:
        """
        Returns the arguments used in documentation (piggy backing on argparse)
        """
    @staticmethod
    def keep_last() -> Collector[_Value]:
        """
        Returns a collector that keeps the last value
        """
        class _KeepLast(Collector[_Value]):
            def arg_required(self) -> bool:
                return True
            def collect(self, seq: Sequence[_Value]) -> Res[_Value]:
                if not seq:  # no instances provided
                    return Err.make("Argument is required")
                else:  # instances are provided
                    return seq[-1]
            def argparse_argument_kwargs(self) -> Mapping[str, Any]:
                return {"action": "store"}
        return _KeepLast()
    @staticmethod
    def append() -> Collector[Sequence[_Item]]:
        """
        Returns a collector that appends sequences
        """
        class _Append(Collector[Sequence[_Item]]):
            def arg_required(self) -> bool:
                return False
            def collect(self, seq: Sequence[Sequence[_Item]]) -> Res[Sequence[_Item]]:
                # concatenate all provided sequences in order
                res: List[_Item] = []
                for i in seq:
                    res.extend(i)
                return res
            def argparse_argument_kwargs(self) -> Mapping[str, Any]:
                return {"action": "append"}
        return _Append()
from spack import *
class BigdftLibabinit(AutotoolsPackage):
    """BigDFT-libABINIT: this is a subsection of files coming from ABINIT software package,
    to which BigDFT has been coupled since the early days. It handles different parts
    like symmetries, ewald corrections, PAW routines, density and potential mixing
    routines and some MD minimizers."""
    homepage = "https://bigdft.org/"
    url = "https://gitlab.com/l_sim/bigdft-suite/-/archive/1.9.2/bigdft-suite-1.9.2.tar.gz"
    git = "https://gitlab.com/l_sim/bigdft-suite.git"
    version('develop', branch='devel')
    version('1.9.2', sha256='dc9e49b68f122a9886fa0ef09970f62e7ba21bb9ab1b86be9b7d7e22ed8fbe0f')
    version('1.9.1', sha256='3c334da26d2a201b572579fc1a7f8caad1cbf971e848a3e10d83bc4dc8c82e41')
    version('1.9.0', sha256='4500e505f5a29d213f678a91d00a10fef9dc00860ea4b3edf9280f33ed0d1ac8')
    version('1.8.3', sha256='f112bb08833da4d11dd0f14f7ab10d740b62bc924806d77c985eb04ae0629909')
    version('1.8.2', sha256='042e5a3b478b1a4c050c450a9b1be7bcf8e13eacbce4759b7f2d79268b298d61')
    version('1.8.1', sha256='e09ff0ba381f6ffbe6a3c0cb71db5b73117874beb41f22a982a7e5ba32d018b3')
    variant('mpi', default=True, description='Enable MPI support')
    depends_on('python@:2.8', type=('build', 'run'), when="@:1.8.3")
    depends_on('python@3.0:', type=('build', 'run'), when="@1.9.0:")
    depends_on('python@3.0:', type=('build', 'run'), when="@develop")
    depends_on('mpi', when='+mpi')
    depends_on('libxc@:2.2.2', when='@:1.9.1')
    depends_on('libxc@:4.3.4', when='@1.9.1:')
    depends_on('libxc@:4.3.4', when='@develop')
    # each release must be built against the matching bigdft-futile release
    for vers in ['1.8.1', '1.8.2', '1.8.3', '1.9.0', '1.9.1', '1.9.2', 'develop']:
        depends_on('bigdft-futile@{0}'.format(vers), when='@{0}'.format(vers))
    patch('m_libpaw_mpi.F90.patch', when='@:1.8.2')
    # the libABINIT sources live in a subdirectory of the bigdft-suite tarball
    build_directory = "libABINIT"
    def autoreconf(self, spec, prefix):
        # regenerate the build system; releases <= 1.8.2 are run without the
        # force flag, newer ones with -f
        autoreconf = which('autoreconf')
        with working_dir(self.build_directory):
            if spec.satisfies('@:1.8.2'):
                autoreconf('-i')
            else:
                autoreconf('-fi')
    def configure_args(self):
        # point configure at the libxc and bigdft-futile dependencies and,
        # with +mpi, use the MPI compiler wrappers
        spec = self.spec
        prefix = self.prefix
        args = [
            "--with-libxc-libs=%s %s" % (spec['libxc'].libs.ld_flags,
                                         spec['libxc'].libs.ld_flags + "f90"),
            "--with-libxc-incs=%s" % spec['libxc'].headers.include_flags,
            "--with-futile-libs=%s" % spec['bigdft-futile'].prefix.lib,
            "--with-futile-incs=%s" % spec['bigdft-futile'].headers.include_flags,
            "--with-moduledir=%s" % prefix.include,
            "--prefix=%s" % prefix,
        ]
        if '+mpi' in spec:
            args.append("CC=%s" % spec['mpi'].mpicc)
            args.append("CXX=%s" % spec['mpi'].mpicxx)
            args.append("FC=%s" % spec['mpi'].mpifc)
            args.append("F90=%s" % spec['mpi'].mpifc)
            args.append("F77=%s" % spec['mpi'].mpif77)
        else:
            args.append("--disable-mpi")
        return args
    @property
    def libs(self):
        # expose the installed libabinit libraries to dependents
        # (fix: the closing paren carried pasted dataset residue)
        shared = "+shared" in self.spec
        return find_libraries(
            'libabinit', root=self.prefix, shared=shared, recursive=True
        )
from spack import *
class BigdftLibabinit(AutotoolsPackage):
    """BigDFT-libABINIT: this is a subsection of files coming from ABINIT software package,
    to which BigDFT has been coupled since the early days. It handles different parts
    like symmetries, ewald corrections, PAW routines, density and potential mixing
    routines and some MD minimizers."""
    homepage = "https://bigdft.org/"
    url = "https://gitlab.com/l_sim/bigdft-suite/-/archive/1.9.2/bigdft-suite-1.9.2.tar.gz"
    git = "https://gitlab.com/l_sim/bigdft-suite.git"
    version('develop', branch='devel')
    version('1.9.2', sha256='dc9e49b68f122a9886fa0ef09970f62e7ba21bb9ab1b86be9b7d7e22ed8fbe0f')
    version('1.9.1', sha256='3c334da26d2a201b572579fc1a7f8caad1cbf971e848a3e10d83bc4dc8c82e41')
    version('1.9.0', sha256='4500e505f5a29d213f678a91d00a10fef9dc00860ea4b3edf9280f33ed0d1ac8')
    version('1.8.3', sha256='f112bb08833da4d11dd0f14f7ab10d740b62bc924806d77c985eb04ae0629909')
    version('1.8.2', sha256='042e5a3b478b1a4c050c450a9b1be7bcf8e13eacbce4759b7f2d79268b298d61')
    version('1.8.1', sha256='e09ff0ba381f6ffbe6a3c0cb71db5b73117874beb41f22a982a7e5ba32d018b3')
    variant('mpi', default=True, description='Enable MPI support')
    depends_on('python@:2.8', type=('build', 'run'), when="@:1.8.3")
    depends_on('python@3.0:', type=('build', 'run'), when="@1.9.0:")
    depends_on('python@3.0:', type=('build', 'run'), when="@develop")
    depends_on('mpi', when='+mpi')
    depends_on('libxc@:2.2.2', when='@:1.9.1')
    depends_on('libxc@:4.3.4', when='@1.9.1:')
    depends_on('libxc@:4.3.4', when='@develop')
    # each release must be built against the matching bigdft-futile release
    for vers in ['1.8.1', '1.8.2', '1.8.3', '1.9.0', '1.9.1', '1.9.2', 'develop']:
        depends_on('bigdft-futile@{0}'.format(vers), when='@{0}'.format(vers))
    patch('m_libpaw_mpi.F90.patch', when='@:1.8.2')
    # the libABINIT sources live in a subdirectory of the bigdft-suite tarball
    build_directory = "libABINIT"
    def autoreconf(self, spec, prefix):
        # regenerate the build system; releases <= 1.8.2 are run without the
        # force flag, newer ones with -f
        autoreconf = which('autoreconf')
        with working_dir(self.build_directory):
            if spec.satisfies('@:1.8.2'):
                autoreconf('-i')
            else:
                autoreconf('-fi')
    def configure_args(self):
        # point configure at the libxc and bigdft-futile dependencies and,
        # with +mpi, use the MPI compiler wrappers
        spec = self.spec
        prefix = self.prefix
        args = [
            "--with-libxc-libs=%s %s" % (spec['libxc'].libs.ld_flags,
                                         spec['libxc'].libs.ld_flags + "f90"),
            "--with-libxc-incs=%s" % spec['libxc'].headers.include_flags,
            "--with-futile-libs=%s" % spec['bigdft-futile'].prefix.lib,
            "--with-futile-incs=%s" % spec['bigdft-futile'].headers.include_flags,
            "--with-moduledir=%s" % prefix.include,
            "--prefix=%s" % prefix,
        ]
        if '+mpi' in spec:
            args.append("CC=%s" % spec['mpi'].mpicc)
            args.append("CXX=%s" % spec['mpi'].mpicxx)
            args.append("FC=%s" % spec['mpi'].mpifc)
            args.append("F90=%s" % spec['mpi'].mpifc)
            args.append("F77=%s" % spec['mpi'].mpif77)
        else:
            args.append("--disable-mpi")
        return args
    @property
    def libs(self):
        # expose the installed libabinit libraries to dependents
        # (fix: the closing paren carried pasted dataset residue)
        shared = "+shared" in self.spec
        return find_libraries(
            'libabinit', root=self.prefix, shared=shared, recursive=True
        )
import requests
import logging
import pandas as pd
from datetime import datetime, timedelta
import time
from sqlalchemy import create_engine
# MySQL connection settings; '<PASSWORD>' is a placeholder to fill in locally.
creds = {'usr': 'ONEBIT',
         'pwd': '<PASSWORD>',
         'hst': '127.0.0.1',
         'prt': 3306,
         'dbn': 'crypto_data'}
conn = create_engine('mysql+pymysql://{usr}:{pwd}@{hst}:{prt}/{dbn}'.format(**creds))
# instrument filter: only symbols whose quote asset is in `quotes`
# or whose base asset is in `bases` are fetched
quotes = ['USDT', 'USDC']
bases = ['BTC', 'ETH']
class Binance_SPOT_MD:
    """REST market-data collector for Binance SPOT klines.

    Downloads candlesticks from /api/v3/klines for the instruments selected
    by the module-level ``bases``/``quotes`` filters and appends them to the
    BINANCE_SPOT_OFFICIAL_KLINES table through the module-level engine.
    """
    def __init__(self, start_time: datetime, end_time: datetime):
        self.url = "https://api.binance.com"
        # window bounds stored as unix timestamps (seconds)
        self.start_datetime = start_time.timestamp()
        self.end_datetime = end_time.timestamp()
        self.session = requests.session()
        # initialize the logger
        # NOTE(review): this attaches a new FileHandler to the root logger on
        # every instantiation -- repeated instances duplicate log lines.
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh = logging.FileHandler('binance_spot_MD.log', mode='a')
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        # frequency in seconds -> Binance interval string
        self.__freq_mapping = {
            '60': '1m',
            '180': '3m',
            '300': '5m',
            '900': '15m',
            '1800': '30m',
            '3600': '1h',
            '14400': '4h',
            '86400': '1d'
        }
    def get_instruments(self):
        # Return [symbol, baseAsset, quoteAsset] for every exchange symbol
        # whose base is in `bases` or whose quote is in `quotes`.
        data = self.session.get(self.url + '/api/v3/exchangeInfo').json()
        return [[item['symbol'], item['baseAsset'], item['quoteAsset'] ] for item in data['symbols'] if (item['baseAsset'] in bases or item['quoteAsset'] in quotes)]
    def __get_kline_by_instrument(self, instrument_name, start_datetime, end_datetime, freq, baseAsset, quoteAsset):
        # Fetch one kline window and shape it into a DataFrame matching the
        # target table, or return None when the API sends no rows.
        # Binance expects millisecond timestamps, hence the * 1000.
        data = self.session.get(self.url + "/api/v3/klines?symbol={}&interval={}&limit=1500&startTime={}&endTime={}".format(
            instrument_name, self.__freq_mapping[str(freq)], 1000 * int(start_datetime), 1000 * int(end_datetime)
        )).json()
        data = pd.DataFrame(data,
                            columns=['Open time', 'open', 'high', 'low', "close", 'volume', 'close time', 'amount',
                                     'trades', 'taker base', 'taker quote', 'ignore'])
        if len(data.index) > 0:
            # convert the open time back from milliseconds to seconds
            data['start_datetime'] = data['Open time'].apply(lambda x: x // 1000)
            data = data[['open', 'high', 'low', 'close', 'volume', 'amount', 'start_datetime']]
            data['freq_seconds'] = freq
            data['global_symbol'] = 'SPOT-{}/{}'.format(baseAsset,quoteAsset)
            # NOTE(review): the log line reports self.start_datetime rather
            # than the start_datetime argument used for the request -- confirm.
            self.logger.info("Succesfully fetched {} kline @ {}".format(instrument_name, str(
                datetime.fromtimestamp(self.start_datetime))))
            return data
        else:
            self.logger.error("Failed to fetched {} kline @ {}".format(instrument_name, str(
                datetime.fromtimestamp(self.start_datetime))))
            return None
    def get_klines(self, freq):
        """Fetch the current window for every selected instrument and append
        the rows to the MySQL table."""
        instruments = self.get_instruments()
        #instruments = [['BTCUSDT', 'BTC', 'USDT']]
        for instrument in instruments:
            data = self.__get_kline_by_instrument(instrument_name=instrument[0], start_datetime=self.start_datetime,
                                                  end_datetime=self.end_datetime, freq=freq, baseAsset=instrument[1],
                                                  quoteAsset=instrument[2])
            if data is not None:
                #data.to_csv('test.csv')
                #print(data['start_datetime'])
                # drop the candle that opens exactly at the window end so
                # consecutive windows do not insert it twice
                data = data[data['start_datetime'] < self.end_datetime]
                data.to_sql(name='BINANCE_SPOT_OFFICIAL_KLINES', index=False, if_exists='append', con=conn, method='multi')
                time.sleep(0.1)  # brief pause between symbols (rate limiting)
if __name__ == "__main__":
binance_spot = Binance_SPOT_MD(start_time=datetime.now(), end_time=datetime.now())
start = datetime(2019, 1, 1, 0)
while start < datetime(2020, 9, 18):
print(start)
end = min(start + timedelta(minutes=1000), datetime(2020, 9, 18))
binance_spot.start_datetime = start.timestamp()
binance_spot.end_datetime = end.timestamp()
binance_spot.get_klines(freq=60)
start = end | binance/binance_spot_kline_rest.py | import requests
import logging
import pandas as pd
from datetime import datetime, timedelta
import time
from sqlalchemy import create_engine
# MySQL connection settings; '<PASSWORD>' is a placeholder to fill in locally.
creds = {'usr': 'ONEBIT',
         'pwd': '<PASSWORD>',
         'hst': '127.0.0.1',
         'prt': 3306,
         'dbn': 'crypto_data'}
conn = create_engine('mysql+pymysql://{usr}:{pwd}@{hst}:{prt}/{dbn}'.format(**creds))
# instrument filter: only symbols whose quote asset is in `quotes`
# or whose base asset is in `bases` are fetched
quotes = ['USDT', 'USDC']
bases = ['BTC', 'ETH']
class Binance_SPOT_MD:
    """REST market-data collector for Binance SPOT klines.

    Downloads candlesticks from /api/v3/klines for the instruments selected
    by the module-level ``bases``/``quotes`` filters and appends them to the
    BINANCE_SPOT_OFFICIAL_KLINES table through the module-level engine.
    """
    def __init__(self, start_time: datetime, end_time: datetime):
        self.url = "https://api.binance.com"
        # window bounds stored as unix timestamps (seconds)
        self.start_datetime = start_time.timestamp()
        self.end_datetime = end_time.timestamp()
        self.session = requests.session()
        # initialize the logger
        # NOTE(review): this attaches a new FileHandler to the root logger on
        # every instantiation -- repeated instances duplicate log lines.
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s")
        fh = logging.FileHandler('binance_spot_MD.log', mode='a')
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        # frequency in seconds -> Binance interval string
        self.__freq_mapping = {
            '60': '1m',
            '180': '3m',
            '300': '5m',
            '900': '15m',
            '1800': '30m',
            '3600': '1h',
            '14400': '4h',
            '86400': '1d'
        }
    def get_instruments(self):
        # Return [symbol, baseAsset, quoteAsset] for every exchange symbol
        # whose base is in `bases` or whose quote is in `quotes`.
        data = self.session.get(self.url + '/api/v3/exchangeInfo').json()
        return [[item['symbol'], item['baseAsset'], item['quoteAsset'] ] for item in data['symbols'] if (item['baseAsset'] in bases or item['quoteAsset'] in quotes)]
    def __get_kline_by_instrument(self, instrument_name, start_datetime, end_datetime, freq, baseAsset, quoteAsset):
        # Fetch one kline window and shape it into a DataFrame matching the
        # target table, or return None when the API sends no rows.
        # Binance expects millisecond timestamps, hence the * 1000.
        data = self.session.get(self.url + "/api/v3/klines?symbol={}&interval={}&limit=1500&startTime={}&endTime={}".format(
            instrument_name, self.__freq_mapping[str(freq)], 1000 * int(start_datetime), 1000 * int(end_datetime)
        )).json()
        data = pd.DataFrame(data,
                            columns=['Open time', 'open', 'high', 'low', "close", 'volume', 'close time', 'amount',
                                     'trades', 'taker base', 'taker quote', 'ignore'])
        if len(data.index) > 0:
            # convert the open time back from milliseconds to seconds
            data['start_datetime'] = data['Open time'].apply(lambda x: x // 1000)
            data = data[['open', 'high', 'low', 'close', 'volume', 'amount', 'start_datetime']]
            data['freq_seconds'] = freq
            data['global_symbol'] = 'SPOT-{}/{}'.format(baseAsset,quoteAsset)
            # NOTE(review): the log line reports self.start_datetime rather
            # than the start_datetime argument used for the request -- confirm.
            self.logger.info("Succesfully fetched {} kline @ {}".format(instrument_name, str(
                datetime.fromtimestamp(self.start_datetime))))
            return data
        else:
            self.logger.error("Failed to fetched {} kline @ {}".format(instrument_name, str(
                datetime.fromtimestamp(self.start_datetime))))
            return None
    def get_klines(self, freq):
        """Fetch the current window for every selected instrument and append
        the rows to the MySQL table."""
        instruments = self.get_instruments()
        #instruments = [['BTCUSDT', 'BTC', 'USDT']]
        for instrument in instruments:
            data = self.__get_kline_by_instrument(instrument_name=instrument[0], start_datetime=self.start_datetime,
                                                  end_datetime=self.end_datetime, freq=freq, baseAsset=instrument[1],
                                                  quoteAsset=instrument[2])
            if data is not None:
                #data.to_csv('test.csv')
                #print(data['start_datetime'])
                # drop the candle that opens exactly at the window end so
                # consecutive windows do not insert it twice
                data = data[data['start_datetime'] < self.end_datetime]
                data.to_sql(name='BINANCE_SPOT_OFFICIAL_KLINES', index=False, if_exists='append', con=conn, method='multi')
                time.sleep(0.1)  # brief pause between symbols (rate limiting)
if __name__ == "__main__":
binance_spot = Binance_SPOT_MD(start_time=datetime.now(), end_time=datetime.now())
start = datetime(2019, 1, 1, 0)
while start < datetime(2020, 9, 18):
print(start)
end = min(start + timedelta(minutes=1000), datetime(2020, 9, 18))
binance_spot.start_datetime = start.timestamp()
binance_spot.end_datetime = end.timestamp()
binance_spot.get_klines(freq=60)
start = end | 0.232484 | 0.143938 |
import datetime
import unittest
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.previews.container_flow_by_vehicle_type_preview import \
ContainerFlowByVehicleTypePreview
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerFlowByVehicleTypePreview(unittest.TestCase):
    """Unit tests for ContainerFlowByVehicleTypePreview.

    Fix: the final assertion line carried pasted dataset residue
    ("| conflowgen/tests/... | import datetime") that broke the syntax.
    """
    def setUp(self) -> None:
        """Create container database in memory"""
        self.sqlite_db = setup_sqlite_in_memory_db()
        self.sqlite_db.create_tables([
            Schedule,
            ModeOfTransportDistribution
        ])
        now = datetime.datetime.now()
        # inbound vehicle type -> distribution over outbound vehicle types
        ModeOfTransportDistributionRepository().set_mode_of_transport_distributions({
            ModeOfTransport.truck: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.train: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.barge: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.feeder: {
                ModeOfTransport.truck: 0.2,
                ModeOfTransport.train: 0.4,
                ModeOfTransport.barge: 0.1,
                ModeOfTransport.feeder: 0.15,
                ModeOfTransport.deep_sea_vessel: 0.15
            },
            ModeOfTransport.deep_sea_vessel: {
                ModeOfTransport.truck: 0.2,
                ModeOfTransport.train: 0.4,
                ModeOfTransport.barge: 0.1,
                ModeOfTransport.feeder: 0.15,
                ModeOfTransport.deep_sea_vessel: 0.15
            }
        })
        self.preview = ContainerFlowByVehicleTypePreview(
            start_date=now.date(),
            end_date=(now + datetime.timedelta(weeks=2)).date(),
            transportation_buffer=0.2
        )
    def test_with_no_schedules(self):
        """If no schedules are provided, no capacity is needed"""
        empty_flow = self.preview.get_inbound_to_outbound_flow()
        self.assertSetEqual(set(ModeOfTransport), set(empty_flow.keys()))
        for mode_of_transport_from in ModeOfTransport:
            flow_from_vehicle_type = empty_flow[mode_of_transport_from]
            self.assertSetEqual(set(ModeOfTransport), set(flow_from_vehicle_type.keys()))
            for mode_of_transport_to in ModeOfTransport:
                capacity_in_teu = flow_from_vehicle_type[mode_of_transport_to]
                self.assertEqual(capacity_in_teu, 0, f"Expect no flow from {mode_of_transport_from} to "
                                                    f"{mode_of_transport_to} but it was {capacity_in_teu}")
    def test_with_single_arrival_schedules(self):
        # one non-repeating feeder call one week into the horizon
        one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
        schedule = Schedule.create(
            vehicle_type=ModeOfTransport.feeder,
            service_name="TestFeederService",
            vehicle_arrives_at=one_week_later.date(),
            vehicle_arrives_at_time=one_week_later.time(),
            average_vehicle_capacity=300,
            average_moved_capacity=300,
            vehicle_arrives_every_k_days=-1
        )
        schedule.save()
        flow_with_one_entry = self.preview.get_inbound_to_outbound_flow()
        self.assertSetEqual(set(ModeOfTransport), set(flow_with_one_entry.keys()))
        # only feeder (the schedule) and truck (its hinterland leg) carry flow
        uninvolved_vehicles = set(ModeOfTransport) - {ModeOfTransport.feeder, ModeOfTransport.truck}
        for mode_of_transport_from in uninvolved_vehicles:
            flow_from_vehicle_type = flow_with_one_entry[mode_of_transport_from]
            self.assertSetEqual(set(ModeOfTransport), set(flow_from_vehicle_type.keys()))
            for mode_of_transport_to in ModeOfTransport:
                capacity_in_teu = flow_from_vehicle_type[mode_of_transport_to]
                self.assertEqual(capacity_in_teu, 0, f"Expect no flow from {mode_of_transport_from} to "
                                                    f"{mode_of_transport_to} but it was {capacity_in_teu}")
        flow_to_feeder = flow_with_one_entry[ModeOfTransport.feeder]
        for mode_of_transport_to in (set(ModeOfTransport) - {ModeOfTransport.barge}):
            transported_capacity = flow_to_feeder[mode_of_transport_to]
            self.assertGreater(transported_capacity, 0)
        flow_from_truck_to_feeder = flow_with_one_entry[ModeOfTransport.truck][ModeOfTransport.feeder]
        self.assertGreater(flow_from_truck_to_feeder, 0, "Some containers must be delivered by truck for the feeder")
import unittest
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.previews.container_flow_by_vehicle_type_preview import \
ContainerFlowByVehicleTypePreview
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerFlowByVehicleTypePreview(unittest.TestCase):
    """Unit tests for ContainerFlowByVehicleTypePreview.

    Fix: the final assertion line carried pasted dataset residue
    ("| 0.567697 | 0.292873 |") that broke the syntax.
    """
    def setUp(self) -> None:
        """Create container database in memory"""
        self.sqlite_db = setup_sqlite_in_memory_db()
        self.sqlite_db.create_tables([
            Schedule,
            ModeOfTransportDistribution
        ])
        now = datetime.datetime.now()
        # inbound vehicle type -> distribution over outbound vehicle types
        ModeOfTransportDistributionRepository().set_mode_of_transport_distributions({
            ModeOfTransport.truck: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.train: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.barge: {
                ModeOfTransport.truck: 0,
                ModeOfTransport.train: 0,
                ModeOfTransport.barge: 0,
                ModeOfTransport.feeder: 0.5,
                ModeOfTransport.deep_sea_vessel: 0.5
            },
            ModeOfTransport.feeder: {
                ModeOfTransport.truck: 0.2,
                ModeOfTransport.train: 0.4,
                ModeOfTransport.barge: 0.1,
                ModeOfTransport.feeder: 0.15,
                ModeOfTransport.deep_sea_vessel: 0.15
            },
            ModeOfTransport.deep_sea_vessel: {
                ModeOfTransport.truck: 0.2,
                ModeOfTransport.train: 0.4,
                ModeOfTransport.barge: 0.1,
                ModeOfTransport.feeder: 0.15,
                ModeOfTransport.deep_sea_vessel: 0.15
            }
        })
        self.preview = ContainerFlowByVehicleTypePreview(
            start_date=now.date(),
            end_date=(now + datetime.timedelta(weeks=2)).date(),
            transportation_buffer=0.2
        )
    def test_with_no_schedules(self):
        """If no schedules are provided, no capacity is needed"""
        empty_flow = self.preview.get_inbound_to_outbound_flow()
        self.assertSetEqual(set(ModeOfTransport), set(empty_flow.keys()))
        for mode_of_transport_from in ModeOfTransport:
            flow_from_vehicle_type = empty_flow[mode_of_transport_from]
            self.assertSetEqual(set(ModeOfTransport), set(flow_from_vehicle_type.keys()))
            for mode_of_transport_to in ModeOfTransport:
                capacity_in_teu = flow_from_vehicle_type[mode_of_transport_to]
                self.assertEqual(capacity_in_teu, 0, f"Expect no flow from {mode_of_transport_from} to "
                                                    f"{mode_of_transport_to} but it was {capacity_in_teu}")
    def test_with_single_arrival_schedules(self):
        # one non-repeating feeder call one week into the horizon
        one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
        schedule = Schedule.create(
            vehicle_type=ModeOfTransport.feeder,
            service_name="TestFeederService",
            vehicle_arrives_at=one_week_later.date(),
            vehicle_arrives_at_time=one_week_later.time(),
            average_vehicle_capacity=300,
            average_moved_capacity=300,
            vehicle_arrives_every_k_days=-1
        )
        schedule.save()
        flow_with_one_entry = self.preview.get_inbound_to_outbound_flow()
        self.assertSetEqual(set(ModeOfTransport), set(flow_with_one_entry.keys()))
        # only feeder (the schedule) and truck (its hinterland leg) carry flow
        uninvolved_vehicles = set(ModeOfTransport) - {ModeOfTransport.feeder, ModeOfTransport.truck}
        for mode_of_transport_from in uninvolved_vehicles:
            flow_from_vehicle_type = flow_with_one_entry[mode_of_transport_from]
            self.assertSetEqual(set(ModeOfTransport), set(flow_from_vehicle_type.keys()))
            for mode_of_transport_to in ModeOfTransport:
                capacity_in_teu = flow_from_vehicle_type[mode_of_transport_to]
                self.assertEqual(capacity_in_teu, 0, f"Expect no flow from {mode_of_transport_from} to "
                                                    f"{mode_of_transport_to} but it was {capacity_in_teu}")
        flow_to_feeder = flow_with_one_entry[ModeOfTransport.feeder]
        for mode_of_transport_to in (set(ModeOfTransport) - {ModeOfTransport.barge}):
            transported_capacity = flow_to_feeder[mode_of_transport_to]
            self.assertGreater(transported_capacity, 0)
        flow_from_truck_to_feeder = flow_with_one_entry[ModeOfTransport.truck][ModeOfTransport.feeder]
        self.assertGreater(flow_from_truck_to_feeder, 0, "Some containers must be delivered by truck for the feeder")
from django.db import models
from django.core.cache import cache
from common import keys
def to_dict(self, *exclude):
    """Convert a model instance into a dict of its field values.

    exclude: field attribute names to leave out of the result.
    """
    return {
        field.attname: getattr(self, field.attname)
        for field in self._meta.fields
        if field.attname not in exclude
    }
def get(cls, *args, **kwargs):
    """Fetch a model instance, consulting the cache before the database."""
    # NOTE(review): a falsy pk (e.g. 0) skips the cache fast path - confirm
    # this is acceptable for the models being patched.
    pk = kwargs.get('id') or kwargs.get('pk')
    if pk is not None:
        cache_key = keys.MODEL % (cls.__name__, pk)
        cached = cache.get(cache_key)
        if isinstance(cached, cls):
            return cached
    # Cache miss (or no pk in the query): hit the database.
    obj = cls.objects.get(*args, **kwargs)
    # Refresh the cache with the freshly loaded instance.
    cache.set(keys.MODEL % (cls.__name__, obj.pk), obj)
    return obj
def get_or_create(cls, defaults=None, **kwargs):
    """Cached variant of ``objects.get_or_create``.

    Returns ``(instance, created)`` just like the original; a cache hit is
    reported as not created.
    """
    pk = kwargs.get('id') or kwargs.get('pk')
    if pk is not None:
        cached = cache.get(keys.MODEL % (cls.__name__, pk))
        if isinstance(cached, cls):
            # Cache hit: the object already existed.
            return cached, False
    # Fall back to the original get_or_create, then cache the result.
    obj, created = cls.objects.get_or_create(defaults, **kwargs)
    cache.set(keys.MODEL % (cls.__name__, obj.pk), obj)
    return obj, created
def save(self, force_insert=False, force_update=False, using=None,
         update_fields=None):
    """Save through the original ``Model.save``, then cache the instance.

    Bug fix: the previous version discarded its arguments and always called
    ``_save`` with hard-coded defaults, so ``force_insert``, ``force_update``,
    ``using`` and ``update_fields`` supplied by callers were silently ignored.
    """
    # Persist via the original save() (stashed as _save by patch_model()).
    self._save(force_insert=force_insert, force_update=force_update,
               using=using, update_fields=update_fields)
    # Store the saved instance in the cache under its class name and pk.
    key = keys.MODEL % (self.__class__.__name__, self.pk)
    cache.set(key, self)
def patch_model():
    """Monkey-patch ``django.db.models.Model`` with the caching helpers above."""
    # Attach the helpers as new methods.
    models.Model.to_dict = to_dict
    models.Model.get = classmethod(get)
    models.Model.get_or_create = classmethod(get_or_create)
    # Keep the original save() reachable as _save, then replace it with the
    # caching wrapper defined above.
    models.Model._save = models.Model.save
    models.Model.save = save
from django.core.cache import cache
from common import keys
def to_dict(self, *exclude):
'''
将 model 对象转换成一个属性字典
exclude: 需要排出的字段名
'''
attr_dict = {}
for field in self._meta.fields:
field_name = field.attname
if field_name not in exclude:
attr_dict[field_name] = getattr(self, field_name)
return attr_dict
def get(cls, *args, **kwargs):
"""先从缓存获取数据,缓存中没有,总数据库获取"""
pk = kwargs.get('id') or kwargs.get('pk') # 获取主键的值
if pk is not None:
key = keys.MODEL % (cls.__name__, pk) # 定义缓存 key
# 从缓存中获取数据
model_obj = cache.get(key)
if isinstance(model_obj, cls):
return model_obj
# 缓存里没有,从数据库获取
model_obj = cls.objects.get(*args, **kwargs)
# 将取出的数据写入缓存
key = keys.MODEL % (cls.__name__, model_obj.pk)
cache.set(key, model_obj)
return model_obj
def get_or_create(cls, defaults=None, **kwargs):
"""为 objects.get_or_create 添加缓存处理"""
pk = kwargs.get('id') or kwargs.get('pk') # 获取主键的值
if pk is not None:
key = keys.MODEL % (cls.__name__, pk) # 定义缓存 key
# 从缓存中获取数据
model_obj = cache.get(key)
if isinstance(model_obj, cls):
return model_obj, False
# 缓存里没有,执行原来的 get_or_create
model_obj, created = cls.objects.get_or_create(defaults, **kwargs)
# 将取出的数据写入缓存
key = keys.MODEL % (cls.__name__, model_obj.pk)
cache.set(key, model_obj)
return model_obj, created
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
'''添加了缓存处理的 save 方法'''
# 先将数据通过原 save 方法保存到数据库
self._save(force_insert=False, force_update=False, using=None, update_fields=None)
# 将 model 对象存入缓存
key = keys.MODEL % (self.__class__.__name__, self.pk)
cache.set(key, self)
def patch_model():
'''通过 MonkeyPatch 的方式为 Model 对象打补丁'''
# 动态为 Model 增加方法
models.Model.to_dict = to_dict
models.Model.get = classmethod(get)
models.Model.get_or_create = classmethod(get_or_create)
# 修改原 save 方法
models.Model._save = models.Model.save
models.Model.save = save | 0.366703 | 0.094845 |
import py
import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
    """Register the ``--assert`` family of command line options."""
    group = parser.getgroup("debugconfig")
    group.addoption('--assert',
                    action="store",
                    dest="assertmode",
                    choices=("rewrite", "reinterp", "plain",),
                    default="rewrite",
                    metavar="MODE",
                    help="""control assertion debugging tools. 'plain'
                    performs no assertion debugging. 'reinterp'
                    reinterprets assert statements after they failed
                    to provide assertion expression information.
                    'rewrite' (the default) rewrites assert
                    statements in test modules on import to
                    provide assert expression information. """)
    # Deprecated aliases, both equivalent to --assert=plain.
    group.addoption('--no-assert',
                    action="store_true",
                    default=False,
                    dest="noassert",
                    help="DEPRECATED equivalent to --assert=plain")
    group.addoption('--nomagic', '--no-magic',
                    action="store_true",
                    default=False,
                    help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
    """State for the assertion plugin."""

    def __init__(self, config, mode):
        # Tracing channel scoped to this plugin.
        self.trace = config.trace.root.get("assertion")
        # Active assertion mode: "rewrite", "reinterp" or "plain".
        self.mode = mode
def pytest_configure(config):
    """Select the assertion mode and install the supporting machinery."""
    mode = config.getvalue("assertmode")
    # The deprecated flags force-disable all assertion tooling.
    if config.getvalue("noassert") or config.getvalue("nomagic"):
        mode = "plain"
    if mode == "rewrite":
        try:
            import ast  # noqa
        except ImportError:
            mode = "reinterp"
        else:
            # Both Jython and CPython 2.6.0 have AST bugs that make the
            # assertion rewriting hook malfunction.
            if (sys.platform.startswith('java') or
                    sys.version_info[:3] == (2, 6, 0)):
                mode = "reinterp"
    if mode != "plain":
        _load_modules(mode)
        # Replace the builtin AssertionError so re-interpretation can attach
        # expression information to failures; undone via config cleanup.
        m = monkeypatch()
        config._cleanup.append(m.undo)
        m.setattr(py.builtin.builtins, 'AssertionError',
                  reinterpret.AssertionError)  # noqa
    hook = None
    if mode == "rewrite":
        hook = rewrite.AssertionRewritingHook()  # noqa
        # The rewriting hook must run before the normal import machinery.
        sys.meta_path.insert(0, hook)
    warn_about_missing_assertion(mode)
    config._assertstate = AssertionState(config, mode)
    config._assertstate.hook = hook
    config._assertstate.trace("configured with mode set to %r" % (mode,))

    def undo():
        # Remove the import hook again at the end of the session.
        hook = config._assertstate.hook
        if hook is not None and hook in sys.meta_path:
            sys.meta_path.remove(hook)

    config.add_cleanup(undo)
def pytest_collection(session):
    """Give the rewriting import hook access to the live session."""
    # This hook is only called where test modules are actually collected,
    # e.g. not in the master process of pytest-xdist.
    state = session.config._assertstate
    if state.hook is not None:
        state.hook.set_session(session)
def pytest_runtest_setup(item):
    """Setup the pytest_assertrepr_compare hook

    The newinterpret and rewrite modules will use util._reprcompare if
    it exists to use custom reporting via the
    pytest_assertrepr_compare hook. This sets up this custom
    comparison for the test.
    """
    def callbinrepr(op, left, right):
        """Call the pytest_assertrepr_compare hook and prepare the result

        This uses the first result from the hook and then ensures the
        following:
        * Overly verbose explanations are dropped unless -vv was used.
        * Embedded newlines are escaped to help util.format_explanation()
          later.
        * If the rewrite mode is used embedded %-characters are replaced
          to protect later % formatting.

        The result can be formatted by util.format_explanation() for
        pretty printing.
        """
        hook_result = item.ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right)
        # Only the first non-empty hook result is used.
        for new_expl in hook_result:
            if new_expl:
                # 80*8: drop the tail of explanations longer than roughly
                # eight 80-column lines, unless verbosity is at least -vv.
                if (sum(len(p) for p in new_expl[1:]) > 80*8
                        and item.config.option.verbose < 2):
                    show_max = 10
                    truncated_lines = len(new_expl) - show_max
                    new_expl[show_max:] = [py.builtin._totext(
                        'Detailed information truncated (%d more lines)'
                        ', use "-vv" to show' % truncated_lines)]
                new_expl = [line.replace("\n", "\\n") for line in new_expl]
                res = py.builtin._totext("\n~").join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    # Escape % so later %-formatting cannot misfire.
                    res = res.replace("%", "%%")
                return res

    util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
    """Uninstall the per-test comparison hook installed in setup."""
    util._reprcompare = None
def pytest_sessionfinish(session):
    """Drop the session reference held by the rewriting import hook."""
    state = session.config._assertstate
    if state.hook is not None:
        state.hook.session = None
def _load_modules(mode):
    """Lazily import assertion related code."""
    # Imported lazily (and published as module globals) so that plain mode
    # never pays the import cost of the rewrite/reinterpret machinery.
    global rewrite, reinterpret
    from _pytest.assertion import reinterpret  # noqa
    if mode == "rewrite":
        from _pytest.assertion import rewrite  # noqa
def warn_about_missing_assertion(mode):
    """Warn on stderr when ``assert`` statements are compiled away (python -O)."""
    try:
        assert False
    except AssertionError:
        # Asserts are live; nothing to warn about.
        return
    if mode == "rewrite":
        specifically = ("assertions which are not in test modules "
                        "will be ignored")
    else:
        specifically = "failing tests may report as passing"
    sys.stderr.write("WARNING: " + specifically +
                     " because assert statements are not executed "
                     "by the underlying Python interpreter "
                     "(are you using python -O?)\n")
# Expose this plugin's implementation for the pytest_assertrepr_compare hook.
pytest_assertrepr_compare = util.assertrepr_compare
import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--assert',
action="store",
dest="assertmode",
choices=("rewrite", "reinterp", "plain",),
default="rewrite",
metavar="MODE",
help="""control assertion debugging tools. 'plain'
performs no assertion debugging. 'reinterp'
reinterprets assert statements after they failed
to provide assertion expression information.
'rewrite' (the default) rewrites assert
statements in test modules on import to
provide assert expression information. """)
group.addoption('--no-assert',
action="store_true",
default=False,
dest="noassert",
help="DEPRECATED equivalent to --assert=plain")
group.addoption('--nomagic', '--no-magic',
action="store_true",
default=False,
help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
def pytest_configure(config):
mode = config.getvalue("assertmode")
if config.getvalue("noassert") or config.getvalue("nomagic"):
mode = "plain"
if mode == "rewrite":
try:
import ast # noqa
except ImportError:
mode = "reinterp"
else:
# Both Jython and CPython 2.6.0 have AST bugs that make the
# assertion rewriting hook malfunction.
if (sys.platform.startswith('java') or
sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
hook = rewrite.AssertionRewritingHook() # noqa
sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
config._assertstate.trace("configured with mode set to %r" % (mode,))
def undo():
hook = config._assertstate.hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
hook = session.config._assertstate.hook
if hook is not None:
hook.set_session(session)
def pytest_runtest_setup(item):
"""Setup the pytest_assertrepr_compare hook
The newinterpret and rewrite modules will use util._reprcompare if
it exists to use custom reporting via the
pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
def callbinrepr(op, left, right):
"""Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are dropped unles -vv was used.
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
for new_expl in hook_result:
if new_expl:
if (sum(len(p) for p in new_expl[1:]) > 80*8
and item.config.option.verbose < 2):
show_max = 10
truncated_lines = len(new_expl) - show_max
new_expl[show_max:] = [py.builtin._totext(
'Detailed information truncated (%d more lines)'
', use "-vv" to show' % truncated_lines)]
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = py.builtin._totext("\n~").join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
hook = session.config._assertstate.hook
if hook is not None:
hook.session = None
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
assert False
except AssertionError:
pass
else:
if mode == "rewrite":
specifically = ("assertions which are not in test modules "
"will be ignored")
else:
specifically = "failing tests may report as passing"
sys.stderr.write("WARNING: " + specifically +
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare | 0.300438 | 0.325949 |
import unit_tests.utils as ut_utils
import zaza.model as model
import zaza.openstack.utilities.ceph as ceph_utils
import zaza.openstack.utilities.openstack as openstack_utils
class TestCephUtils(ut_utils.BaseTestCase):
def setUp(self):
    """Delegate to the base test case setup (provides the patching helpers)."""
    super(TestCephUtils, self).setUp()
def _test_expected_pools(self,
                         os_release_pair,
                         expected_pools,
                         radosgw=False):
    """Assert get_expected_pools(radosgw) output for a mocked release pair."""
    self.get_current_os_release_pair.return_value = os_release_pair
    self.assertEqual(expected_pools, ceph_utils.get_expected_pools(radosgw))
def test_get_expected_pools(self):
    """Verify the expected pool list for several OS release pairs."""
    self.patch_object(openstack_utils, 'get_current_os_release_pair')
    cases = [
        # (release pair, expected pool names, radosgw)
        ('trusty_icehouse',
         ['data', 'metadata', 'rbd', 'cinder-ceph', 'glance'], False),
        ('xenial_ocata',
         ['rbd', 'cinder-ceph', 'glance'], False),
        ('xenial_queens',
         ['cinder-ceph', 'glance'], False),
        # Xenial Queens with radosgw adds the rgw bookkeeping pools.
        ('xenial_queens',
         ['cinder-ceph', 'glance', '.rgw.root', '.rgw.control', '.rgw',
          '.rgw.gc', '.users.uid'], True),
    ]
    for os_release_pair, expected_pools, radosgw in cases:
        self._test_expected_pools(os_release_pair, expected_pools, radosgw)
def test_get_ceph_pools(self):
    """Parse pool listings in both output dialects; propagate failures."""
    self.patch_object(model, 'run_on_unit')
    # A non-zero return code must raise CommandRunFailed.
    self.run_on_unit.return_value = {
        'Code': '1',
        'Stdout': '',
        'Stderr': 'something went wrong',
    }
    with self.assertRaises(model.CommandRunFailed):
        ceph_utils.get_ceph_pools('ceph-mon/0')
    expected = {
        'cinder-ceph': 1,
        'glance': 2
    }
    # Same pools, two dialects: comma separated (Xenial Queens) and
    # newline separated (Bionic Queens).
    for stdout in ('1 cinder-ceph,2 glance,', '1 cinder-ceph\n2 glance'):
        self.run_on_unit.return_value = {
            'Code': '0',
            'Stdout': stdout,
            'Stderr': ''
        }
        self.assertEqual(expected, ceph_utils.get_ceph_pools('ceph-mon/0'))
def test_get_rbd_hash(self):
    """get_rbd_hash shells out to ``rbd export | sha512sum`` and strips."""
    self.patch_object(ceph_utils.zaza_model, 'run_on_unit')
    self.run_on_unit.return_value = {'Stdout': 'output\n', 'Code': '0'}
    # The trailing newline from the shell output is stripped from the hash.
    self.assertEqual(ceph_utils.get_rbd_hash('aunit', 'apool',
                                             'aimage',
                                             model_name='amodel'),
                     'output')
    cmd = 'sudo rbd -p apool export --no-progress aimage - | sha512sum'
    self.run_on_unit.assert_called_once_with('aunit', cmd,
                                             model_name='amodel')
    # A non-zero return code must surface as CommandRunFailed.
    self.run_on_unit.return_value = {'Stdout': 'output', 'Code': '1'}
    with self.assertRaises(model.CommandRunFailed):
        ceph_utils.get_rbd_hash('aunit', 'apool', 'aimage',
                                model_name='amodel')
def test_pools_from_broker_req(self):
    """Pool names are extracted (deduplicated) from the broker request."""
    self.patch_object(ceph_utils.juju_utils, 'get_relation_from_unit')
    self.get_relation_from_unit.return_value = {
        'broker_req': (
            '{"api-version": 1, "ops": ['
            '{"op": "create-pool", "name": "cinder-ceph", '
            '"compression-mode": null},'
            '{"op": "create-pool", "name": "cinder-ceph", '
            '"compression-mode": "aggressive"}]}'),
    }
    # Fix: assertEquals is a deprecated unittest alias of assertEqual.
    self.assertEqual(
        ceph_utils.get_pools_from_broker_req(
            'anApplication', 'aModelName'),
        ['cinder-ceph'])
    self.get_relation_from_unit.assert_called_once_with(
        'ceph-mon', 'anApplication', None, model_name='aModelName')
import zaza.model as model
import zaza.openstack.utilities.ceph as ceph_utils
import zaza.openstack.utilities.openstack as openstack_utils
class TestCephUtils(ut_utils.BaseTestCase):
def setUp(self):
super(TestCephUtils, self).setUp()
def _test_expected_pools(self,
os_release_pair,
expected_pools,
radosgw=False):
self.get_current_os_release_pair.return_value = os_release_pair
actual_pools = ceph_utils.get_expected_pools(radosgw)
self.assertEqual(expected_pools, actual_pools)
def test_get_expected_pools(self):
self.patch_object(openstack_utils, 'get_current_os_release_pair')
# Trusty Icehouse
os_release_pair = 'trusty_icehouse'
self.get_current_os_release_pair.return_value = 'trusty_icehouse'
expected_pools = [
'data',
'metadata',
'rbd',
'cinder-ceph',
'glance'
]
self._test_expected_pools(os_release_pair, expected_pools)
# Xenial Ocata
os_release_pair = 'xenial_ocata'
expected_pools = [
'rbd',
'cinder-ceph',
'glance'
]
self._test_expected_pools(os_release_pair, expected_pools)
# Xenial Queens
os_release_pair = 'xenial_queens'
expected_pools = [
'cinder-ceph',
'glance'
]
self._test_expected_pools(os_release_pair, expected_pools)
# Xenial Queens with radosgw
os_release_pair = 'xenial_queens'
expected_pools = [
'cinder-ceph',
'glance',
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
]
self._test_expected_pools(os_release_pair, expected_pools, True)
def test_get_ceph_pools(self):
self.patch_object(model, 'run_on_unit')
# Bad return code
result = {
'Code': '1',
'Stdout': '',
'Stderr': 'something went wrong',
}
self.run_on_unit.return_value = result
with self.assertRaises(model.CommandRunFailed):
ceph_utils.get_ceph_pools('ceph-mon/0')
# Xenial Queens output
result = {
'Code': '0',
'Stdout': '1 cinder-ceph,2 glance,',
'Stderr': ''
}
self.run_on_unit.return_value = result
expected = {
'cinder-ceph': 1,
'glance': 2
}
actual = ceph_utils.get_ceph_pools('ceph-mon/0')
self.assertEqual(expected, actual)
# Bionic Queens output
result = {
'Code': '0',
'Stdout': '1 cinder-ceph\n2 glance',
'Stderr': ''
}
self.run_on_unit.return_value = result
expected = {
'cinder-ceph': 1,
'glance': 2
}
actual = ceph_utils.get_ceph_pools('ceph-mon/0')
self.assertEqual(expected, actual)
def test_get_rbd_hash(self):
self.patch_object(ceph_utils.zaza_model, 'run_on_unit')
self.run_on_unit.return_value = {'Stdout': 'output\n', 'Code': '0'}
self.assertEqual(ceph_utils.get_rbd_hash('aunit', 'apool',
'aimage',
model_name='amodel'),
'output')
cmd = 'sudo rbd -p apool export --no-progress aimage - | sha512sum'
self.run_on_unit.assert_called_once_with('aunit', cmd,
model_name='amodel')
self.run_on_unit.return_value = {'Stdout': 'output', 'Code': '1'}
with self.assertRaises(model.CommandRunFailed):
ceph_utils.get_rbd_hash('aunit', 'apool', 'aimage',
model_name='amodel')
def test_pools_from_broker_req(self):
self.patch_object(ceph_utils.juju_utils, 'get_relation_from_unit')
self.get_relation_from_unit.return_value = {
'broker_req': (
'{"api-version": 1, "ops": ['
'{"op": "create-pool", "name": "cinder-ceph", '
'"compression-mode": null},'
'{"op": "create-pool", "name": "cinder-ceph", '
'"compression-mode": "aggressive"}]}'),
}
self.assertEquals(
ceph_utils.get_pools_from_broker_req(
'anApplication', 'aModelName'),
['cinder-ceph'])
self.get_relation_from_unit.assert_called_once_with(
'ceph-mon', 'anApplication', None, model_name='aModelName') | 0.594316 | 0.408336 |
import os
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
import math
from utils import get_boxes, generate_gt
class MLTDetection(data.Dataset):
def __init__(self, root, split, transform=None, target_transform=None, dim=(512, 512)):
    """Index the images (and ground-truth boxes for train/val splits).

    root: dataset root containing training/, validation/ and test/ subdirs.
    split: 'train' or 'val' load images plus gt_*.txt boxes; any other
        value selects the unlabelled test set.
    transform / target_transform: augmentation callables used by pull_item.
    dim: resize target used for non-train samples - presumably
        (height, width); TODO confirm against pull_item's cv2.resize call.

    Bug fix: the 'val' branch called ``.slit('.')`` (a typo for
    ``.split('.')``), which raised AttributeError whenever split == 'val'.
    The train/val branches are otherwise identical and have been merged.
    """
    self.root = root
    self.split = split
    self.transform = transform
    self.target_transform = target_transform
    self.dim = dim
    if self.split in ('train', 'val'):
        subdir = '/training/' if self.split == 'train' else '/validation/'
        all_items = os.listdir(self.root + subdir)
        imgs = [img for img in all_items if img.strip().split('.')[-1] == 'jpg']
        # Each image foo.jpg has a ground-truth file gt_foo.txt alongside it.
        gts = ['gt_' + x.strip().split('.')[0] + '.txt' for x in imgs]
        self.image_paths = [self.root + subdir + x for x in imgs]
        self.targets = get_boxes([self.root + subdir + x for x in gts])
    else:
        all_items = os.listdir(self.root + '/test/')
        self.image_paths = [self.root + '/test/' + x for x in all_items]
        self.targets = []
def __getitem__(self, index):
    """Return the sample at *index* (delegates to pull_item)."""
    return self.pull_item(index)

def __len__(self):
    """Number of indexed images."""
    return len(self.image_paths)
def pull_item(self, index, model='debug'):
    """Load and preprocess one sample.

    Non-train splits: returns (CHW float image tensor, image path, original
    height, original width) after resizing to ``self.dim`` and subtracting a
    per-channel BGR mean.
    Train split: returns (CHW image tensor, target, seg) after applying the
    configured transforms and building the ground truth via generate_gt.
    NOTE(review): the ``model`` parameter is unused - confirm before removing.
    """
    img_path = self.image_paths[index]
    #target = self.targets[index]
    img = cv2.imread(img_path)
    height, width, channels = img.shape
    if self.split != 'train':
        # cv2.resize takes (width, height), hence the swapped dim indices.
        img = cv2.resize(img, (self.dim[1], self.dim[0])).astype(np.float64)
        img -= np.array([104.00698793, 116.66876762, 122.67891434])  ## mean -bgr
        img = img[:, :, (2, 1, 0)]  ## rgb
        return torch.from_numpy(img).permute(2, 0, 1).float(), img_path, height, width
    else:
        target = self.targets[index]
        ## get rotate rect [x1, y1, x2, y2, x3, y3, x4, y4, diff]
        target = self.target_transform(target)
        # A transform is mandatory for training samples.
        assert(self.transform)
        target = np.array(target)
        img, boxes, labels = self.transform(img, target, None)
        img = img[:, :, (2, 1, 0)]
        target, seg = generate_gt(boxes)
        return torch.from_numpy(img).permute(2, 0, 1), target, seg
import os
import os.path
import sys
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
import math
from utils import get_boxes, generate_gt
class MLTDetection(data.Dataset):
def __init__(self, root, split, transform=None, target_transform=None, dim=(512, 512)):
self.root = root
self.split = split
self.transform = transform
self.target_transform = target_transform
self.dim = dim
if self.split == 'train':
all_items = os.listdir(self.root + '/training/')
imgs = [img for img in all_items if img.strip().split('.')[-1] == 'jpg']
gts = ['gt_' + x.strip().split('.')[0] + '.txt' for x in imgs]
image_paths = [self.root + '/training/' + x for x in imgs]
gt_paths = [self.root + '/training/' + x for x in gts]
self.image_paths = image_paths
self.targets = get_boxes(gt_paths)
elif self.split == 'val':
all_items = os.listdir(self.root + '/validation/')
imgs = [img for img in all_items if img.strip().split('.')[-1] == 'jpg']
gts = ['gt_' + x.strip().slit('.')[0] + '.txt' for x in imgs]
image_paths = [self.root + '/validation/' + x for x in imgs]
gt_paths = [self.root + '/validation/' + x for x in gts]
self.image_paths = image_paths
self.targets = get_boxes(gt_paths)
else:
all_items = os.listdir(self.root + '/test/')
image_paths = [self.root + '/test/' + x for x in all_items]
self.image_paths = image_paths
self.targets = []
def __getitem__(self, index):
return self.pull_item(index)
def __len__(self):
return len(self.image_paths)
def pull_item(self, index, model='debug'):
img_path = self.image_paths[index]
#target = self.targets[index]
img = cv2.imread(img_path)
height, width, channels = img.shape
if self.split != 'train':
img = cv2.resize(img, (self.dim[1], self.dim[0])).astype(np.float64)
img -= np.array([104.00698793, 116.66876762, 122.67891434]) ## mean -bgr
img = img[:, :, (2, 1, 0)] ## rgb
return torch.from_numpy(img).permute(2, 0, 1).float(), img_path, height, width
else:
target = self.targets[index]
## get rotate rect [x1, y1, x2, y2, x3, y3, x4, y4, diff]
target = self.target_transform(target)
assert(self.transform)
target = np.array(target)
img, boxes, labels = self.transform(img, target, None)
img = img[:, :, (2, 1, 0)]
target, seg = generate_gt(boxes)
return torch.from_numpy(img).permute(2, 0, 1), target, seg | 0.333612 | 0.348119 |
import os
import pytest
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream.testing import cli # pylint: disable=unused-import
from tests.testutils import filetypegenerator
# All fixture projects for these tests live under the sibling "patch" dir.
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "patch",)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_missing_patch(cli, datafiles):
    """Deleting the patch file must surface a MISSING_FILE load error."""
    project = str(datafiles)
    # Preflight requires the referenced local patch file to exist.
    os.remove(os.path.join(project, "file_1.patch"))
    result = cli.run(project=project, args=["show", "target.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_non_regular_file_patch(cli, datafiles):
    """Only regular, non-symlink patch files are accepted."""
    project = str(datafiles)
    patch_path = os.path.join(project, "irregular_file.patch")
    # The generator recreates patch_path as a different file type on each
    # iteration; the expected outcome depends on the current type.
    for _file_type in filetypegenerator.generate_file_types(patch_path):
        result = cli.run(project=project, args=["show", "irregular.bst"])
        if os.path.isfile(patch_path) and not os.path.islink(patch_path):
            result.assert_success()
        else:
            result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_invalid_absolute_path(cli, datafiles):
    """An absolute patch path inside the element must be rejected at load."""
    project = str(datafiles)
    element_path = os.path.join(project, "target.bst")
    with open(element_path, "r", encoding="utf-8") as f:
        old_yaml = f.read()
    # Rewrite the relative patch path into an absolute one.
    new_yaml = old_yaml.replace("file_1.patch",
                                os.path.join(project, "file_1.patch"))
    assert old_yaml != new_yaml
    with open(element_path, "w", encoding="utf-8") as f:
        f.write(new_yaml)
    result = cli.run(project=project, args=["show", "target.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "invalid-relative-path"))
def test_invalid_relative_path(cli, datafiles):
    """A patch path escaping the project directory is rejected at load."""
    project = str(datafiles)
    result = cli.run(project=project, args=["show", "irregular.bst"])
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_stage_and_patch(cli, tmpdir, datafiles):
    """Build and checkout; the staged file must carry the patch."""
    project = str(datafiles)
    checkout = os.path.join(str(tmpdir), "checkout")
    # Build, then check out the built artifact.
    for args in (["build", "target.bst"],
                 ["artifact", "checkout", "target.bst", "--directory", checkout]):
        result = cli.run(project=project, args=args)
        result.assert_success()
    # Test the file.txt was patched and changed
    with open(os.path.join(checkout, "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file with superpowers\n"
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_stage_file_nonexistent_dir(cli, datafiles):
    """Patching into a directory absent from the staged tree must fail."""
    project = str(datafiles)
    # Fails at build time because it tries to patch into a non-existing directory
    result = cli.run(project=project, args=["build", "failure-nonexistent-dir.bst"])
    result.assert_main_error(ErrorDomain.STREAM, None)
    result.assert_task_error(ErrorDomain.SOURCE, "patch-no-files")
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_stage_file_empty_dir(cli, datafiles):
    """Patching with nothing else staged must fail with patch-no-files."""
    project = str(datafiles)
    # Fails at build time because it tries to patch with nothing else staged
    result = cli.run(project=project, args=["build", "failure-empty-dir.bst"])
    result.assert_main_error(ErrorDomain.STREAM, None)
    result.assert_task_error(ErrorDomain.SOURCE, "patch-no-files")
@pytest.mark.datafiles(os.path.join(DATA_DIR, "separate-patch-dir"))
def test_stage_separate_patch_dir(cli, tmpdir, datafiles):
    """A patch configured with a directory applies inside that directory."""
    project = str(datafiles)
    checkoutdir = os.path.join(str(tmpdir), "checkout")
    # Build, then check out the built artifact.
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir])
    result.assert_success()
    # Test the file.txt was patched and changed
    with open(os.path.join(checkoutdir, "test-dir", "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file in a directory with superpowers\n"
@pytest.mark.datafiles(os.path.join(DATA_DIR, "multiple-patches"))
def test_stage_multiple_patches(cli, tmpdir, datafiles):
    """All patches listed on the element are applied."""
    project = str(datafiles)
    checkout = os.path.join(str(tmpdir), "checkout")
    # Build, then check out the built artifact.
    for args in (["build", "target.bst"],
                 ["artifact", "checkout", "target.bst", "--directory", checkout]):
        result = cli.run(project=project, args=args)
        result.assert_success()
    # Test the file.txt was patched and changed
    with open(os.path.join(checkout, "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file with more superpowers\n"
@pytest.mark.datafiles(os.path.join(DATA_DIR, "different-strip-level"))
def test_patch_strip_level(cli, tmpdir, datafiles):
    """A non-default strip level still applies the patch correctly."""
    project = str(datafiles)
    checkoutdir = os.path.join(str(tmpdir), "checkout")
    # Build, then check out the built artifact.
    result = cli.run(project=project, args=["build", "target.bst"])
    result.assert_success()
    result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir])
    result.assert_success()
    # Test the file.txt was patched and changed
    with open(os.path.join(checkoutdir, "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file with superpowers\n"
import os
import pytest
from buildstream.exceptions import ErrorDomain, LoadErrorReason
from buildstream.testing import cli # pylint: disable=unused-import
from tests.testutils import filetypegenerator
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "patch",)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_missing_patch(cli, datafiles):
project = str(datafiles)
# Removing the local file causes preflight to fail
localfile = os.path.join(project, "file_1.patch")
os.remove(localfile)
result = cli.run(project=project, args=["show", "target.bst"])
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_non_regular_file_patch(cli, datafiles):
project = str(datafiles)
patch_path = os.path.join(project, "irregular_file.patch")
for _file_type in filetypegenerator.generate_file_types(patch_path):
result = cli.run(project=project, args=["show", "irregular.bst"])
if os.path.isfile(patch_path) and not os.path.islink(patch_path):
result.assert_success()
else:
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_invalid_absolute_path(cli, datafiles):
project = str(datafiles)
with open(os.path.join(project, "target.bst"), "r", encoding="utf-8") as f:
old_yaml = f.read()
new_yaml = old_yaml.replace("file_1.patch", os.path.join(project, "file_1.patch"))
assert old_yaml != new_yaml
with open(os.path.join(project, "target.bst"), "w", encoding="utf-8") as f:
f.write(new_yaml)
result = cli.run(project=project, args=["show", "target.bst"])
result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "invalid-relative-path"))
def test_invalid_relative_path(cli, datafiles):
    """A relative patch path escaping the project must be rejected."""
    project = str(datafiles)
    outcome = cli.run(project=project, args=["show", "irregular.bst"])
    outcome.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID)
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_stage_and_patch(cli, tmpdir, datafiles):
    """Building applies the patch; the checked-out file carries the change."""
    project = str(datafiles)
    checkoutdir = os.path.join(str(tmpdir), "checkout")
    # Build the element, then check the resulting artifact out.
    cli.run(project=project, args=["build", "target.bst"]).assert_success()
    cli.run(
        project=project,
        args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
    ).assert_success()
    # The patch should have rewritten file.txt.
    with open(os.path.join(checkoutdir, "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file with superpowers\n"
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_stage_file_nonexistent_dir(cli, datafiles):
    """Patching into a directory that was never staged fails the build."""
    project = str(datafiles)
    outcome = cli.run(project=project, args=["build", "failure-nonexistent-dir.bst"])
    outcome.assert_main_error(ErrorDomain.STREAM, None)
    outcome.assert_task_error(ErrorDomain.SOURCE, "patch-no-files")
@pytest.mark.datafiles(os.path.join(DATA_DIR, "basic"))
def test_stage_file_empty_dir(cli, datafiles):
    """Patching with nothing else staged fails the build."""
    project = str(datafiles)
    outcome = cli.run(project=project, args=["build", "failure-empty-dir.bst"])
    outcome.assert_main_error(ErrorDomain.STREAM, None)
    outcome.assert_task_error(ErrorDomain.SOURCE, "patch-no-files")
@pytest.mark.datafiles(os.path.join(DATA_DIR, "separate-patch-dir"))
def test_stage_separate_patch_dir(cli, tmpdir, datafiles):
    """A patch targeting a subdirectory is applied inside that subdirectory."""
    project = str(datafiles)
    checkoutdir = os.path.join(str(tmpdir), "checkout")
    # Build the element, then check the resulting artifact out.
    cli.run(project=project, args=["build", "target.bst"]).assert_success()
    cli.run(
        project=project,
        args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
    ).assert_success()
    # The patched file lives under test-dir/ in this fixture.
    with open(os.path.join(checkoutdir, "test-dir", "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file in a directory with superpowers\n"
@pytest.mark.datafiles(os.path.join(DATA_DIR, "multiple-patches"))
def test_stage_multiple_patches(cli, tmpdir, datafiles):
    """Multiple patch sources are applied in order on top of each other."""
    project = str(datafiles)
    checkoutdir = os.path.join(str(tmpdir), "checkout")
    # Build the element, then check the resulting artifact out.
    cli.run(project=project, args=["build", "target.bst"]).assert_success()
    cli.run(
        project=project,
        args=["artifact", "checkout", "target.bst", "--directory", checkoutdir],
    ).assert_success()
    # The second patch's wording wins, proving both patches were applied.
    with open(os.path.join(checkoutdir, "file.txt"), encoding="utf-8") as f:
        assert f.read() == "This is text file with more superpowers\n"
@pytest.mark.datafiles(os.path.join(DATA_DIR, "different-strip-level"))
def test_patch_strip_level(cli, tmpdir, datafiles):
project = str(datafiles)
checkoutdir = os.path.join(str(tmpdir), "checkout")
# Track, fetch, build, checkout
result = cli.run(project=project, args=["build", "target.bst"])
result.assert_success()
result = cli.run(project=project, args=["artifact", "checkout", "target.bst", "--directory", checkoutdir])
result.assert_success()
# Test the file.txt was patched and changed
with open(os.path.join(checkoutdir, "file.txt"), encoding="utf-8") as f:
assert f.read() == "This is text file with superpowers\n" | 0.409221 | 0.313144 |
"""Tests for minimization utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class MinimizeTests(test_util.TestCase):
  """Tests for `tfp.math.minimize`."""

  def test_custom_trace_fn(self):
    """A user `trace_fn` determines the structure and values of the results."""
    init_x = np.array([0., 0.]).astype(np.float32)
    target_x = np.array([3., 4.]).astype(np.float32)
    x = tf.Variable(init_x)
    loss_fn = lambda: tf.reduce_sum((x - target_x)**2)

    # The trace_fn should determine the structure and values of the results.
    def trace_fn(traceable_quantities):
      return {'loss': traceable_quantities.loss, 'x': x,
              'sqdiff': (x - target_x)**2}

    results = tfp.math.minimize(loss_fn, num_steps=100,
                                optimizer=tf.optimizers.Adam(0.1),
                                trace_fn=trace_fn)
    self.evaluate(tf1.global_variables_initializer())
    results_ = self.evaluate(results)
    self.assertAllClose(results_['x'][0], init_x, atol=0.5)
    self.assertAllClose(results_['x'][-1], target_x, atol=0.2)
    self.assertAllClose(results_['sqdiff'][-1], [0., 0.], atol=0.1)

  def test_can_trace_all_traceable_quantities(self):
    """Tracing the whole `traceable_quantities` namedtuple works end-to-end."""
    x = tf.Variable(5.0)
    trace_fn = lambda traceable_quantities: traceable_quantities
    results = tfp.math.minimize(loss_fn=lambda: tf.reduce_sum((x - 1.0)**2),
                                num_steps=10,
                                optimizer=tf.optimizers.Adam(0.1),
                                trace_fn=trace_fn)
    self.evaluate(tf1.global_variables_initializer())
    self.evaluate(results)

  def test_respects_trainable_variables(self):
    """Variables not listed in `trainable_variables` stay fixed."""
    x = tf.Variable(5.)
    y = tf.Variable(2.)
    loss_fn = lambda: tf.reduce_sum((x - y)**2)
    loss = tfp.math.minimize(loss_fn, num_steps=100,
                             optimizer=tf.optimizers.Adam(0.1),
                             trainable_variables=[x])
    # Read the variables only after the optimization has run.
    with tf.control_dependencies([loss]):
      final_x = tf.identity(x)
      final_y = tf.identity(y)
    self.evaluate(tf1.global_variables_initializer())
    final_x_, final_y_ = self.evaluate((final_x, final_y))
    self.assertAllClose(final_x_, 2, atol=0.1)
    self.assertEqual(final_y_, 2.)  # `y` was untrained, so should be unchanged.

  def test_works_when_results_have_dynamic_shape(self):
    """Loss/gradient traces are produced even with dynamically-shaped results."""
    # Create a variable (and thus loss) with dynamically-shaped result.
    x = tf.Variable(initial_value=tf1.placeholder_with_default(
        [5., 3.], shape=None))
    num_steps = 10
    losses, grads = tfp.math.minimize(
        loss_fn=lambda: (x - 2.)**2,
        num_steps=num_steps,
        # TODO(b/137299119) Replace with TF2 optimizer.
        optimizer=tf1.train.AdamOptimizer(0.1),
        trace_fn=lambda t: (t.loss, t.gradients),
        trainable_variables=[x])
    with tf.control_dependencies([losses]):
      final_x = tf.identity(x)
    self.evaluate(tf1.global_variables_initializer())
    final_x_, losses_, grads_ = self.evaluate((final_x, losses, grads))
    self.assertAllEqual(final_x_.shape, [2])
    self.assertAllEqual(losses_.shape, [num_steps, 2])
    self.assertAllEqual(grads_[0].shape, [num_steps, 2])

  def test_preserves_static_num_steps(self):
    """A statically-known `num_steps` yields a statically-shaped trace."""
    x = tf.Variable([5., 3.])
    num_steps = 23
    # Check that we preserve static shapes with static `num_steps`.
    losses = tfp.math.minimize(
        loss_fn=lambda: (x - 2.)**2,
        num_steps=num_steps,
        optimizer=tf.optimizers.Adam(0.1))
    self.assertAllEqual(losses.shape, [num_steps, 2])

  def test_works_with_dynamic_num_steps(self):
    """Optimization also runs when `num_steps` is only known at runtime."""
    x = tf.Variable([5., 3.])
    num_steps_ = 23
    num_steps = tf1.placeholder_with_default(num_steps_, shape=[])
    losses = tfp.math.minimize(
        loss_fn=lambda: (x - 2.)**2,
        num_steps=num_steps,
        optimizer=tf.optimizers.Adam(0.1))
    with tf.control_dependencies([losses]):
      final_x = tf.identity(x)
    self.evaluate(tf1.global_variables_initializer())
    final_x_, losses_ = self.evaluate((final_x, losses))
    self.assertAllEqual(final_x_.shape, [2])
    self.assertAllEqual(losses_.shape, [num_steps_, 2])

  def test_obeys_convergence_criterion(self):
    """Optimization stops early once the convergence criterion triggers."""
    init_x = np.array([0., 0.]).astype(np.float32)
    target_x = np.array([3., 4.]).astype(np.float32)
    x = tf.Variable(init_x)
    loss_fn = lambda: tf.reduce_sum((x - target_x)**2)

    # Check that we can trace the convergence criterion's moving average of
    # decrease in loss.
    trace_fn = (
        lambda tq:  # pylint: disable=g-long-lambda
        (tq.loss, tq.convergence_criterion_state.average_decrease_in_loss))
    atol = 0.1
    results = tfp.math.minimize(
        loss_fn, num_steps=100,
        optimizer=tf.optimizers.SGD(0.1),
        convergence_criterion=(
            tfp.optimizer.convergence_criteria.LossNotDecreasing(atol=atol)),
        trace_fn=trace_fn,
        return_full_length_trace=False)
    self.evaluate(tf1.global_variables_initializer())
    losses_, moving_average_decreases_ = self.evaluate(results)
    self.assertLess(moving_average_decreases_[-1], atol)
    self.assertGreater(moving_average_decreases_[-3], atol)
    self.assertAllEqual(losses_.shape, [35])
    # Check that the second-step loss decreases from the first step. This could
    # fail in graph mode if we were sloppy with `control_dependencies`, so that
    # the steps ran simultaneously or in the wrong order.
    self.assertGreater(losses_[0] - losses_[1], 1e-4)

  def test_convergence_criterion_follows_batch_reduction(self):
    """A custom batch reduction decides when 'enough' of the batch converged."""
    init_x = np.zeros([100]).astype(np.float32)
    target_x = np.arange(100).astype(np.float32)
    x = tf.Variable(init_x)
    loss_fn = lambda: (x - target_x)**2

    # Stop the optimization when 70% of the threads have converged.
    target_portion_converged = 0.7
    batch_convergence_reduce_fn = (
        lambda has_converged: tf.reduce_mean(  # pylint: disable=g-long-lambda
            tf.cast(has_converged, tf.float32)) > target_portion_converged)
    results = tfp.math.minimize(
        loss_fn, num_steps=200,
        optimizer=tf.optimizers.Adam(1.0),
        convergence_criterion=(
            tfp.optimizer.convergence_criteria.LossNotDecreasing(atol=0.1)),
        batch_convergence_reduce_fn=batch_convergence_reduce_fn,
        trace_fn=lambda traceable: traceable.has_converged,
        return_full_length_trace=False)
    self.evaluate(tf1.global_variables_initializer())
    has_converged_by_step = self.evaluate(results)
    self.assertLessEqual(
        np.mean(has_converged_by_step[-2]), target_portion_converged)
    self.assertGreater(
        np.mean(has_converged_by_step[-1]), target_portion_converged)

  def test_criteria_can_run_under_xla_with_static_shape(self):
    """Convergence criteria work inside a fully XLA-compiled loop."""
    # XLA compilation only happens through a real `tf.function` trace, so the
    # test is meaningless when functions are forced to run eagerly. (Previously
    # this condition was inverted — `if not ...` — which skipped the test in
    # every normal regime; the check now matches the one in
    # `test_jit_compile_applies_xla_context` and uses the non-deprecated API.)
    if tf.config.functions_run_eagerly():
      self.skipTest('XLA test does not make sense without tf.function')
    init_x = np.array([0., 0.]).astype(np.float32)
    target_x = np.array([3., 4.]).astype(np.float32)
    x = tf.Variable(init_x)
    loss_fn = lambda: tf.reduce_sum((x - target_x)**2)
    optimizer = tf.optimizers.Adam(0.1)
    num_steps = 100

    # This test verifies that it works to compile the entire optimization loop,
    # as opposed to the `jit_compile` argument to `minimize`, which only
    # compiles an optimization step.
    @tf.function(jit_compile=True)
    def do_minimization(return_full_length_trace):
      return tfp.math.minimize(
          loss_fn=loss_fn,
          num_steps=num_steps,
          optimizer=optimizer,
          trace_fn=lambda ms: (ms.loss, ms.has_converged),
          convergence_criterion=(
              tfp.optimizer.convergence_criteria.LossNotDecreasing(atol=0.1)),
          return_full_length_trace=return_full_length_trace)

    trace = do_minimization(return_full_length_trace=True)
    self.evaluate(tf1.global_variables_initializer())
    losses, has_converged = self.evaluate(trace)
    self.assertEqual(num_steps, losses.shape[0])
    self.assertEqual(num_steps, has_converged.shape[0])
    # Verify that the test is interesting, i.e., that we actually converged
    # before the end.
    self.assertTrue(has_converged[-2])
    # Verify that the final loss is tiled up to the end of the array.
    converged_at_step = np.argmax(has_converged)
    self.assertTrue(np.all(
        losses[converged_at_step + 1:] == losses[converged_at_step]))

  def test_jit_compile_applies_xla_context(self):
    """`jit_compile=True` actually places the loss inside an XLA context."""
    if tf.config.functions_run_eagerly():
      self.skipTest('XLA test does not make sense without tf.function')
    x = tf.Variable(0.)
    optimizer = tf.optimizers.SGD(0.1)

    # Define a 'loss' that returns a constant value indicating
    # whether it is executing in an XLA context.
    using_xla, not_using_xla = 42., -9999.

    def xla_detecting_loss_fn():
      # Search the graph hierarchy for an XLA context.
      graph = tf1.get_default_graph()
      while True:
        if (graph._control_flow_context is not None and
            graph._control_flow_context.IsXLAContext()):
          return using_xla + (x - x)  # Refer to `x` to ensure gradient.
        try:
          graph = graph.outer_graph
        except AttributeError:
          break
      return not_using_xla + (x - x)

    xla_losses = tfp.math.minimize(
        loss_fn=xla_detecting_loss_fn,
        num_steps=1,
        optimizer=optimizer,
        jit_compile=True)
    self.evaluate(tf1.global_variables_initializer())
    self.assertAllClose(xla_losses, [using_xla])

    non_xla_losses = tfp.math.minimize(
        loss_fn=xla_detecting_loss_fn,
        num_steps=1,
        optimizer=optimizer,
        jit_compile=False)
    self.assertAllClose(non_xla_losses, [not_using_xla])

  def test_jit_compiled_optimization_makes_progress(self):
    """An XLA-compiled optimization step still reduces the loss."""
    x = tf.Variable([5., 3.])
    losses = tfp.math.minimize(
        loss_fn=lambda: tf.reduce_sum((x - 2.)**2),
        num_steps=10,
        optimizer=tf.optimizers.Adam(0.1),
        jit_compile=True)
    self.evaluate(tf1.global_variables_initializer())
    losses_ = self.evaluate(losses)
    # Final loss should be lower than initial loss.
    self.assertAllGreater(losses_[0], losses_[-1])

  def test_deterministic_results_with_seed(self):
    """A stateless seed makes a stochastic loss fully reproducible."""
    stochastic_loss_fn = lambda seed: tf.random.stateless_normal([], seed=seed)
    optimizer = tf.optimizers.SGD(1e-3)
    seed = test_util.test_seed(sampler_type='stateless')
    losses1 = self.evaluate(
        tfp.math.minimize(loss_fn=stochastic_loss_fn,
                          num_steps=10,
                          optimizer=optimizer,
                          seed=seed))
    losses2 = self.evaluate(
        tfp.math.minimize(loss_fn=stochastic_loss_fn,
                          num_steps=10,
                          optimizer=optimizer,
                          seed=seed))
    self.assertAllEqual(losses1, losses2)
    # Make sure we got different samples at each step.
    self.assertAllGreater(tf.abs(losses1[1:] - losses1[:-1]), 1e-4)
if __name__ == '__main__':
test_util.main() | tensorflow_probability/python/math/minimize_test.py | """Tests for minimization utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class MinimizeTests(test_util.TestCase):
def test_custom_trace_fn(self):
init_x = np.array([0., 0.]).astype(np.float32)
target_x = np.array([3., 4.]).astype(np.float32)
x = tf.Variable(init_x)
loss_fn = lambda: tf.reduce_sum((x - target_x)**2)
# The trace_fn should determine the structure and values of the results.
def trace_fn(traceable_quantities):
return {'loss': traceable_quantities.loss, 'x': x,
'sqdiff': (x - target_x)**2}
results = tfp.math.minimize(loss_fn, num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
trace_fn=trace_fn)
self.evaluate(tf1.global_variables_initializer())
results_ = self.evaluate(results)
self.assertAllClose(results_['x'][0], init_x, atol=0.5)
self.assertAllClose(results_['x'][-1], target_x, atol=0.2)
self.assertAllClose(results_['sqdiff'][-1], [0., 0.], atol=0.1)
def test_can_trace_all_traceable_quantities(self):
x = tf.Variable(5.0)
trace_fn = lambda traceable_quantities: traceable_quantities
results = tfp.math.minimize(loss_fn=lambda: tf.reduce_sum((x - 1.0)**2),
num_steps=10,
optimizer=tf.optimizers.Adam(0.1),
trace_fn=trace_fn)
self.evaluate(tf1.global_variables_initializer())
self.evaluate(results)
def test_respects_trainable_variables(self):
# Variables not included in `trainable_variables` should stay fixed.
x = tf.Variable(5.)
y = tf.Variable(2.)
loss_fn = lambda: tf.reduce_sum((x - y)**2)
loss = tfp.math.minimize(loss_fn, num_steps=100,
optimizer=tf.optimizers.Adam(0.1),
trainable_variables=[x])
with tf.control_dependencies([loss]):
final_x = tf.identity(x)
final_y = tf.identity(y)
self.evaluate(tf1.global_variables_initializer())
final_x_, final_y_ = self.evaluate((final_x, final_y))
self.assertAllClose(final_x_, 2, atol=0.1)
self.assertEqual(final_y_, 2.) # `y` was untrained, so should be unchanged.
def test_works_when_results_have_dynamic_shape(self):
# Create a variable (and thus loss) with dynamically-shaped result.
x = tf.Variable(initial_value=tf1.placeholder_with_default(
[5., 3.], shape=None))
num_steps = 10
losses, grads = tfp.math.minimize(
loss_fn=lambda: (x - 2.)**2,
num_steps=num_steps,
# TODO(b/137299119) Replace with TF2 optimizer.
optimizer=tf1.train.AdamOptimizer(0.1),
trace_fn=lambda t: (t.loss, t.gradients),
trainable_variables=[x])
with tf.control_dependencies([losses]):
final_x = tf.identity(x)
self.evaluate(tf1.global_variables_initializer())
final_x_, losses_, grads_ = self.evaluate((final_x, losses, grads))
self.assertAllEqual(final_x_.shape, [2])
self.assertAllEqual(losses_.shape, [num_steps, 2])
self.assertAllEqual(grads_[0].shape, [num_steps, 2])
def test_preserves_static_num_steps(self):
x = tf.Variable([5., 3.])
num_steps = 23
# Check that we preserve static shapes with static `num_steps`.
losses = tfp.math.minimize(
loss_fn=lambda: (x - 2.)**2,
num_steps=num_steps,
optimizer=tf.optimizers.Adam(0.1))
self.assertAllEqual(losses.shape, [num_steps, 2])
def test_works_with_dynamic_num_steps(self):
x = tf.Variable([5., 3.])
num_steps_ = 23
num_steps = tf1.placeholder_with_default(num_steps_, shape=[])
losses = tfp.math.minimize(
loss_fn=lambda: (x - 2.)**2,
num_steps=num_steps,
optimizer=tf.optimizers.Adam(0.1))
with tf.control_dependencies([losses]):
final_x = tf.identity(x)
self.evaluate(tf1.global_variables_initializer())
final_x_, losses_ = self.evaluate((final_x, losses))
self.assertAllEqual(final_x_.shape, [2])
self.assertAllEqual(losses_.shape, [num_steps_, 2])
def test_obeys_convergence_criterion(self):
init_x = np.array([0., 0.]).astype(np.float32)
target_x = np.array([3., 4.]).astype(np.float32)
x = tf.Variable(init_x)
loss_fn = lambda: tf.reduce_sum((x - target_x)**2)
# Check that we can trace the convergence criterion's moving average of
# decrease in loss.
trace_fn = (
lambda tq: # pylint: disable=g-long-lambda
(tq.loss, tq.convergence_criterion_state.average_decrease_in_loss))
atol = 0.1
results = tfp.math.minimize(
loss_fn, num_steps=100,
optimizer=tf.optimizers.SGD(0.1),
convergence_criterion=(
tfp.optimizer.convergence_criteria.LossNotDecreasing(atol=atol)),
trace_fn=trace_fn,
return_full_length_trace=False)
self.evaluate(tf1.global_variables_initializer())
losses_, moving_average_decreases_ = self.evaluate(results)
self.assertLess(moving_average_decreases_[-1], atol)
self.assertGreater(moving_average_decreases_[-3], atol)
self.assertAllEqual(losses_.shape, [35])
# Check that the second-step loss decreases from the first step. This could
# fail in graph mode if we were sloppy with `control_dependencies`, so that
# the steps ran simultaneously or in the wrong order.
self.assertGreater(losses_[0] - losses_[1], 1e-4)
def test_convergence_criterion_follows_batch_reduction(self):
init_x = np.zeros([100]).astype(np.float32)
target_x = np.arange(100).astype(np.float32)
x = tf.Variable(init_x)
loss_fn = lambda: (x - target_x)**2
# Stop the optimization when 70% of the threads have converged.
target_portion_converged = 0.7
batch_convergence_reduce_fn = (
lambda has_converged: tf.reduce_mean( # pylint: disable=g-long-lambda
tf.cast(has_converged, tf.float32)) > target_portion_converged)
results = tfp.math.minimize(
loss_fn, num_steps=200,
optimizer=tf.optimizers.Adam(1.0),
convergence_criterion=(
tfp.optimizer.convergence_criteria.LossNotDecreasing(atol=0.1)),
batch_convergence_reduce_fn=batch_convergence_reduce_fn,
trace_fn=lambda traceable: traceable.has_converged,
return_full_length_trace=False)
self.evaluate(tf1.global_variables_initializer())
has_converged_by_step = self.evaluate(results)
self.assertLessEqual(
np.mean(has_converged_by_step[-2]), target_portion_converged)
self.assertGreater(
np.mean(has_converged_by_step[-1]), target_portion_converged)
def test_criteria_can_run_under_xla_with_static_shape(self):
if not tf.config.experimental_functions_run_eagerly():
self.skipTest('XLA test does not make sense without tf.function')
init_x = np.array([0., 0.]).astype(np.float32)
target_x = np.array([3., 4.]).astype(np.float32)
x = tf.Variable(init_x)
loss_fn = lambda: tf.reduce_sum((x - target_x)**2)
optimizer = tf.optimizers.Adam(0.1)
num_steps = 100
# This test verifies that it works to compile the entire optimization loop,
# as opposed to the `jit_compile` argument to `minimize`, which only
# compiles an optimization step.
@tf.function(jit_compile=True)
def do_minimization(return_full_length_trace):
return tfp.math.minimize(
loss_fn=loss_fn,
num_steps=num_steps,
optimizer=optimizer,
trace_fn=lambda ms: (ms.loss, ms.has_converged),
convergence_criterion=(
tfp.optimizer.convergence_criteria.LossNotDecreasing(atol=0.1)),
return_full_length_trace=return_full_length_trace)
trace = do_minimization(return_full_length_trace=True)
self.evaluate(tf1.global_variables_initializer())
losses, has_converged = self.evaluate(trace)
self.assertEqual(num_steps, losses.shape[0])
self.assertEqual(num_steps, has_converged.shape[0])
# Verify that the test is interesting, i.e., that we actually converged
# before the end.
self.assertTrue(has_converged[-2])
# Verify that the final loss is tiled up to the end of the array.
converged_at_step = np.argmax(has_converged)
self.assertTrue(np.all(
losses[converged_at_step + 1:] == losses[converged_at_step]))
def test_jit_compile_applies_xla_context(self):
if tf.config.functions_run_eagerly():
self.skipTest('XLA test does not make sense without tf.function')
x = tf.Variable(0.)
optimizer = tf.optimizers.SGD(0.1)
# Define a 'loss' that returns a constant value indicating
# whether it is executing in an XLA context.
using_xla, not_using_xla = 42., -9999.
def xla_detecting_loss_fn():
# Search the graph hierarchy for an XLA context.
graph = tf1.get_default_graph()
while True:
if (graph._control_flow_context is not None and
graph._control_flow_context.IsXLAContext()):
return using_xla + (x - x) # Refer to `x` to ensure gradient.
try:
graph = graph.outer_graph
except AttributeError:
break
return not_using_xla + (x - x)
xla_losses = tfp.math.minimize(
loss_fn=xla_detecting_loss_fn,
num_steps=1,
optimizer=optimizer,
jit_compile=True)
self.evaluate(tf1.global_variables_initializer())
self.assertAllClose(xla_losses, [using_xla])
non_xla_losses = tfp.math.minimize(
loss_fn=xla_detecting_loss_fn,
num_steps=1,
optimizer=optimizer,
jit_compile=False)
self.assertAllClose(non_xla_losses, [not_using_xla])
def test_jit_compiled_optimization_makes_progress(self):
x = tf.Variable([5., 3.])
losses = tfp.math.minimize(
loss_fn=lambda: tf.reduce_sum((x - 2.)**2),
num_steps=10,
optimizer=tf.optimizers.Adam(0.1),
jit_compile=True)
self.evaluate(tf1.global_variables_initializer())
losses_ = self.evaluate(losses)
# Final loss should be lower than initial loss.
self.assertAllGreater(losses_[0], losses_[-1])
def test_deterministic_results_with_seed(self):
stochastic_loss_fn = lambda seed: tf.random.stateless_normal([], seed=seed)
optimizer = tf.optimizers.SGD(1e-3)
seed = test_util.test_seed(sampler_type='stateless')
losses1 = self.evaluate(
tfp.math.minimize(loss_fn=stochastic_loss_fn,
num_steps=10,
optimizer=optimizer,
seed=seed))
losses2 = self.evaluate(
tfp.math.minimize(loss_fn=stochastic_loss_fn,
num_steps=10,
optimizer=optimizer,
seed=seed))
self.assertAllEqual(losses1, losses2)
# Make sure we got different samples at each step.
self.assertAllGreater(tf.abs(losses1[1:] - losses1[:-1]), 1e-4)
if __name__ == '__main__':
test_util.main() | 0.912095 | 0.585931 |
from enum import IntEnum, unique
import qtawesome as qta
from PyQt5.Qt import QIcon
from hbutils.model import int_enum_loads
@int_enum_loads(enable_int=False, name_preprocess=str.upper, )
@unique
class ProcessingStatus(IntEnum):
    """Lifecycle status of a processing task."""
    PENDING = 0
    WAITING = 1
    PROCESSING = 2
    COMPLETED = 3
    ERROR = 4

    @property
    def text(self):
        """Human-readable (Chinese) label for this status."""
        labels = {
            ProcessingStatus.PENDING: "待处理",
            ProcessingStatus.WAITING: "等待中",
            ProcessingStatus.PROCESSING: "正在处理",
            ProcessingStatus.COMPLETED: "处理完毕",
            ProcessingStatus.ERROR: "处理失败",
        }
        label = labels.get(self)
        if label is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return label

    @property
    def icon(self) -> QIcon:
        """Icon representing this status in the UI."""
        specs = {
            ProcessingStatus.PENDING: ('fa5.sticky-note', 'grey'),
            ProcessingStatus.WAITING: ('fa.clock-o', 'yellow'),
            ProcessingStatus.PROCESSING: ('fa.hourglass-1', 'blue'),
            ProcessingStatus.COMPLETED: ('fa.check', 'green'),
            ProcessingStatus.ERROR: ('fa.remove', 'red'),
        }
        spec = specs.get(self)
        if spec is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return qta.icon(spec[0], color=spec[1])
@int_enum_loads(enable_int=False, name_preprocess=str.upper, )
@unique
class NameStatus(IntEnum):
    """Role of a named quantity in the computation; `next` cycles the roles."""
    NOTHING = 0
    INDEPENDENT = 1
    DEPENDENT = 2

    @property
    def text(self):
        """Human-readable (Chinese) label for this role."""
        labels = {
            NameStatus.NOTHING: "不参与运算",
            NameStatus.INDEPENDENT: "自变量",
            NameStatus.DEPENDENT: "因变量",
        }
        label = labels.get(self)
        if label is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return label

    @property
    def icon(self) -> QIcon:
        """Icon representing this role in the UI."""
        specs = {
            NameStatus.NOTHING: ('mdi.do-not-disturb', 'grey'),
            NameStatus.INDEPENDENT: ('ri.input-cursor-move', '#e97311'),
            NameStatus.DEPENDENT: ('msc.output', 'blue'),
        }
        spec = specs.get(self)
        if spec is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return qta.icon(spec[0], color=spec[1])

    @property
    def next(self) -> 'NameStatus':
        """Successor in the NOTHING -> INDEPENDENT -> DEPENDENT -> NOTHING cycle."""
        order = {
            NameStatus.NOTHING: NameStatus.INDEPENDENT,
            NameStatus.INDEPENDENT: NameStatus.DEPENDENT,
            NameStatus.DEPENDENT: NameStatus.NOTHING,
        }
        successor = order.get(self)
        if successor is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return successor
@int_enum_loads(enable_int=False, name_preprocess=str.upper, )
@unique
class DependentNameStatus(IntEnum):
    """Two-state variant of NameStatus: unused or dependent; `next` toggles."""
    NOTHING = 0
    DEPENDENT = 1

    @property
    def text(self):
        """Human-readable (Chinese) label for this role."""
        labels = {
            DependentNameStatus.NOTHING: "不参与运算",
            DependentNameStatus.DEPENDENT: "因变量",
        }
        label = labels.get(self)
        if label is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return label

    @property
    def icon(self) -> QIcon:
        """Icon representing this role in the UI."""
        specs = {
            DependentNameStatus.NOTHING: ('mdi.do-not-disturb', 'grey'),
            DependentNameStatus.DEPENDENT: ('msc.output', 'blue'),
        }
        spec = specs.get(self)
        if spec is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return qta.icon(spec[0], color=spec[1])

    @property
    def next(self) -> 'DependentNameStatus':
        """The other state: NOTHING <-> DEPENDENT."""
        order = {
            DependentNameStatus.NOTHING: DependentNameStatus.DEPENDENT,
            DependentNameStatus.DEPENDENT: DependentNameStatus.NOTHING,
        }
        successor = order.get(self)
        if successor is None:
            raise ValueError(f'Unknown status - {repr(self)}.')
        return successor
@int_enum_loads(name_preprocess=str.upper, )
@unique
class MessageType(IntEnum):
PENDING = 0
FAILED = 1
SUCCESS = 2
@property
def text(self):
if self == self.PENDING:
return "正在发送"
elif self == self.FAILED:
return "发送失败"
elif self == self.SUCCESS:
return "发送成功"
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def icon(self) -> QIcon:
if self == self.PENDING:
return qta.icon('fa5s.hourglass-half', color='yellow')
elif self == self.FAILED:
return qta.icon('fa5s.times', color='red')
elif self == self.SUCCESS:
return qta.icon('fa5s.check', color='green')
else:
raise ValueError(f'Unknown status - {repr(self)}.') | app/widget/models.py | from enum import IntEnum, unique
import qtawesome as qta
from PyQt5.Qt import QIcon
from hbutils.model import int_enum_loads
@int_enum_loads(enable_int=False, name_preprocess=str.upper, )
@unique
class ProcessingStatus(IntEnum):
PENDING = 0
WAITING = 1
PROCESSING = 2
COMPLETED = 3
ERROR = 4
@property
def text(self):
if self == self.PENDING:
return "待处理"
elif self == self.WAITING:
return "等待中"
elif self == self.PROCESSING:
return "正在处理"
elif self == self.COMPLETED:
return "处理完毕"
elif self == self.ERROR:
return "处理失败"
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def icon(self) -> QIcon:
if self == self.PENDING:
return qta.icon('fa5.sticky-note', color='grey')
elif self == self.WAITING:
return qta.icon('fa.clock-o', color='yellow')
elif self == self.PROCESSING:
return qta.icon('fa.hourglass-1', color='blue')
elif self == self.COMPLETED:
return qta.icon('fa.check', color='green')
elif self == self.ERROR:
return qta.icon('fa.remove', color='red')
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@int_enum_loads(enable_int=False, name_preprocess=str.upper, )
@unique
class NameStatus(IntEnum):
NOTHING = 0
INDEPENDENT = 1
DEPENDENT = 2
@property
def text(self):
if self == self.NOTHING:
return "不参与运算"
elif self == self.INDEPENDENT:
return "自变量"
elif self == self.DEPENDENT:
return "因变量"
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def icon(self) -> QIcon:
if self == self.NOTHING:
return qta.icon('mdi.do-not-disturb', color='grey')
elif self == self.INDEPENDENT:
return qta.icon('ri.input-cursor-move', color='#e97311')
elif self == self.DEPENDENT:
return qta.icon('msc.output', color='blue')
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def next(self) -> 'NameStatus':
if self == self.NOTHING:
return self.INDEPENDENT
elif self == self.INDEPENDENT:
return self.DEPENDENT
elif self == self.DEPENDENT:
return self.NOTHING
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@int_enum_loads(enable_int=False, name_preprocess=str.upper, )
@unique
class DependentNameStatus(IntEnum):
NOTHING = 0
DEPENDENT = 1
@property
def text(self):
if self == self.NOTHING:
return "不参与运算"
elif self == self.DEPENDENT:
return "因变量"
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def icon(self) -> QIcon:
if self == self.NOTHING:
return qta.icon('mdi.do-not-disturb', color='grey')
elif self == self.DEPENDENT:
return qta.icon('msc.output', color='blue')
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def next(self) -> 'DependentNameStatus':
if self == self.NOTHING:
return self.DEPENDENT
elif self == self.DEPENDENT:
return self.NOTHING
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@int_enum_loads(name_preprocess=str.upper, )
@unique
class MessageType(IntEnum):
PENDING = 0
FAILED = 1
SUCCESS = 2
@property
def text(self):
if self == self.PENDING:
return "正在发送"
elif self == self.FAILED:
return "发送失败"
elif self == self.SUCCESS:
return "发送成功"
else:
raise ValueError(f'Unknown status - {repr(self)}.')
@property
def icon(self) -> QIcon:
if self == self.PENDING:
return qta.icon('fa5s.hourglass-half', color='yellow')
elif self == self.FAILED:
return qta.icon('fa5s.times', color='red')
elif self == self.SUCCESS:
return qta.icon('fa5s.check', color='green')
else:
raise ValueError(f'Unknown status - {repr(self)}.') | 0.572006 | 0.162015 |
import asyncio
import voluptuous as vol
from homeassistant.components.knx import DATA_KNX, ATTR_DISCOVER_DEVICES
from homeassistant.components.notify import PLATFORM_SCHEMA, \
BaseNotificationService
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
CONF_ADDRESS = 'address'  # config key: KNX group address notifications are sent to
DEFAULT_NAME = 'KNX Notify'  # default friendly name for the notify service
DEPENDENCIES = ['knx']  # requires the knx component to be set up first
# Platform configuration: a required group address and an optional name.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_ADDRESS): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
@asyncio.coroutine
def async_get_service(hass, config, discovery_info=None):
    """Get the KNX notification service.

    Discovery info (from xknx.yaml) takes precedence over platform config.
    """
    if discovery_info is not None:
        return async_get_service_discovery(hass, discovery_info)
    return async_get_service_config(hass, config)
@callback
def async_get_service_discovery(hass, discovery_info):
    """Set up notifications for KNX platform configured via xknx.yaml."""
    xknx_devices = hass.data[DATA_KNX].xknx.devices
    notification_devices = [
        xknx_devices[device_name]
        for device_name in discovery_info[ATTR_DISCOVER_DEVICES]]
    # No discovered notification devices -> no service.
    if not notification_devices:
        return None
    return KNXNotificationService(notification_devices)
@callback
def async_get_service_config(hass, config):
    """Set up notification for KNX platform configured within platform."""
    import xknx
    knx_module = hass.data[DATA_KNX]
    # Create the xknx notification device and register it with the gateway.
    notification = xknx.devices.Notification(
        knx_module.xknx,
        name=config.get(CONF_NAME),
        group_address=config.get(CONF_ADDRESS))
    knx_module.xknx.devices.add(notification)
    return KNXNotificationService([notification])
class KNXNotificationService(BaseNotificationService):
"""Implement demo notification service."""
    def __init__(self, devices):
        """Initialize the service.

        devices: xknx Notification devices; each is exposed as a notify
        target keyed by its ``name`` attribute.
        """
        self.devices = devices
@property
def targets(self):
"""Return a dictionary of registered targets."""
ret = {}
for device in self.devices:
ret[device.name] = device.name
return ret
@asyncio.coroutine
def async_send_message(self, message="", **kwargs):
"""Send a notification to knx bus."""
if "target" in kwargs:
yield from self._async_send_to_device(message, kwargs["target"])
else:
yield from self._async_send_to_all_devices(message)
@asyncio.coroutine
def _async_send_to_all_devices(self, message):
"""Send a notification to knx bus to all connected devices."""
for device in self.devices:
yield from device.set(message)
@asyncio.coroutine
def _async_send_to_device(self, message, names):
"""Send a notification to knx bus to device with given names."""
for device in self.devices:
if device.name in names:
yield from device.set(message) | homeassistant/components/notify/knx.py | import asyncio
import voluptuous as vol
from homeassistant.components.knx import DATA_KNX, ATTR_DISCOVER_DEVICES
from homeassistant.components.notify import PLATFORM_SCHEMA, \
BaseNotificationService
from homeassistant.const import CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
CONF_ADDRESS = 'address'
DEFAULT_NAME = 'KNX Notify'
DEPENDENCIES = ['knx']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
@asyncio.coroutine
def async_get_service(hass, config, discovery_info=None):
"""Get the KNX notification service."""
return async_get_service_discovery(hass, discovery_info) \
if discovery_info is not None else \
async_get_service_config(hass, config)
@callback
def async_get_service_discovery(hass, discovery_info):
"""Set up notifications for KNX platform configured via xknx.yaml."""
notification_devices = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
notification_devices.append(device)
return \
KNXNotificationService(notification_devices) \
if notification_devices else \
None
@callback
def async_get_service_config(hass, config):
"""Set up notification for KNX platform configured within platform."""
import xknx
notification = xknx.devices.Notification(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address=config.get(CONF_ADDRESS))
hass.data[DATA_KNX].xknx.devices.add(notification)
return KNXNotificationService([notification, ])
class KNXNotificationService(BaseNotificationService):
"""Implement demo notification service."""
def __init__(self, devices):
"""Initialize the service."""
self.devices = devices
@property
def targets(self):
"""Return a dictionary of registered targets."""
ret = {}
for device in self.devices:
ret[device.name] = device.name
return ret
@asyncio.coroutine
def async_send_message(self, message="", **kwargs):
"""Send a notification to knx bus."""
if "target" in kwargs:
yield from self._async_send_to_device(message, kwargs["target"])
else:
yield from self._async_send_to_all_devices(message)
@asyncio.coroutine
def _async_send_to_all_devices(self, message):
"""Send a notification to knx bus to all connected devices."""
for device in self.devices:
yield from device.set(message)
@asyncio.coroutine
def _async_send_to_device(self, message, names):
"""Send a notification to knx bus to device with given names."""
for device in self.devices:
if device.name in names:
yield from device.set(message) | 0.640074 | 0.114567 |
import string
import email.message
from time import strftime
import sys
from random import *
from optparse import OptionParser
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib,os,random,time
from colorama import *
from datetime import datetime
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
def randomString(stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
init()
la7mar = '\033[91m'
lazra9 = '\033[94m'
la5dhar = '\033[92m'
movv = '\033[95m'
lasfar = '\033[93m'
ramadi = '\033[90m'
blid = '\033[1m'
star = '\033[4m'
bigas = '\033[07m'
bigbbs = '\033[27m'
hell = '\033[05m'
saker = '\033[25m'
labyadh = '\033[00m'
cyan = '\033[0;96m'
def cls():
linux = 'clear'
windows = 'cls'
os.system([linux, windows][os.name == 'nt'])
def print_logo():
clear = "\x1b[0m"
colors = [ 36]
x = """
He3llbound237 S3ndR v0.1============================================
_____ [+] Facebook : https://www.facebook.com/marwan.23.07
.-,;='';_),-. [+] Github :https://github.com/phenomenal23 [+]
(,___,) [+] tg: - @hellbound237
,-/`~`\-,___ [+] Website : - soon
/ /).:.('--._ [+] Version : 0.1
{_[ (_,_) [+] Date : 08/09/2021
| Y | [+] Think Twice , Code one [+] / | \ [+]
""" """
==========================================================================================
"""
for N, line in enumerate(x.split("\n")):
sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear))
time.sleep(0.05)
cls()
print_logo()
def sendiyaspy(site):
try:
Namex = open('Send3r/Name.txt', 'r')
Name = random.choice(open('Send3r/Name.txt').readlines())
Name = Name.replace('\n', '').replace('\r', '')
Namex.close()
Subjectx = open('Send3r/Subject.txt', 'r')
Subject = random.choice(open('Send3r/Subject.txt').readlines())
Subject = Subject.replace('\n', '').replace('\r', '')
Subjectx.close()
smtprandom = open('Send3r/Smtps.txt', 'r')
smtp = random.choice(open('Send3r/Smtps.txt').readlines())
smtp = smtp.replace('\n', '').replace('\r', '')
smtprandom.close()
ur = smtp.rstrip()
ch = ur.split('\n')[0].split('|')
serveraddr = ch[0]
toaddr = site
fromaddr = ch[2]
serverport = ch[1]
SMTP_USER = ch[2]
SMTP_PASS = ch[3]
msg = MIMEMultipart()
msg['Subject'] = Subject
msg['From'] = Name
msg['To'] = toaddr
msg.add_header('Content-Type', 'text/html')
msg.attach(MIMEText(data, 'html', 'utf-8'))
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
server = smtplib.SMTP()
server.connect(serveraddr, serverport)
server.login(SMTP_USER, SMTP_PASS)
server.sendmail(fromaddr, [msg['To']], msg.as_string())
server.quit()
print la5dhar + 'h3llbound-s3nd3r.v0.1--------------------------------------' + labyadh
print labyadh + 'Time :' + la7mar + current_time + labyadh
print lasfar + 'To :' + la7mar + toaddr + labyadh
print lasfar + 'Subject :' + la7mar + Subject + labyadh
print lasfar + 'Name :' + la7mar + Name + labyadh
print lasfar + 'Smtp :' + la7mar + ur + labyadh
print lasfar + 'Status :' + la5dhar + 'Success' + labyadh
print la5dhar + '--------------------+-------------------------------------------------------------------' + labyadh
except:
print la7mar + '--------------------+-------------------------------------------------------------------' + labyadh
print '----Smtp Not Working == dead!-->'+ur
print la7mar + '--------------------+-------------------------------------------------------------------' + labyadh
with open('Send3r/letter.txt', 'r') as myfile:
data = myfile.read()
Email = raw_input('Enter Maillist.txt : ')
def main():
for i in ListPass:
try:
site = i.strip()
data=sendiyaspy(site)
except:
pass
ListPass = open(Email, 'r').readlines()
pool = ThreadPool(10)
pool.map(sendiyaspy, ListPass)
pool.close()
pool.join()
if __name__ == '__main__':
print("Finished, success") | mass.py | import string
import email.message
from time import strftime
import sys
from random import *
from optparse import OptionParser
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib,os,random,time
from colorama import *
from datetime import datetime
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
def randomString(stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
init()
la7mar = '\033[91m'
lazra9 = '\033[94m'
la5dhar = '\033[92m'
movv = '\033[95m'
lasfar = '\033[93m'
ramadi = '\033[90m'
blid = '\033[1m'
star = '\033[4m'
bigas = '\033[07m'
bigbbs = '\033[27m'
hell = '\033[05m'
saker = '\033[25m'
labyadh = '\033[00m'
cyan = '\033[0;96m'
def cls():
linux = 'clear'
windows = 'cls'
os.system([linux, windows][os.name == 'nt'])
def print_logo():
clear = "\x1b[0m"
colors = [ 36]
x = """
He3llbound237 S3ndR v0.1============================================
_____ [+] Facebook : https://www.facebook.com/marwan.23.07
.-,;='';_),-. [+] Github :https://github.com/phenomenal23 [+]
(,___,) [+] tg: - @hellbound237
,-/`~`\-,___ [+] Website : - soon
/ /).:.('--._ [+] Version : 0.1
{_[ (_,_) [+] Date : 08/09/2021
| Y | [+] Think Twice , Code one [+] / | \ [+]
""" """
==========================================================================================
"""
for N, line in enumerate(x.split("\n")):
sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear))
time.sleep(0.05)
cls()
print_logo()
def sendiyaspy(site):
try:
Namex = open('Send3r/Name.txt', 'r')
Name = random.choice(open('Send3r/Name.txt').readlines())
Name = Name.replace('\n', '').replace('\r', '')
Namex.close()
Subjectx = open('Send3r/Subject.txt', 'r')
Subject = random.choice(open('Send3r/Subject.txt').readlines())
Subject = Subject.replace('\n', '').replace('\r', '')
Subjectx.close()
smtprandom = open('Send3r/Smtps.txt', 'r')
smtp = random.choice(open('Send3r/Smtps.txt').readlines())
smtp = smtp.replace('\n', '').replace('\r', '')
smtprandom.close()
ur = smtp.rstrip()
ch = ur.split('\n')[0].split('|')
serveraddr = ch[0]
toaddr = site
fromaddr = ch[2]
serverport = ch[1]
SMTP_USER = ch[2]
SMTP_PASS = ch[3]
msg = MIMEMultipart()
msg['Subject'] = Subject
msg['From'] = Name
msg['To'] = toaddr
msg.add_header('Content-Type', 'text/html')
msg.attach(MIMEText(data, 'html', 'utf-8'))
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
server = smtplib.SMTP()
server.connect(serveraddr, serverport)
server.login(SMTP_USER, SMTP_PASS)
server.sendmail(fromaddr, [msg['To']], msg.as_string())
server.quit()
print la5dhar + 'h3llbound-s3nd3r.v0.1--------------------------------------' + labyadh
print labyadh + 'Time :' + la7mar + current_time + labyadh
print lasfar + 'To :' + la7mar + toaddr + labyadh
print lasfar + 'Subject :' + la7mar + Subject + labyadh
print lasfar + 'Name :' + la7mar + Name + labyadh
print lasfar + 'Smtp :' + la7mar + ur + labyadh
print lasfar + 'Status :' + la5dhar + 'Success' + labyadh
print la5dhar + '--------------------+-------------------------------------------------------------------' + labyadh
except:
print la7mar + '--------------------+-------------------------------------------------------------------' + labyadh
print '----Smtp Not Working == dead!-->'+ur
print la7mar + '--------------------+-------------------------------------------------------------------' + labyadh
with open('Send3r/letter.txt', 'r') as myfile:
data = myfile.read()
Email = raw_input('Enter Maillist.txt : ')
def main():
for i in ListPass:
try:
site = i.strip()
data=sendiyaspy(site)
except:
pass
ListPass = open(Email, 'r').readlines()
pool = ThreadPool(10)
pool.map(sendiyaspy, ListPass)
pool.close()
pool.join()
if __name__ == '__main__':
print("Finished, success") | 0.075138 | 0.079782 |
from cvxpy.atoms.atom import Atom
from cvxpy.atoms.affine.binary_operators import multiply
import numpy as np
import scipy.sparse as sp
def diff_pos(x, y):
r"""The difference :math:`x - y` with domain `\{x, y : x > y > 0\}`.
This atom is log-log concave.
Parameters:
----------
x : :class:`~cvxpy.expressions.expression.Expression`
An Expression.
y : :class:`~cvxpy.expressions.expression.Expression`
An Expression.
"""
return multiply(x, one_minus_pos(y/x))
class one_minus_pos(Atom):
r"""The difference :math:`1 - x` with domain `\{x : 0 < x < 1\}`.
This atom is log-log concave.
Parameters:
----------
x : :class:`~cvxpy.expressions.expression.Expression`
An Expression.
"""
def __init__(self, x):
super(one_minus_pos, self).__init__(x)
self.args[0] = x
self._ones = np.ones(self.args[0].shape)
def numeric(self, values):
return self._ones - values[0]
def _grad(self, values):
del values
return sp.csc_matrix(-1.0 * self._ones)
def name(self):
return "%s(%s)" % (self.__class__.__name__, self.args[0])
def shape_from_args(self):
"""Returns the (row, col) shape of the expression.
"""
return self.args[0].shape
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
return (True, False)
def is_atom_convex(self):
"""Is the atom convex?
"""
return False
def is_atom_concave(self):
"""Is the atom concave?
"""
return False
def is_atom_log_log_convex(self):
"""Is the atom log-log convex?
"""
return False
def is_atom_log_log_concave(self):
"""Is the atom log-log concave?
"""
return True
def is_incr(self, idx):
"""Is the composition non-decreasing in argument idx?
"""
return False
def is_decr(self, idx):
"""Is the composition non-increasing in argument idx?
"""
return True | cvxpy/atoms/one_minus_pos.py | from cvxpy.atoms.atom import Atom
from cvxpy.atoms.affine.binary_operators import multiply
import numpy as np
import scipy.sparse as sp
def diff_pos(x, y):
r"""The difference :math:`x - y` with domain `\{x, y : x > y > 0\}`.
This atom is log-log concave.
Parameters:
----------
x : :class:`~cvxpy.expressions.expression.Expression`
An Expression.
y : :class:`~cvxpy.expressions.expression.Expression`
An Expression.
"""
return multiply(x, one_minus_pos(y/x))
class one_minus_pos(Atom):
r"""The difference :math:`1 - x` with domain `\{x : 0 < x < 1\}`.
This atom is log-log concave.
Parameters:
----------
x : :class:`~cvxpy.expressions.expression.Expression`
An Expression.
"""
def __init__(self, x):
super(one_minus_pos, self).__init__(x)
self.args[0] = x
self._ones = np.ones(self.args[0].shape)
def numeric(self, values):
return self._ones - values[0]
def _grad(self, values):
del values
return sp.csc_matrix(-1.0 * self._ones)
def name(self):
return "%s(%s)" % (self.__class__.__name__, self.args[0])
def shape_from_args(self):
"""Returns the (row, col) shape of the expression.
"""
return self.args[0].shape
def sign_from_args(self):
"""Returns sign (is positive, is negative) of the expression.
"""
return (True, False)
def is_atom_convex(self):
"""Is the atom convex?
"""
return False
def is_atom_concave(self):
"""Is the atom concave?
"""
return False
def is_atom_log_log_convex(self):
"""Is the atom log-log convex?
"""
return False
def is_atom_log_log_concave(self):
"""Is the atom log-log concave?
"""
return True
def is_incr(self, idx):
"""Is the composition non-decreasing in argument idx?
"""
return False
def is_decr(self, idx):
"""Is the composition non-increasing in argument idx?
"""
return True | 0.922944 | 0.779637 |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class DocumentInfoPageEntry(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, height: float=None, width: float=None): # noqa: E501
"""DocumentInfoPageEntry - a model defined in Swagger
:param height: The height of this DocumentInfoPageEntry. # noqa: E501
:type height: float
:param width: The width of this DocumentInfoPageEntry. # noqa: E501
:type width: float
"""
self.swagger_types = {
'height': float,
'width': float
}
self.attribute_map = {
'height': 'height',
'width': 'width'
}
self._height = height
self._width = width
@classmethod
def from_dict(cls, dikt) -> 'DocumentInfoPageEntry':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The DocumentInfoPageEntry of this DocumentInfoPageEntry. # noqa: E501
:rtype: DocumentInfoPageEntry
"""
return util.deserialize_model(dikt, cls)
@property
def height(self) -> float:
"""Gets the height of this DocumentInfoPageEntry.
The height of the page, in PDF units. # noqa: E501
:return: The height of this DocumentInfoPageEntry.
:rtype: float
"""
return self._height
@height.setter
def height(self, height: float):
"""Sets the height of this DocumentInfoPageEntry.
The height of the page, in PDF units. # noqa: E501
:param height: The height of this DocumentInfoPageEntry.
:type height: float
"""
if height is None:
raise ValueError("Invalid value for `height`, must not be `None`") # noqa: E501
self._height = height
@property
def width(self) -> float:
"""Gets the width of this DocumentInfoPageEntry.
The width of the page, in PDF units. # noqa: E501
:return: The width of this DocumentInfoPageEntry.
:rtype: float
"""
return self._width
@width.setter
def width(self, width: float):
"""Sets the width of this DocumentInfoPageEntry.
The width of the page, in PDF units. # noqa: E501
:param width: The width of this DocumentInfoPageEntry.
:type width: float
"""
if width is None:
raise ValueError("Invalid value for `width`, must not be `None`") # noqa: E501
self._width = width | backend/swagger_server/models/document_info_page_entry.py |
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from .base_model_ import Model
from .. import util
class DocumentInfoPageEntry(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, height: float=None, width: float=None): # noqa: E501
"""DocumentInfoPageEntry - a model defined in Swagger
:param height: The height of this DocumentInfoPageEntry. # noqa: E501
:type height: float
:param width: The width of this DocumentInfoPageEntry. # noqa: E501
:type width: float
"""
self.swagger_types = {
'height': float,
'width': float
}
self.attribute_map = {
'height': 'height',
'width': 'width'
}
self._height = height
self._width = width
@classmethod
def from_dict(cls, dikt) -> 'DocumentInfoPageEntry':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The DocumentInfoPageEntry of this DocumentInfoPageEntry. # noqa: E501
:rtype: DocumentInfoPageEntry
"""
return util.deserialize_model(dikt, cls)
@property
def height(self) -> float:
"""Gets the height of this DocumentInfoPageEntry.
The height of the page, in PDF units. # noqa: E501
:return: The height of this DocumentInfoPageEntry.
:rtype: float
"""
return self._height
@height.setter
def height(self, height: float):
"""Sets the height of this DocumentInfoPageEntry.
The height of the page, in PDF units. # noqa: E501
:param height: The height of this DocumentInfoPageEntry.
:type height: float
"""
if height is None:
raise ValueError("Invalid value for `height`, must not be `None`") # noqa: E501
self._height = height
@property
def width(self) -> float:
"""Gets the width of this DocumentInfoPageEntry.
The width of the page, in PDF units. # noqa: E501
:return: The width of this DocumentInfoPageEntry.
:rtype: float
"""
return self._width
@width.setter
def width(self, width: float):
"""Sets the width of this DocumentInfoPageEntry.
The width of the page, in PDF units. # noqa: E501
:param width: The width of this DocumentInfoPageEntry.
:type width: float
"""
if width is None:
raise ValueError("Invalid value for `width`, must not be `None`") # noqa: E501
self._width = width | 0.937929 | 0.197097 |
import csv
import time
import json
import emoji
import re
import pickle
from emoji import unicode_codes
filename = "/data/06333/aroraish/ut_venmo_2018.json"
brokenlines = "/data/06333/aroraish/broken4.txt"
emojicols = [u"\U0001f3fb", u"\U0001f3fc", u"\U0001f3fd", u"\U0001f3fe", u"\U0001f3ff"]
pattern = u'(' + u'|'.join(re.escape(u) for u in emojicols) + u')'
allCols = re.compile(pattern)
emojiss = unicode_codes.EMOJI_ALIAS_UNICODE
coloured = set()
for key in emojiss:
if(allCols.findall(emojiss[key])):
coloured.add(emojiss[key])
coloured.add(allCols.sub('',emojiss[key]))
coloured.remove(u"")
emojis = sorted(coloured, key=len,
reverse=True)
pattern2 = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
colouredRE = re.compile(pattern2)
emojis = sorted(emojiss.values(), key=len,
reverse=True)
pattern3 = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
ree = re.compile(pattern3)
total = 0
emojim = 0
emojit = 0
with open(filename, 'r') as f, open("/data/06333/aroraish/modifiableE.csv", 'w') as m, open("/data/06333/aroraish/modifiableN.csv", "w") as mn, open("/data/06333/aroraish/rest.csv","w") as r, open(brokenlines, 'w') as bf:
fieldnames = ['id', 'type', 'message', 'time']
csvwrt = csv.DictWriter(m, fieldnames=fieldnames)
csvwrt.writeheader()
csvwrt = csv.DictWriter(mn, fieldnames=fieldnames)
csvwrt.writeheader()
csvwrt = csv.DictWriter(r, fieldnames=fieldnames)
csvwrt.writeheader()
mod = csv.writer(m)
modnon = csv.writer(mn)
rest = csv.writer(r)
for line in f:
total += 1
try:
j = json.loads(line)
message = j[u'message']
time = j[u'created_time']
payment_id = str(j[u'payment_id'])
what_type = j[u'type']
if(len(colouredRE.findall(message)) > 0):
emojim+=1
mod.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
elif(len(ree.findall(message)) > 0):
emojit += 1
modnon.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
else:
rest.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
except:
total -= 1
bf.write(line)
te = [total, emojim, emojit]
pickle.dump(te, open("stats.pkl","w")) | gonnawork2.py | import csv
import time
import json
import emoji
import re
import pickle
from emoji import unicode_codes
filename = "/data/06333/aroraish/ut_venmo_2018.json"
brokenlines = "/data/06333/aroraish/broken4.txt"
emojicols = [u"\U0001f3fb", u"\U0001f3fc", u"\U0001f3fd", u"\U0001f3fe", u"\U0001f3ff"]
pattern = u'(' + u'|'.join(re.escape(u) for u in emojicols) + u')'
allCols = re.compile(pattern)
emojiss = unicode_codes.EMOJI_ALIAS_UNICODE
coloured = set()
for key in emojiss:
if(allCols.findall(emojiss[key])):
coloured.add(emojiss[key])
coloured.add(allCols.sub('',emojiss[key]))
coloured.remove(u"")
emojis = sorted(coloured, key=len,
reverse=True)
pattern2 = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
colouredRE = re.compile(pattern2)
emojis = sorted(emojiss.values(), key=len,
reverse=True)
pattern3 = u'(' + u'|'.join(re.escape(u) for u in emojis) + u')'
ree = re.compile(pattern3)
total = 0
emojim = 0
emojit = 0
with open(filename, 'r') as f, open("/data/06333/aroraish/modifiableE.csv", 'w') as m, open("/data/06333/aroraish/modifiableN.csv", "w") as mn, open("/data/06333/aroraish/rest.csv","w") as r, open(brokenlines, 'w') as bf:
fieldnames = ['id', 'type', 'message', 'time']
csvwrt = csv.DictWriter(m, fieldnames=fieldnames)
csvwrt.writeheader()
csvwrt = csv.DictWriter(mn, fieldnames=fieldnames)
csvwrt.writeheader()
csvwrt = csv.DictWriter(r, fieldnames=fieldnames)
csvwrt.writeheader()
mod = csv.writer(m)
modnon = csv.writer(mn)
rest = csv.writer(r)
for line in f:
total += 1
try:
j = json.loads(line)
message = j[u'message']
time = j[u'created_time']
payment_id = str(j[u'payment_id'])
what_type = j[u'type']
if(len(colouredRE.findall(message)) > 0):
emojim+=1
mod.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
elif(len(ree.findall(message)) > 0):
emojit += 1
modnon.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
else:
rest.writerow([payment_id.encode("utf-8"), what_type.encode("utf-8"), message.encode("utf-8"), time.encode("utf-8")])
except:
total -= 1
bf.write(line)
te = [total, emojim, emojit]
pickle.dump(te, open("stats.pkl","w")) | 0.094109 | 0.056809 |
from behave import *
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import urllib.parse as urlparse
import re
def getToken(browser):
parsed = urlparse.urlparse(browser.current_url)
return urlparse.parse_qs(parsed.query)['token'][0]
category = None
categoryCount = 0
imagesCount = 0
@given(u'a web browser is on a product edit page')
def step_impl(context):
context.browser.get("http://mys01.fit.vutbr.cz:8024/admin/index.php?route=catalog/product/edit&token=%s&product_id=41" % getToken(context.browser))
assert(u'Edit Product' == context.browser.find_element_by_css_selector("#content h3.panel-title").text)
@given(u'the "General" tab is selected in tab menu')
def step_impl(context):
context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(1) a").click()
assert(u'General' == context.browser.find_element_by_css_selector("#content .nav-tabs li.active a").text)
@given(u'the user adds "42" to the "Product name" input')
def step_impl(context):
nameInput = context.browser.find_element_by_css_selector("#input-name1")
nameInput.send_keys("42")
@when(u'the user clicks on the "Save" button')
def step_impl(context):
context.browser.find_element_by_css_selector("#content .page-header button[type='submit']").click()
@then(u'product edit page is reloaded')
def step_impl(context):
assert(u'Product List' == context.browser.find_element_by_css_selector("#content h3.panel-title").text)
@then(u'the "Product name" value is "iMac42"')
def step_impl(context):
context.browser.get("http://mys01.fit.vutbr.cz:8024/admin/index.php?route=catalog/product/edit&token=%s&product_id=41" % getToken(context.browser))
assert(u'iMac42' == context.browser.find_element_by_css_selector("#input-name1").get_attribute("value"))
@given(u'the "Data" tab is selected in tab menu')
def step_impl(context):
context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(2) a").click()
assert(u'Data' == context.browser.find_element_by_css_selector("#content .nav-tabs li.active a").text)
@when(u'the user hovers on SKU tooltip icon')
def step_impl(context):
ActionChains(context.browser).move_to_element(context.browser.find_element_by_css_selector("#tab-data > div:nth-child(2) > label > span")).perform()
@then(u'the Stock Keeping Unit is shown')
def step_impl(context):
assert(u'Stock Keeping Unit' == context.browser.find_element_by_css_selector("#tab-data > div:nth-child(2) > label > span").get_attribute('data-original-title'))
@when(u'the user hovers on UPC tooltip icon')
def step_impl(context):
ActionChains(context.browser).move_to_element(context.browser.find_element_by_css_selector("#tab-data > div:nth-child(3) > label > span")).perform()
@then(u'the Universal Product Code is shown')
def step_impl(context):
assert(u'Universal Product Code' == context.browser.find_element_by_css_selector("#tab-data > div:nth-child(3) > label > span").get_attribute('data-original-title'))
@when(u'the user hovers on EAN tooltip icon')
def step_impl(context):
ActionChains(context.browser).move_to_element(context.browser.find_element_by_css_selector("#tab-data > div:nth-child(4) > label > span")).perform()
@then(u'the European Article Number is shown')
def step_impl(context):
assert(u'European Article Number' == context.browser.find_element_by_css_selector("#tab-data > div:nth-child(4) > label > span").get_attribute('data-original-title'))
@when(u'the user hovers on JAN tooltip icon')
def step_impl(context):
ActionChains(context.browser).move_to_element(context.browser.find_element_by_css_selector("#tab-data > div:nth-child(5) > label > span")).perform()
@then(u'the Japanese Article Number is shown')
def step_impl(context):
assert(u'Japanese Article Number' == context.browser.find_element_by_css_selector("#tab-data > div:nth-child(5) > label > span").get_attribute('data-original-title'))
@when(u'the user hovers on ISBN tooltip icon')
def step_impl(context):
ActionChains(context.browser).move_to_element(context.browser.find_element_by_css_selector("#tab-data > div:nth-child(6) > label > span")).perform()
@then(u'the International Standard Book Number is shown')
def step_impl(context):
assert(u'International Standard Book Number' == context.browser.find_element_by_css_selector("#tab-data > div:nth-child(6) > label > span").get_attribute('data-original-title'))
@when(u'the user hovers on MPN tooltip icon')
def step_impl(context):
ActionChains(context.browser).move_to_element(context.browser.find_element_by_css_selector("#tab-data > div:nth-child(7) > label > span")).perform()
@then(u'the Manufacturer Part Number is shown')
def step_impl(context):
assert(u'Manufacturer Part Number' == context.browser.find_element_by_css_selector("#tab-data > div:nth-child(7) > label > span").get_attribute('data-original-title'))
@when(u'the user clicks on calendar icon for "Date Available" input')
def step_impl(context):
context.browser.find_element_by_css_selector("#tab-data > div:nth-child(17) > div > div > span > button").click()
@then(u'a calendar selector is shown below the input')
def step_impl(context):
classAttribute = context.browser.find_element_by_css_selector("body > div.bootstrap-datetimepicker-widget.dropdown-menu").get_attribute("class")
assert(re.search("picker-open", classAttribute) != False)
@given(u'an empty value is in "Model" input')
def step_impl(context):
modelInput = context.browser.find_element_by_css_selector("#input-model")
modelInput.clear()
assert(modelInput.text == '')
@then(u'a warning is displayed below the page title')
def step_impl(context):
context.browser.find_element_by_css_selector("#content > div.container-fluid > div.alert.alert-danger")
@then(u'an explanation is displayed below the "Model" input')
def step_impl(context):
context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(2) a").click()
assert(context.browser.find_element_by_css_selector("#tab-data > div.form-group.required.has-error > div > div").text != None)
@given(u'the "Links" tab is selected in tab menu')
def step_impl(context):
context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(3) a").click()
assert(u'Links' == context.browser.find_element_by_css_selector("#content .nav-tabs li.active a").text)
@given(u'products is in at least one category')
def step_impl(context):
childCategories = context.browser.find_elements_by_css_selector("#product-category div")
categoryCount = len(childCategories)
category = context.browser.find_element_by_css_selector("#product-category div:first-child")
assert(categoryCount > 0)
@when(u'the user clicks on the remove icon next to a category name')
def step_impl(context):
context.browser.find_element_by_css_selector("#product-category > div:first-child > i.fa-minus-circle").click()
@then(u'category dissapears from the list')
def step_impl(context):
childCategories = context.browser.find_elements_by_css_selector("#product-category div")
assert(len(childCategories) != categoryCount)
@then(u'the product is no longer in this category')
def step_impl(context):
childCategories = context.browser.find_elements_by_css_selector("#product-category > div")
for childCategory in childCategories:
assert(childCategory.text != category.text)
@given(u'the "Image" tab is selected in tab menu')
def step_impl(context):
context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(9) a").click()
assert(u'Image' == context.browser.find_element_by_css_selector("#content .nav-tabs li.active a").text)
@given(u'products has at least one image')
def step_impl(context):
images = context.browser.find_elements_by_css_selector("#images > tbody > tr")
imagesCount = len(images)
assert(imagesCount > 0)
@when(u'the user clicks on the remove icon in the last table column')
def step_impl(context):
context.browser.find_element_by_css_selector("#images > tbody > tr:nth-child(1) > td:nth-child(3) > button").click()
@then(u'image dissapears from the list')
def step_impl(context):
images = context.browser.find_elements_by_css_selector("#images > tbody > tr")
assert(len(images) != imagesCount) | ITS/Project-2/features/steps/edit.py | from behave import *
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import urllib.parse as urlparse
import re
def getToken(browser):
parsed = urlparse.urlparse(browser.current_url)
return urlparse.parse_qs(parsed.query)['token'][0]
category = None
categoryCount = 0
imagesCount = 0
@given(u'a web browser is on a product edit page')
def step_impl(context):
context.browser.get("http://mys01.fit.vutbr.cz:8024/admin/index.php?route=catalog/product/edit&token=%s&product_id=41" % getToken(context.browser))
assert(u'Edit Product' == context.browser.find_element_by_css_selector("#content h3.panel-title").text)
@given(u'the "General" tab is selected in tab menu')
def step_impl(context):
    # Activate the first tab and verify it is now marked active.
    context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(1) a").click()
    active_tab = context.browser.find_element_by_css_selector("#content .nav-tabs li.active a")
    assert active_tab.text == u'General'
@given(u'the user adds "42" to the "Product name" input')
def step_impl(context):
    # Append "42" to whatever the name field already contains.
    context.browser.find_element_by_css_selector("#input-name1").send_keys("42")
@when(u'the user clicks on the "Save" button')
def step_impl(context):
    # Submit the edit form via the page-header save button.
    save_button = context.browser.find_element_by_css_selector("#content .page-header button[type='submit']")
    save_button.click()
@then(u'product edit page is reloaded')
def step_impl(context):
    # After saving, the admin redirects back to the product list page.
    heading = context.browser.find_element_by_css_selector("#content h3.panel-title")
    assert heading.text == u'Product List'
@then(u'the "Product name" value is "iMac42"')
def step_impl(context):
    # Re-open the edit form and read back the persisted product name.
    context.browser.get("http://mys01.fit.vutbr.cz:8024/admin/index.php?route=catalog/product/edit&token=%s&product_id=41" % getToken(context.browser))
    name_value = context.browser.find_element_by_css_selector("#input-name1").get_attribute("value")
    assert name_value == u'iMac42'
@given(u'the "Data" tab is selected in tab menu')
def step_impl(context):
    # Activate the second tab and verify it is now marked active.
    context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(2) a").click()
    active_tab = context.browser.find_element_by_css_selector("#content .nav-tabs li.active a")
    assert active_tab.text == u'Data'
@when(u'the user hovers on SKU tooltip icon')
def step_impl(context):
    # Move the mouse over the SKU label's tooltip icon.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(2) > label > span")
    ActionChains(context.browser).move_to_element(icon).perform()
@then(u'the Stock Keeping Unit is shown')
def step_impl(context):
    # The Bootstrap tooltip text lives in the data-original-title attribute.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(2) > label > span")
    assert icon.get_attribute('data-original-title') == u'Stock Keeping Unit'
@when(u'the user hovers on UPC tooltip icon')
def step_impl(context):
    # Move the mouse over the UPC label's tooltip icon.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(3) > label > span")
    ActionChains(context.browser).move_to_element(icon).perform()
@then(u'the Universal Product Code is shown')
def step_impl(context):
    # The Bootstrap tooltip text lives in the data-original-title attribute.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(3) > label > span")
    assert icon.get_attribute('data-original-title') == u'Universal Product Code'
@when(u'the user hovers on EAN tooltip icon')
def step_impl(context):
    # Move the mouse over the EAN label's tooltip icon.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(4) > label > span")
    ActionChains(context.browser).move_to_element(icon).perform()
@then(u'the European Article Number is shown')
def step_impl(context):
    # The Bootstrap tooltip text lives in the data-original-title attribute.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(4) > label > span")
    assert icon.get_attribute('data-original-title') == u'European Article Number'
@when(u'the user hovers on JAN tooltip icon')
def step_impl(context):
    # Move the mouse over the JAN label's tooltip icon.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(5) > label > span")
    ActionChains(context.browser).move_to_element(icon).perform()
@then(u'the Japanese Article Number is shown')
def step_impl(context):
    # The Bootstrap tooltip text lives in the data-original-title attribute.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(5) > label > span")
    assert icon.get_attribute('data-original-title') == u'Japanese Article Number'
@when(u'the user hovers on ISBN tooltip icon')
def step_impl(context):
    # Move the mouse over the ISBN label's tooltip icon.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(6) > label > span")
    ActionChains(context.browser).move_to_element(icon).perform()
@then(u'the International Standard Book Number is shown')
def step_impl(context):
    # The Bootstrap tooltip text lives in the data-original-title attribute.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(6) > label > span")
    assert icon.get_attribute('data-original-title') == u'International Standard Book Number'
@when(u'the user hovers on MPN tooltip icon')
def step_impl(context):
    # Move the mouse over the MPN label's tooltip icon.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(7) > label > span")
    ActionChains(context.browser).move_to_element(icon).perform()
@then(u'the Manufacturer Part Number is shown')
def step_impl(context):
    # The Bootstrap tooltip text lives in the data-original-title attribute.
    icon = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(7) > label > span")
    assert icon.get_attribute('data-original-title') == u'Manufacturer Part Number'
@when(u'the user clicks on calendar icon for "Date Available" input')
def step_impl(context):
    # Open the datetime picker attached to the "Date Available" field.
    calendar_button = context.browser.find_element_by_css_selector("#tab-data > div:nth-child(17) > div > div > span > button")
    calendar_button.click()
@then(u'a calendar selector is shown below the input')
def step_impl(context):
    # Check that the datetimepicker widget carries the "picker-open" class.
    classAttribute = context.browser.find_element_by_css_selector("body > div.bootstrap-datetimepicker-widget.dropdown-menu").get_attribute("class")
    # Bug fix: re.search returns a Match object or None — never False — so the
    # original `!= False` comparison was always true. Require an actual match.
    assert(re.search("picker-open", classAttribute) is not None)
@given(u'an empty value is in "Model" input')
def step_impl(context):
    # Clear the "Model" field and verify it is empty.
    modelInput = context.browser.find_element_by_css_selector("#input-model")
    modelInput.clear()
    # Bug fix: an <input>'s content is exposed via its "value" attribute;
    # WebElement.text is always '' for inputs, so the original assertion
    # could never fail even when clear() did nothing.
    assert(modelInput.get_attribute("value") == '')
@then(u'a warning is displayed below the page title')
def step_impl(context):
    # No explicit assert: find_element raises NoSuchElementException if the
    # danger alert is absent, which fails the step on its own.
    context.browser.find_element_by_css_selector("#content > div.container-fluid > div.alert.alert-danger")
@then(u'an explanation is displayed below the "Model" input')
def step_impl(context):
    # Switch to the "Data" tab where the validation message is rendered.
    context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(2) a").click()
    # Bug fix: WebElement.text is always a string (never None), so the
    # original `!= None` check was vacuous; require a non-empty message.
    assert(context.browser.find_element_by_css_selector("#tab-data > div.form-group.required.has-error > div > div").text != '')
@given(u'the "Links" tab is selected in tab menu')
def step_impl(context):
    # Activate the third tab and verify it is now marked active.
    context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(3) a").click()
    active_tab = context.browser.find_element_by_css_selector("#content .nav-tabs li.active a")
    assert active_tab.text == u'Links'
@given(u'products is in at least one category')
def step_impl(context):
    # Bug fix: without the `global` statement the assignments below created
    # function-local variables, leaving the module-level `category` (None)
    # and `categoryCount` (0) untouched — so the later @then steps compared
    # against stale values and `category.text` would raise AttributeError.
    global category, categoryCount
    childCategories = context.browser.find_elements_by_css_selector("#product-category div")
    categoryCount = len(childCategories)
    category = context.browser.find_element_by_css_selector("#product-category div:first-child")
    assert(categoryCount > 0)
@when(u'the user clicks on the remove icon next to a category name')
def step_impl(context):
    # Remove the first assigned category via its minus icon.
    remove_icon = context.browser.find_element_by_css_selector("#product-category > div:first-child > i.fa-minus-circle")
    remove_icon.click()
@then(u'category dissapears from the list')
def step_impl(context):
    # After the removal the number of category rows must have changed
    # from the count recorded by the earlier @given step.
    remaining = context.browser.find_elements_by_css_selector("#product-category div")
    assert len(remaining) != categoryCount
@then(u'the product is no longer in this category')
def step_impl(context):
    # Every remaining category row must differ from the removed category's text.
    for remaining in context.browser.find_elements_by_css_selector("#product-category > div"):
        assert remaining.text != category.text
@given(u'the "Image" tab is selected in tab menu')
def step_impl(context):
    # Open the ninth tab and confirm it became the active one.
    context.browser.find_element_by_css_selector("#content .nav-tabs li:nth-child(9) a").click()
    active_tab = context.browser.find_element_by_css_selector("#content .nav-tabs li.active a")
    assert active_tab.text == u'Image'
@given(u'products has at least one image')
def step_impl(context):
    # Bug fix: without `global`, the assignment below bound a function-local
    # `imagesCount`, leaving the module-level value (0) unchanged for the
    # later "image dissapears from the list" step.
    global imagesCount
    images = context.browser.find_elements_by_css_selector("#images > tbody > tr")
    imagesCount = len(images)
    assert(imagesCount > 0)
@when(u'the user clicks on the remove icon in the last table column')
def step_impl(context):
    # Click the delete button in the first image row's last cell.
    delete_button = context.browser.find_element_by_css_selector("#images > tbody > tr:nth-child(1) > td:nth-child(3) > button")
    delete_button.click()
@then(u'image dissapears from the list')
def step_impl(context):
    # Removing an image must change the row count recorded earlier.
    # (Trailing dataset-extraction junk "| 0.494873 | 0.074231" removed
    # from the final line — it was not valid Python.)
    images = context.browser.find_elements_by_css_selector("#images > tbody > tr")
    assert(len(images) != imagesCount)